Updating prebuilts and/or headers

c2e810fc3453d74ee0493168dbf7981ba482acd3 - NVIDIA-kernel-module-source-TempVersion/SECURITY.md
7d577fdb9594ae572ff38fdda682a4796ab832ca - NVIDIA-kernel-module-source-TempVersion/COPYING
12f1806bdc25917299525e0e48815306159de132 - NVIDIA-kernel-module-source-TempVersion/Makefile
60176067d89204db2a337983144481c56d94baf2 - NVIDIA-kernel-module-source-TempVersion/README.md
4f4410c3c8db46e5a98d7a35f7d909a49de6cb43 - NVIDIA-kernel-module-source-TempVersion/kernel-open/Makefile
90d4457b6fec29378645d5932ad82d706942f4a6 - NVIDIA-kernel-module-source-TempVersion/kernel-open/conftest.sh
a0a15eb341be905ced2a09b8c4feb8bb43b4fb39 - NVIDIA-kernel-module-source-TempVersion/kernel-open/Kbuild
0b1508742a1c5a04b6c3a4be1b48b506f4180848 - NVIDIA-kernel-module-source-TempVersion/kernel-open/dkms.conf
1d17329caf26cdf931122b3c3b7edf4932f43c38 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-msi.h
88399279bd5e31b6e77cb32c7ef6220ce529526b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hypervisor.h
60ef64c0f15526ae2d786e5cec07f28570f0663b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/conftest.h
ea98628370602119afb1a065ff954784757ddb10 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_dsi_panel_props.h
c06b2748cd7c8f86b5864d5e9abe6ecf0ab622f0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hash.h
423282211355a8cb20bff268166885ac90e2986c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_interface.h
c75bfc368c6ce3fc2c1a0c5062834e90d822b365 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-memdbg.h
35da37c070544f565d0f1de82abc7569b5df06af - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_firmware_types.h
82940edf4650b9be67275d3a360ef4e63387a0a7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/cpuopsys.h
1d8b347e4b92c340a0e9eac77e0f63b9fb4ae977 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numbers.h
4b7414705ce10f0a1e312c36a43824b59d572661 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvmisc.h
e4a4f57abb8769d204468b2f5000c81f5ea7c92f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs.h
6337f595602bce9d76559de1be90553b52f405d8 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-proto.h
b249abc0a7d0c9889008e98cb2f8515a9d310b85 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvgputypes.h
e20882a9b14f2bf887e7465d3f238e5ac17bc2f5 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_speculation_barrier.h
5c4c05e5a638888babb5a8af2f0a61c94ecd150b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-format.h
b4c5d759f035b540648117b1bff6b1701476a398 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvCpuUuid.h
880e45b68b19fdb91ac94991f0e6d7fc3b406b1f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci-types.h
c45b2faf17ca2a205c56daa11e3cb9d864be2238 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-modeset-interface.h
349696856890bdbe76f457376648522b35f874ef - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvimpshared.h
003b2cbe3d82e467c09371aee86e48d65ae6c29b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numa.h
b642fb649ce2ba17f37c8aa73f61b38f99a74986 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-retpoline.h
1e7eec6561b04d2d21c3515987aaa116e9401c1f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kernel-interface-api.h
3b12d770f8592b94a8c7774c372e80ad08c5774c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvi2c.h
b02c378ac0521c380fc2403f0520949f785b1db6 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-dmabuf.h
3100c536eb4c81ae913b92d4bc5905e752301311 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os-interface.h
143051f69a53db0e7c5d2f846a9c14d666e264b4 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kref.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-gpu-info.h
7b2e2e6ff278acddc6980b330f68e374f38e0a6c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-timer.h
fdbaee144adb26c00776b802560e15f775ed5aef - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-mm.h
befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_stdarg.h
80fcb510fad25cb7a017139f487da1843b7cfcbd - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-lock.h
59d537c1d1b284a9d52277aff87c237e3ec2c99d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs-utils.h
e3362c33fe6c7cdec013eceac31e8f6f38dc465f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_types.h
5d8de06378994201e91c2179d149c0edcd694900 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatuscodes.h
95bf694a98ba78d5a19e66463b8adda631e6ce4c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatus.h
4750735d6f3b334499c81d499a06a654a052713d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-caps.h
009cd8e2b7ee8c0aeb05dac44cc84fc8f6f37c06 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-kapi.h
d721fca5f2317b9b325dedcbfba51c00d0b23648 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-linux.h
4b1a6c372a531b0d3e0a4e9815dde74cb222447c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/rm-gpu-ops.h
94ad0ba9fd6eb21445baec4fddd7c67a30cceefa - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci.h
f3e0f71abf34300d322e313adcd4fcbde9aa6f87 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kthread-q.h
256b5dc6f28738b3ce656c984f01d8f3e13e9faa - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pgprot.h
c57259130166701bf6d5e5bb1968397716d29fc0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-platform.h
84e9b6cba7ba26ef4032666f769c5b43fa510aad - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-list-helpers.h
df0420a5e3576e5a8b77a7bcefa6888ad62d6fd7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv.h
910255a4d92e002463175a28e38c3f24716fb654 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-api-types.h
42ece56d0459eb9f27b2497de48f08360c4f7f6b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvlimits.h
4a8b7f3cc65fa530670f510796bef51cf8c4bb6b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-register-module.h
5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/dce_rm_client_ipc.h
906329ae5773732896e6fe94948f7674d0b04c17 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_gpio.h
2f5fec803685c61c13f7955baaed056b5524652c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl.h
d25291d32caef187daf3589ce4976e4fa6bec70d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-time.h
8c041edbf4ed4fefdfd8006252cf542e34aa617b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvtypes.h
cda75171ca7d8bf920aab6d56ef9aadec16fd15d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os/nv_memory_type.h
2ea1436104463c5e3d177e8574c3b4298976d37e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms-ioctl.h
17855f638fd09abfec7d188e49b396793a9f6106 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms.h
c181ab9960b0c01a7672bc1fe1bc8870f1e8856d - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-linux.c
0b7e063481a0e195c6e91a4d3464c4792c684f03 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nv-kthread-q.c
07a2d5fa54ff88a0cb30c0945ef3c33ca630a490 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild
7d108165b4a7b6a44ac21460ea3bf4381fb48c5b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h
8bedc7374d7a43250e49fb09139c511b489d45e3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.h
9a0f445fda73c69e1bee7f6b121cbed33fcb01bf - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-mmap.c
c5cfba80ea122c9078f2d44f1538144747d7931b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv.c
95ae148b016e4111122c2d9f8f004b53e78998f3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-memdbg.c
24fd035338936c76fda8faeb0d8b1cd59875db92 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia.Kbuild
3ee953312a6a246d65520fc4a65407f448d1d2b8 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-gpio.c
cded6e9b6324fd429b865173596c8e549a682bba - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_uvm_interface.c
5f2e279a4abe0dabd478b1589be67df18de4b09d - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-i2c.c
c1ebcfec42f7898dd9d909eacd439d288b80523f - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-mlock.c
d11ab03a617b29efcf00f85e24ebce60f91cf82c - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-backlight.c
dc39c4ee87f4dc5f5ccc179a98e07ddb82bb8bce - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-modeset-interface.c
7b1bd10726481626dd51f4eebb693794561c20f6 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-host1x.c
06e7ec77cd21c43f900984553a4960064753e444 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform-pm.c
d4f2cac6234e5ad337c254875a26d17372f28162 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-interface.c
e903f50b2624f33807214973558b9ff380bd68e0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform.c
805042e7cdb9663a0d3ca3064baeec8aa8eb3688 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.c
c7f1aaa6a5f3a3cdf1e5f80adf40b3c9f185fb94 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.c
0b0ec8d75dfece909db55136731196162c4152d5 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dmabuf.c
84d84563c003d3f568068e7322ce314387a6f579 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-caps.c
94c406f36836c3396b0ca08b4ff71496666b9c43 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-usermap.c
fbae5663e3c278d8206d07ec6446ca4c2781795f - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.h
2c0d17f9babe897435c7dfa43adb96020f45da2b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dsi-parse-panel-props.c
9b701fe42a0e87d62c58b15c553086a608e89f7b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.h
0ce95e5ed52d6d6ca2bb6aac33ca8f197145ec45 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs-utils.c
cf90d9ea3abced81d182ab3c4161e1b5d3ad280d - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.h
02b1936dd9a9e30141245209d79b8304b7f12eb9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-cray.c
26c3971ea7afb4b7f237db9ab1c321c3de814518 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.c
218aac0c408be15523a2d0b70fdbdadd7e1a2e48 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-imp.c
6d4fbea733fdcd92fc6a8a5884e8bb359f9e8abd - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/rmp2pdefines.h
5ac10d9b20ccd37e1e24d4a81b8ac8f83db981e4 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vtophys.c
9999872b1513360d8ecf6c0894f81c63e7d435e9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dma.c
fc566df59becef7bc7511ae62a9a97b1532a5af2 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.c
b71bf4426322ab59e78e2a1500509a5f4b2b71ab - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.h
a3626bf1b80a81c14408c5181e8bd27696df2caf - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci.c
98c1be29932b843453567d4ada2f9912ea4523d7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vm.c
0b7e063481a0e195c6e91a4d3464c4792c684f03 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-kthread-q.c
61eadfa0f5b44a3d95e4d2d42d79321fc909c661 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-clk.c
4eee7319202366822e17d29ecec9f662c075e7ac - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.c
495bcdff3847ff67ba4bbf9af23729eb66eed487 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-acpi.c
64f1c96761f6d9e7e02ab049dd0c810196568036 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.c
d844fcaa5b02f1d1a753965a336287148b2ce689 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.h
dc165103f9196f5f9e97433ec32ef6dded86d4bb - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-registry.c
68d781e929d103e6fa55fa92b5d4f933fbfb6526 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.h
978d00b0d319c5ad5c0d3732b0e44f4ac0ac9a4c - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_gpu_ops.h
fbfa2125b2bac1953af6d6fd99352898e516a686 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-msi.c
027fd0ab218eb98abe2b66d05f10b14ebb57e7a3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-nano-timer.c
07f95171c241880c472a630d1ee38fb222be4d59 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia-sources.Kbuild
a392fa800565c8345b07af5132db7078b914d59f - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-pci.c
ee894ec530acbd765c04aec93c1c312d42210aeb - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ipc-soc.c
f179d308e984ff44a82f6e1c6007624f1ac916ba - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs.c
e2b0e4ef01bb28ff6dcc10cb44570e185ce82df0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-reg.h
7ac10bc4b3b1c5a261388c3f5f9ce0e9b35d7b44 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-usermap.c
d9221522e02e18b037b8929fbc075dc3c1e58654 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.c
8bedc7374d7a43250e49fb09139c511b489d45e3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.h
eca70b3b8146903ec678a60eebb0462e6ccf4569 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.h
3c9a013abdc787a1022b11099af4277c37cd666b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.Kbuild
e4bb0073eb9d6f965923bb9874e4714518850a27 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.h
99642b76e9a84b5a1d2e2f4a8c7fb7bcd77a44fd - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.h
8b2063f0cc2e328f4f986c2ce556cfb626c89810 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.c
6528efa1f8061678b8543c5c0be8761cab860858 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.h
ab63f2a971db8bf10585b1a05fe0e3ca180ad6c7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-os-interface.h
40b5613d1fbbe6b74bff67a5d07974ad321f75f0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.h
2911436a80d67074106c507871f4b480aa307237 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.c
8c95aa7ab01dd928974ce7880a532557209bd8e0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.h
fa8d8d10ae773bb7db3b3ce1df545de0e04c937e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.c
23586447526d9ffedd7878b6cf5ba00139fadb5e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
cbcd6e13d84ea6b52db12eda98be38e321888eb0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.h
a7bc26c1078e95f9ff49c164f3652787adf1fef3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.c
bb1f2105d19b50634d46a92ade7fc5f709ec25d3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.c
c8982ace6fc79f75c092662902c0c61371195f0c - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-linux.c
66b33e4ac9abe09835635f6776c1222deefad741 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.h
6d65ea9f067e09831a8196022bfe00a145bec270 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h
45ec9fd1abfe9a0c7f9ffaf665014cec89c9e7e6 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.h
7129c765da5bfb77788441fed39b46dc7dc0fa8e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c
59bb05ef214b5c5f2fe3cf70142dabd47ea70650 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-ioctl.h
ef03d0ae581cc0326abe6054249791f8c0faa9a8 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.c
044071d60c8cc8ea66c6caaf1b70fe01c4081ad3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-conftest.h
708d02c8bcdfb12e4d55896e667821357c8251ec - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-priv.h
dc0fe38909e2f38e919495b7b4f21652a035a3ee - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.c
e4efab24f90d397c270568abb337ab815a447fec - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-fence-helper.h
b775af5899366845f9b87393d17a0ab0f1f6a725 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.c
1e05d0ff4e51a10fa3fcd6519dc915bf13aa69c0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.h
492a1b0b02dcd2d60f05ac670daeeddcaa4b0da5 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h
892cac6dd51ccfde68b3c29a5676504f93ee8cd7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.c
355126d65ea1472ce3b278066811d4fb764354ec - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c
5209eba37913f5d621a13091783622759706e6e3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.c
e362c64aa67b47becdbf5c8ba2a245e135adeedf - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c
9a882b31b2acc9e1ad3909c0061eee536e648aae - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.h
5008845a531207899830bcf4568c3463ad0ea6bc - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.c
97b6c56b1407de976898e0a8b5a8f38a5211f8bb - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.h
d862cc13c29bbce52f6b380b7a0a45a07fe9cbac - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.c
c294224282118c70cd546ae024a95479ad9b1de4 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h
d9221522e02e18b037b8929fbc075dc3c1e58654 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.c
bda08c8398f68ffc2866ebc390dc63a09a16b0b9 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/unix_rm_handle.c
e903bbbecf4fb3085aaccca0628f0a0e4aba3e58 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_mode_timings_utils.c
5ef40af650eb65b2c87572a1bbfe655d8821f2d5 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_memory_tracker.c
26f2a36442266c5d2664d509ecfd31094a83e152 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_vasprintf.c
9e008270f277e243f9167ab50401602378a2a6e8 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_vasprintf.h
8d9c4d69394b23d689a4aa6727eb3da1d383765a - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/unix_rm_handle.h
07c675d22c4f0f4be6647b65b6487e2d6927c347 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_memory_tracker.h
667b361db93e35d12d979c47e4d7a68be9aa93b6 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_mode_timings_utils.h
881cbcc7ed39ea9198279136205dbe40142be35e - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_assert.h
1c947cfc8a133b00727104684764e5bb900c9d28 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_mode_timings.h
83044eb5259200922f78ad3248fbc1d4de1ec098 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_common_utils.h
2476f128437c0520204e13a4ddd2239ff3f40c21 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv-float.h
a8e49041c1b95431e604852ad0fa3612548e3c82 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_dpy_id.h
e3be7ba45506c42d2fca87e9da45db75ced750ca - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_common.h
f669280a5e86ba51b691e2609fa7d8c223bd85dc - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C671.c
7c2fe72426fa304315e169e91dc6c1c58b5422fd - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_0073.c
381e1b8aeaa8bd586c51db1f9b37d3634285c16a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_class.h
67db549636b67a32d646fb7fc6c8db2f13689ecc - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9271.c
5e12a290fc91202e4ba9e823b6d8457594ed72d3 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h
d2c79c8a4e914519d653d1f14f706ec4a1f787e8 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9171.c
15d54c86d78404639c7f151adc672e19472dcf4a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.c
9be7b7be94a35d1d9a04f269ff560dbbb7860a2a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9571.c
54a1b5e5aaf0848a72befc896ed12f1de433ad4f - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9471.c
443c0a4b17a0019e4de3032c93c5cac258529f01 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_internal.h
e6d500269128cbd93790fe68fbcad5ba45c2ba7d - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C371.c
90e8ce7638a28cd781b5d30df565116dc1cea9e8 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.h
f75b1d98895bdccda0db2d8dd8feba53b88180c5 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid.h
ba9e382b24f57caa9dcf1c26a60b1f2070b1b9dd - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_displayid20.c
28d7b753825d5f4a9402aff14488c125453e95c5 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_tv.c
b4813a5e854e75fb38f460e0c27dca8e1ce8dc21 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edid.c
1290abde75d218ae24f930c3b011042a3f360c2e - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid20.h
4a2ad30f49ed92694b717a99ce7adeeb565e8a37 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_861.c
439ef00ffa340bd1b6506970d154a33ca4b64b4a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dmt.c
cfaa569ac3d63484c86e8a8d7a483dd849f96be8 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid20.c
1997adbf2f6f5be7eb6c7a88e6660391a85d891b - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_gtf.c
49df9034c1634d0a9588e5588efa832a71750a37 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_cvt.c
58b68f1272b069bb7819cbe86fd9e19d8acd0571 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/edid.h
890d8c2898a3277b0fed360301c2dc2688724f47 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_util.c
3023a58fd19d32280607d4027b09fe51fdb7a096 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.h
e66a20fc1579b0dd1392033089f97cf170e8cf10 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/dpsdp.h
b5bd3a58b499216e4fe0e0c9c99525b07ac237dc - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.c
f531475d8b978bca5b79d39d729b0c9986fe7b36 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming.h
95dae946088f21339299dae48eeafaab31b97b05 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming_pvt.h
0a04709ebdc4acb12038656c433e10c4e7096518 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid.c
1ff879eca2a273293b5cd6048419b2d2d8063b93 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mulAdd.c
1a86a6948bf6768bd23a19f1f05d40968c1d2b15 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_rem.c
c3ce12c227d25bc0de48fbcf914fc208e2448741 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sub.c
fb062ecbe62a1f5878fd47f0c61490f2bde279dd - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI32.c
38bd00e9c4d2f1354c611404cca6209a6c417669 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros64.c
0e9694d551848d88531f5461a9b3b91611652e9a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32_r_minMag.c
9f4d355d85fbe998e243fe4c7bbf8ad23062b6e2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f64.c
23b76c1d0be64e27a6f7e2ea7b8919f1a45a8e7c - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32_r_minMag.c
5c4ee32cc78efc718aaa60ec31d0b00b1bee3c2c - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64_r_minMag.c
09cb0cdb90eb23b53cd9c1a76ba26021084710d1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF32.c
00c612847b3bd227a006a4a2697df85866b80315 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF32.c
29321080baa7eab86947ac825561fdcff54a0e43 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f32.c
2e0fec421f4defd293cf55c5f3af7d91f4b7d2cc - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui64_to_f32.c
ebb4f674b6213fec29761fc4e05c1e3ddeda6d17 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mulAdd.c
2e5c29d842a8ebc5fbf987068dc9394cee609cc7 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64.c
daeb408588738b3eb4c8b092d7f92ac597cf1fc6 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_rem.c
da3b3f94a817909a3dc93ca5fa7675805c7979e0 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_isSignalingNaN.c
bc992c88f3de09e3a82447cf06dbde7c6604f7f8 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_f32.c
dafa667ee5dd52c97fc0c3b7144f6b619406c225 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF64.c
2960704c290f29aae36b8fe006884d5c4abcabb4 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_div.c
d4b26dc407a891e9ff5324853f1845a99c5d5cd2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32.c
0adfa7e174cdb488bb22b06642e14e7fc6f49c67 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI64.c
fd40a71c7ebf9d632a384fadf9487cfef4f3ea98 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_shiftRightJam128.c
9a5b93459ace2da23964da98617d6b18006fab86 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros8.c
ae25eea499b3ea5bdd96c905fd0542da11083048 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF64.c
729e790328168c64d65a1355e990274c249bbb3a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32_r_minMag.c
296c40b0589536cb9af3231ad3dcd7f2baaa6887 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt.c
5c1026617c588bcf5f1e59230bd5bb900600b9ac - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mul.c
4b37be398b3e73ae59245f03b2ba2394fc902b4d - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF64Sig.c
69dc4cc63b2a9873a6eb636ee7cb704cbd502001 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32.c
d0f8f08c225b60d88b6358d344404ba9df3038ec - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF32Sig.c
c951c9dffa123e4f77ed235eca49ef9b67f9f3d2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_subMagsF64.c
dde685423af544e5359efdb51b4bf9457c67fa3b - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sqrt.c
577821f706c7de4ca327c1e2fcc34161c96c89f3 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64_r_minMag.c
5a5e0d9f1ee7e8c0d1d4f9fbcf6eba330a5f1792 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_isSignalingNaN.c
84b0a01ba2a667eb28b166d45bd91352ead83e69 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f32.c
513a7d1c3053fc119efcd8ae1bcc9652edc45315 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt.c
4445b1fbbd507144f038fd939311ff95bc2cf5f1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui64_to_f64.c
b9fd15957f7ae5effeccb5d8adaa7434b43f44e1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI64.c
ab19c6b50c40b8089cb915226d4553d1aa902b0e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32_r_minMag.c
7bc81f5bc894118c08bfd52b59e010bc068ed762 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f32.c
7c8e5ab3f9bf6b2764ce5fffe80b2674be566a12 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/softfloat_state.c
ec1a797b11f6e846928a4a49a8756f288bda1dfa - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f64.c
86fdc2472526375539216461732d1db6a9f85b55 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF32.c
b22876b0695f58ee56143c9f461f1dde32fefbf3 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64.c
d701741d8d6a92bb890e53deda1b795f5787f465 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le.c
baa7af4eea226140c26ffe6ab02a863d07f729fb - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq_signaling.c
ce37cdce572a3b02d42120e81c4969b39d1a67b6 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32.c
0108fe6f0d394ad72083aff9bb58507f97a0b669 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f64.c
b8c5ccc1e511637d8b2ba2657de4937b80c01c07 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le.c
54cbeb5872a86e822bda852ec15d3dcdad4511ce - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_add.c
c29536f617d71fe30accac44b2f1df61c98a97dc - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_div.c
871cb1a4037d7b4e73cb20ad18390736eea7ae36 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64_r_minMag.c
21a6232d93734b01692689258a3fdfbbf4ff089d - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI32.c
760fd7c257a1f915b61a1089b2acb143c18a082e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF64.c
5e6f9e120a17cc73297a35e4d57e4b9cbce01780 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mul64To128.c
0bf499c0e3a54186fa32b38b310cc9d98ccdcfe3 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq.c
29396b7c23941024a59d5ea06698d2fbc7e1a6ca - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64.c
108eec2abf1cddb397ce9f652465c2e52f7c143b - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_roundToInt.c
fe06512577e642b09196d46430d038d027491e9f - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq_signaling.c
d19ff7dfece53875f2d6c6f7dd9e7772f7b0b7ec - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64_r_minMag.c
1484fc96d7731695bda674e99947280a86990997 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64.c
8e58f0258218475616ff4e6317516d40ad475626 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt_quiet.c
6fa7493285fe2f7fdc0ac056a6367e90327905c2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sub.c
aaf6ccb77a1a89fa055a0fb63513297b35e2e54b - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le_quiet.c
bbc70102b30f152a560eb98e7a1a4b11b9ede85e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sqrt.c
e0ad81cfb5d2c0e74dc4ece9518ca15ffc77beaf - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_roundToInt.c
50b3147f8413f0595a4c3d6e6eeab84c1ffecada - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF32.c
50daf9186bc5d0180d1453c957164b136d5ffc89 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq.c
6f83fa864007e8227ae09bb36a7fdc18832d4445 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mul.c
a94c8c2bd74633027e52e96f41d24714d8081eb4 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c
e7890082ce426d88b4ec93893da32e306478c0d1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt32_1.c
2db07bbb8242bc55a24ef483af6d648db0660de0 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_add.c
9266c83f3e50093cc45d7be6ab993a0e72af1685 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF64.c
00ab2120f71117161d4f6daaa9b90a3036a99841 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32.c
824383b03952c611154bea0a862da2b9e2a43827 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_subMagsF32.c
68843a93e1f46195243ef1164f611b759cf19d17 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le_quiet.c
e4930e155580a0f5aa7f3694a6205bc9aebfe7aa - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f64.c
054b23a974fc8d0bab232be433c4e516e6c1250a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt_quiet.c
0d8e42636a3409a647291fdb388001c2b11bba07 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f16.c
d9a86343e6cc75714f65f690082dd4b0ba724be9 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF16.c
1dd1b424087d9c872684df0c1b4063b077992d5f - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
86cda6550cb02bbf595d1667573e4be83702a95e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/specialize.h
21a11759ed2afd746a47c4d78b67640c2d052165 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c
a6d5c83f6a0542b33ac9c23ac65ef69002cfff9d - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
3d0dbc0a672d039a6346e1c21ddf87ffc9181978 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
252c816378fddab616b1f2a61e9fedd549224483 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
d8b0c55a49c4fa0b040541db6d5ff634d7d103e7 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
d152bc457b655725185bdff42b36bb96d6e6715e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c
0cbae7a5abc336331d460cbd3640d2cda02af434 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
1ded4df85ff5fa904fa54c27d681265425be1658 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitiveTypes.h
f36c896cfa01f1de9f9420189319e4e00c7fc52a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/internals.h
9645e179cf888bcd0e3836e8126b204b4b42b315 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat.h
de09949a0ca5cd2a84b882b5b5c874d01d3ae11a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitives.h
4cd1d6cfca3936a39aab9bc0eb622f5c7c848be1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat_types.h
b882497ae393bf66a728dae395b64ac53602a1a5 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/nv-softfloat.h
be9407a273620c0ba619b53ed72d59d52620c3e4 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/platform.h
91e9bc3214d6bb9b20bc8001d85fe8699df5184a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvos.h
88399279bd5e31b6e77cb32c7ef6220ce529526b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-hypervisor.h
f28f98589e65b71e47dbcb2c4230538ae0545e75 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/cpuopsys.h
4b7414705ce10f0a1e312c36a43824b59d572661 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvmisc.h
af0bc90b3ad4767de53b8ff91e246fdab0146e8b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvsecurityinfo.h
a506a41b8dcf657fb39a740ffc1dfd83835d6c89 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvcfg_sdk.h
b249abc0a7d0c9889008e98cb2f8515a9d310b85 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvgputypes.h
ae60d53603c7ddbbd72d4e16ce2951f3d42aed32 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nverror.h
a31b82c454df785a1d7893af38e83443cfe6f2fc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvdisptypes.h
ffa91e1110a5cc286ec44a7bda5461b2be941ea2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_vgpu_types.h
9bca638f5832d831880f090c583fac6fc8cf6ee6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/dpringbuffertypes.h
821a01976045d7c3d2ac35b0f115e90a9e95f8e8 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvimpshared.h
1e7eec6561b04d2d21c3515987aaa116e9401c1f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h
3b12d770f8592b94a8c7774c372e80ad08c5774c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvi2c.h
befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_stdarg.h
5d8de06378994201e91c2179d149c0edcd694900 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvstatuscodes.h
95bf694a98ba78d5a19e66463b8adda631e6ce4c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvstatus.h
50d31a6d133b0ea9230f9dc1b701ce16a88a7935 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/rs_access.h
eb42327a2b948b79edc04d9145c7aa5b2a2b420e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvlimits.h
9f2e225f027f5a04d1104d29a0039cd2bb7dd85a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvfixedtypes.h
a9bf4969ae3e39cc315b6180ee7055e0ad1279c6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvtypes.h
00e9a0ace4b59958a8b048229fb22b4d9e2f8864 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h
3449834cb8b8c630ab1de6df30503c846b26e86b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
f779cd0470e428160fc590b590f2cd4855950058 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h
7c4aef225d174ecbe1130d63b8e8ff752bddf48e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
5abe75cf18a2fede23529194b406c3cf742edced - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h
c8490da9f200f4dbbac7ebe636f3a83485f3001c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h
1022bba330a71b92dcc81f47ba460209fcc70cd0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h
b72318d58806bfd25f922107a606b222baa2e28c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h
7a0c878431a9b0d9dda117f165946b1cdf8ebbde - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h
e2d8133537e2687df022c6a966c55fbfea1974f3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h
9c6a4f1d864b5161564869b19f8cb2ce9d629c1d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h
0639d6cd553994aff4195e8e7547eebf8e713145 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h
79204c26eb58ee812cc2f72ee1f6d4d7d93817c7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h
ea9aac6f0e23f0de444ac3919c35e4b78c18c942 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h
f7435e356d54d682a949734574388abbe7ffe1d0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h
64f849ed19609320461b8938f24f0b40fb1a35b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h
d107e41878b5bc50a5c8b29684122c9589625a6f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h
f4a4eeb35e15e0642d1bf4e2e5b31394f4cbbfa1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h
b7b0360b1a6ca78267fa10f7adcd370da86513c3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h
862a17958488d69ca3e92c42ee1bed55cb299fa4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
bb4182eeea20779f62165d2d50ed209b6a07e54e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h
b7f2957f506dc285acb87d41d34cfd60408b00ae - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080rc.h
c72f147e8fb78126d13567278239acfcd9b9cc1f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h
8dd5acedc0b1613314eb3fe9130a9c282bd49ca1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h
681c94b982e29049638814f6c1e4eb508f8b0bf3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h
3646710984d5c3024d16f9ab346222ad6dfdb4f0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
6c34803c213ea0a28114bc921e1867cefebec088 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h
76c9f104e04a8fd9e73e03ad59b2e72264c5f169 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h
9e61da81ecdff15d63f9ae8a1c2f0960b820c65c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
dac18fcaf5d652b21f84cfba455f4f5972e786c5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h
d51e47795dfe1fc0bae31b9379d6a39ac4d3080f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
8a613db1c31724a577c4718752c15d9754882f48 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h
3966d65c9701bf97c807cf87838a08cda10f418d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h
a1830232f18afe44230d6a8598c50b3fc7656089 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h
2dd40e3e41d74de3865bc700acc9ab7e0540c647 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h
f97e7f88aa17788bbbebf55807e449c0ee016384 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h
b2b6b3b413ae17af1afde2fc8672cd1bf48e7b19 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h
3c7130d0613d3c8baef6b23bb63c6ee7a10ed21b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h
39f5e838aa6ab007c56e7a59c7d2986d1a7aa34a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
6679d97e3852ed78ee44780408c523b94f426ca4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h
090f908931690302e3a2c77f3ce41c4de0c61efc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h
7c4e426dee0ae86c00b3bd10873a1a2bd94ed3b2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h
5bdddb9a949a78443f83a7da81ad5fee8a300c44 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h
d084d99035f4cc34cd803ff4a5328b9e10ea77fc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h
4b8fa2ce546ae3f06b7dc61df3d534449cdb5b2d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h
8855ee8bad2f2169ebd147e7ac77d9f1340cbad8 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h
82a2e7a2fc6501163d07870f3f640a591f4a8996 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h
f3a855fe7a91c2acf2be41629ce906996e01a9fc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h
3d8e37aa8485aadf55335d8f9f913273d90a2442 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h
da220a5608a0e4c73fa0315b13e2b29d92b114e9 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h
6834a9c75265c25adfb03f0b2dbfe0559f28cadf - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h
051dbfd1d5ff02b2771bc9b3fad8aaef29aab9ae - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h
c3a75647f5ca6cd7b456511af36a9de6d90329c3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h
82364e263f43ea028c2d66db58887958bdef64b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
143c1c24ec926142d1f84dec7a543f2b98541545 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h
1684a3a8111fd3d83363cebe68d016a54eaaf686 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h
72292c9844eaf24c38967dd4a879c0c0f070a0de - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
091f7bac99f5c786a64b6fa59d9d27af786bab10 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h
c0181e959c1ba5ebfc3f130c8764687b58453f9b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
2a11fc0a499f8293b83e08572f5e6be04bd1da61 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
a44d2f1b31b8ec124355018204909df19df09748 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h
8ef946f1d7545277ef64891b45a29db44c4e9913 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h
774fd1e730d1d853bf97946f7ecd24c6648c7af4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h
22d828c87b223f937c589a0e863a25d95b734371 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
7d3819683e9f562a87f36a3e23c043b2b6fd814e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h
7d27fafff043d290b2ec1d2dddbecea2f1df4704 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h
27ad8b5c2406fcd572cd098dd215e93ae1db99e3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h
783db6da0b92b6b8ae26b180129beb0bccb13a5b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h
e6f6beaed64167088608027b442f5449cff027c1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080common.h
6b4418e269bb97b9996b05ea153ccd195c661e11 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
0ac7e4eb4d952c84c6f4e697cbfcb355069377c2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h
1651ec548a2899391a05bc6463b3f7162c7807ab - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
bc22bf13b7d99ee6f80c30b569e084a2b03e385a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h
1ebfe9f0f9a7d2dd2873df82bbc78b1ec982ca93 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h
291f91212d5a37aae46a2944cf89f4b74b1d1809 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h
82aa4d6108ce6abebcbbc95afcb7a6350e287f5f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h
c4474dc1f53661c67d8fce5303dcc636d9ad3b8f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h
18814de559257f07bad8a0a9006ac9751fcfa1cb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h
e9d692b06c70951dbbd0663a89f822153bce1146 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h
1248e113751f8ed9e4111e86a7f7fb632b102eca - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h
b921747a65c67fa093de08fa782c164d048824b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
7e0773f7bf13350a9fd25b0df4d6c45a55a008df - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h
8fd661537cc4eb55c167b9daae404bfb82408bfe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h
f88f1c519a242dfa71221bdcdafc7deab14d8503 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
ccc48726d7da49cddc4d4f86d8dbd2ad585f7b38 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h
3dc187adc0a848e68f62a6a7eb99ac02ee6502cc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h
f3b81a241efe1224798b17c062e33936469c3c2b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h
09dedebdcff3244ab8f607a7152e9116d821f9c1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
440314f66374d35a1628ee8bd61836a80ab421eb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
92be535d68a7f18088921faa3f1742298ad341c3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h
84fb76f9cff38c797b139cba40175717591d49df - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h
2f92bebccb9da5246b19bd13ff0e6e79de79bc3b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h
aec1b750866e34f9626e48c535336f93c5c246fa - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h
9031642283b59ee6d52e2e1ca54332df5c2f7acc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h
e10cbe4875736ef16072232789dd3f48647c022f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h
91cccede5c4f26a6b6ca7ba4bc292f3d908a88d4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h
f47136417885a729f9c5dee375ec9dec1bd170e0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h
f523fe4a55a6a9d01f41f9f34ff149ed75b2e739 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070event.h
ad7604ced12ee18c569d2a7ebe71e185ebff3fd4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h
209ef519cb73395cea7d66016448ebc3c6bf6fe4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h
4a3e7d71b9169d703d9373ff80b02a63825a80e4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h
4d9116d23d27a3fc39c366f2685243b83ef7d485 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h
abe79ad927e7c70b7c1a8eb687052a782efcd5f4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h
ef180860a1ccbcb9f5d2f8a6656a345eef76a2a7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h
f7e56d494fea02515180f21b0f56ae0aff583be4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h
b66a45c83c84f6d458ef19fd7e0f972f2eabd109 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h
2518a62952c72ee6f3447bc8dc417129f6ac26a4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h
9373c51ca29afec3368fb5b8c2a2f05b0920f291 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h
0ee647b929e55cf39da7e26ffc0f027676fa52fa - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h
6e5b278451308efbb6911a8ab03b0feba504d035 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h
c905766589d17fcb99a5d73846ed61f7b7db56fe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h
323fcc6af8c30d5ef292ae90810c5c2fa2009e20 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h
382dc80790d870047db7cea957ef208d4439801e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gspc.h
825f4d976c76d375803e42967fdab53e7814d18d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h
8294d43d202a9cd78367f2e69388a6c6f2c369f7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h
cf78a847e0882e1d164eccdb86ea033126019599 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h
76c31150e2f589fbb96cfc06cdc6c1801e128656 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h
7f5548026751a8caaebc245945ccdc4bb037b566 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h
7812ba094d95c1b6d65afc6a1d26930400b8b96f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h
f1dae17e75a24c28135cf073bf29f9609a2418e3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h
24782552a13f627e2e94ebb5f7021246a0c0dc53 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h
127f78d2bb92ef3f74effd00c2c67cf7db5382fe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67d.h
bb79bbd1b0a37283802bc59f184abe0f9ced08a5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0040.h
4a6444c347825e06bdd62401120553469f79c188 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h
2f87e87bcf9f38017ad84417d332a6aa7022c88f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9471.h
0d8975eec1e3222694e98eb69ddb2c01accf1ba6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000_notification.h
c2600834921f8a6aad6a0404076fa76f9bc1c04d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37b.h
861b9d7581eab4a2b8cc7269b5d0e0d1294048d1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005.h
92c2dab6bc48f32f46c6bbc282c63cb4ec7a50bf - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9170.h
0285aed652c6aedd392092cdf2c7b28fde13a263 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00fc.h
dec74b9cf8062f1a0a8bbeca58b4f98722fd94b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0076.h
a30755b3003023c093f8724cf9a2e0b0c301b586 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9010.h
cb610aaae807d182b4a2ee46b9b43ebfa4a49a08 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57e.h
bb8d15aee43e1feb76fddf80398e93fd805f1ddb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2082.h
02906b5ba8aab0736a38fd1f6d7b4f6026a5185b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57esw.h
ccefba28a2c7979701f963f2c358b4414b84ca98 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9570.h
2e3d5c71793820d90973d547d8afdf41ff989f89 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67a.h
204feb997ba42deab327d570e5f12235d5160f00 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57a.h
03ab4e08e8685696477b62eb1a825e5198d61b8a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080.h
545dd1899c6988ffe5f50300232bd862d915cd5b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc770.h
022e8405220e482f83629dd482efee81cc49f665 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc77f.h
36b0dd6de0d0b49d435a4662c35d1f4ae5b2b1bc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9870.h
02ff42b6686954e4571b8a318575372239db623b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1_notification.h
82c9df617999f93ebd9362851966f601b8131fdd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc570.h
eac86d7180236683b86f980f89ec7ebfe6c85791 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl957d.h
866977d299eac812b41eb702a517e27bdc56e875 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37a.h
78259dc2a70da76ef222ac2dc460fe3caa32457a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37e.h
31939808cd46382b1c63bc1e0bd4af953302773f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl977d.h
11fd2de68ab82b81211aa20c66a9a6595199f673 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9270.h
05605d914edda157385e430ccdbeb3fcd8ad3c36 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9171.h
9db39be032023bff165cd9d36bee2466617015a5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0002.h
76c430d54887ed14cace9409712259e10f042b4c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c1.h
e63ed2e1ff3fe2a5b29cfc334d3da611db2aadf6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h
ea10b0d938d9314638882fdc20b9158a193f7b08 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070.h
f5760f5054538f4ecf04d94fb1582a80a930bc29 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc673.h
b1133e9abe15cf7b22c04d9627afa2027e781b81 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917c.h
9bd9f416844d798f352fcc6c8aaf2c251253c068 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90cd.h
04ab1761d913030cb7485149ecd365f2f9c0f7da - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005_notification.h
fb5ef3d6734a2ee6baba7981cdf6419d013cee85 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc671.h
ddbffcce44afa7c07924fd64a608f7f3fe608ccc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0071.h
68c953956a63ef8f7f9bcbe71057af510f4597c1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clb0b5sw.h
38265d86eb7c771d2d3fc5102d53e6a170a7f560 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0041.h
941a031920c0b3bb16473a6a3d4ba8c52c1259d7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917e.h
a23967cf3b15eefe0cc37fef5d03dfc716770d85 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc372sw.h
9b2d08d7a37beea802642f807d40413c7f9a8212 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37d.h
e0c9a155f829c158c02c21b49c083168f8b00cbe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dswspare.h
95d99f0805c8451f0f221483b3618e4dbd1e1dd8 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90f1.h
8b75d2586151302d181f59d314b6b3f9f80b8986 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc573.h
ff47d8a4b4bdb3b9cd04ddb7666005ac7fcf2231 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl003e.h
026f66c4cc7baad36f1af740ae885dae58498e07 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc371.h
15136a724baab270914a01a8c0e8f2c2c83675b6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c3.h
4bbb861011139be1c76b521eaa7ae10951d5bf9a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2081.h
d1a19dee52b3318714026f4fcc748cfa4681cd25 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc370.h
158c98c8721d558ab64a025e6fdd04ce7a16ba9e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl947d.h
435a34753d445eb9711c7132d70bd26df2b8bdab - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917d.h
326dbbeb275b4fc29f6a7e2e42b32736474fec04 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9571.h
1409efc057e4f0d55602f374ec006f9db7ad3926 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000.h
bd27ceb75c4604fef53658f16a5012d97c1534b2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9470.h
e6818f1728a66a70080e87dac15a6f92dd875b4e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927d.h
11b19cb8d722146044ad5a12ae96c13ed5b122b6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917b.h
1efc9d4aa038f208cd19533f6188ac3a629bf31a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917a.h
c2d8bb02052e80cd0d11695e734f5e05ab7faeb5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl907dswspare.h
4b8f95693f79a036317ab2f85e150c102ad782e9 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl84a0.h
a7c7899429766c092ee3ecf5f672b75bef55216c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9271.h
15d1f928a9b3f36065e377e29367577ae92ab065 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080_notification.h
a26ddc6c62faac1ecd5c5f43499aab32c70f32cb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67b.h
b29ba657f62f8d8d28a8bdd2976ef3ac8aa6075f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0073.h
c5ef1b16b2bd2e33f52b71f2b78db789ebb844f0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9770.h
ecc56a5803b85187aa95b788aedd4fa2262c1bb6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2080.h
dd4f75c438d19c27e52f25b36fc8ded1ce02133c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917cswspare.h
6db83e33cb3432f34d4b55c3de222eaf793a90f0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00b1.h
b29ea3f13f501327c060b9ddfac5834ed396414a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1.h
4d5ccf08ab73343343e0c804002a621996866161 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0092.h
593384ce8938ceeec46c782d6869eda3c7b8c274 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl900e.h
95ca0b08eed54d1c6dd76fdf9cf4715007df1b20 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0020.h
c61f8348c2978eef0a07191aaf92bd73e935f7bd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67e.h
509c56534ed6d48b06494bb22d3cf58d63254a05 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc574.h
da8d312d2fdc6012e354df4fa71ed62ae4aac369 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927c.h
5416c871e8d50a4e76cbad446030dbedbe1644fd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00f2.h
b7a5b31a8c3606aa98ba823e37e21520b55ba95c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl402c.h
26c3ccc33328a66ad3bcfe999424dffda991264f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc670.h
28867d69a6ceac83da53a11a5e1ef87d9476f0be - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57d.h
053e3c0de24348d3f7e7fe9cbd1743f46be7a978 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0004.h
060722ac6a529a379375bb399785cbf2380db4fd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc373.h
13f8e49349460ef0480b74a7043d0591cf3eb68f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57b.h
e72a7871d872b2eb823cc67c0a7d4cafb3d0ca18 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90ec.h
ba76ecbebe0ed71ea861ed7016abbfc16ced2df7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070_notification.h
bae36cac0a8d83003ded2305409192995d264d04 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0001.h
ab27db8414f1400a3f4d9011e83ac49628b4fe91 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl987d.h
70b155b0da07a92ede884a9cec715f67e6b5c3e8 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_list.cpp
c70d946adb4029b3476873887488748162b88b0b - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messagecodings.cpp
ac08ccd5c2e3fadf10ae53e46e582489d1579ed0 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_edid.cpp
6fd536d1849ea4cce5d9b72d1dcbc1db9c818b4e - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_groupimpl.cpp
d63fed0074b22584686ad4d0cdaa4388b42194d6 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_watermark.cpp
a5df56b2cf8df9d4d8ab6fa2b3521649ef09384a - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_wardatabase.cpp
f56f92e32710b0342805b785d34ba1a9f2a54ed3 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_guid.cpp
554e6b7dadbb68ac0f3d2e368ca3fd90832ea254 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_discovery.cpp
60994cb1131d4d37b2d3fce6cc59dfea5ebb4129 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_connectorimpl.cpp
37eabb1ab51cb38660eb24e294c63c8320750b96 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_sst_edid.cpp
a0d24a4bd71f999adbaa876168adef5a7d95f2b8 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_configcaps.cpp
fa4f4869d3d63c0180f30ae3736600a6627284c6 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_merger.cpp
d991afdb694634e9df756184b5951739fc3fd0ab - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_auxretry.cpp
1543bbaba8f3e149239cf44be3c0d080c624d5ba - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_buffer.cpp
56ee9318a7b51a04baa1d25d7d9a798c733dc1bc - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_vrr.cpp
9f31213ab8037d7bb18c96a67d2630d61546544a - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_mst_edid.cpp
fea946e5320e7de8e9229bca8d4a6a14b9e8db59 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_crc.cpp
719d2ddbfb8555636496cb5dd74ee6776059db92 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_timer.cpp
f83b3c17e9f26651f12c8835a682abdd66aed3a2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_splitter.cpp
e874ffeaeb6deec57605bf91eaa2af116a9762bd - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_bitstream.cpp
c62ef84471074a9ed428b4a03e644885989b0b83 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_evoadapter.cpp
38fe8122aba8a1bc5745d81192ec7fc75934dd0d - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_deviceimpl.cpp
66e91795dc65e1bc13c545a84556d200c8eb7bd5 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messages.cpp
4803cde0fffcf89fed46d6deaeba5c96c669a908 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messageheader.cpp
fe8007b3d98dad71b17595ecb67af77b198827a0 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dptestutil/dp_testmessage.cpp
62d03d24af041276ba2abb96fa1634ae4f99ea8a - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connectorimpl.h
aeadcb0bc061b5db0fdf8aa67c1b5703976aa946 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connector.h
01f1dd58ed5bb12503fa45be7a6657cde0a857e2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_guid.h
07d22f84e6a386dad251761278a828dab64b6dd5 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_bitstream.h
11487c992494f502d1c48ff00982998504336800 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_internal.h
f6e1b0850f5ed0f23f263d4104523d9290bb8669 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_vrr.h
02b65d96a7a345eaa87042faf6dd94052235009c - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messageheader.h
e27519c72e533a69f7433638a1d292fb9df8772e - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_crc.h
543efa25367763292067245cbc39c1382c35df77 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_discovery.h
39aece5465100489867001bf57446bcfc4999c24 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_evoadapter.h
6e515f398e9ae1b603e49ec32576ccd0ce5d8828 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messagecodings.h
070b4f6216f19feebb6a67cbb9c3eb22dc60cf74 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_buffer.h
36e80dd13c5adc64c3adc9a931d5ebbf922e9502 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_groupimpl.h
7974abf146f1f14cd3e3854ef63ddf52ebbeb222 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_deviceimpl.h
cdb1e7797c250b0a7c0449e2df5ce71e42b83432 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_merger.h
0f747fdf03bebdcd86dbdf16d00ee2d044bc906c - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messages.h
325818d0a4d1b15447923e2ed92c938d293dc079 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_hostimp.h
2067e2ca3b86014c3e6dfc51d6574d87ae12d907 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timer.h
d876d77caef3541ae05f310857f3d32e642fba04 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxdefs.h
78595e6262d5ab0e6232392dc0852feaf83c7585 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxbus.h
b4d8c44957efc90ba97092987e6e43c48e85ac86 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_address.h
78c6d7d85b47636fbb21153425ef90c6d0b2d4e2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_configcaps.h
3b74682e142e94b1c68bf619169f12e5805044bc - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_watermark.h
8f83883126b853c97e5859dafd98847ec54d36ac - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_splitter.h
7b7d9a137027fbbedfc041465987fa4ed4198ce4 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_edid.h
cca426d571c6b01f7953180e2e550e55c629f0f4 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxretry.h
80380945c76c58648756446435d615f74630f2da - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timeout.h
e2075486b392d6b231f2f133922ac096ca4bc095 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_ringbuffer.h
3eea80c74a22de43b6edad21ea5873c791e093e2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_mainlink.h
d1e8c84f279cb30978d32c784107c0247afa6e66 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkconfig.h
750ecc85242882a9e428d5a5cf1a64f418d59c5f - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_object.h
379d3933c90eaf9c35a0bad2bd6af960a321465f - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_wardatabase.h
e02e5621eaea52a2266a86dcd587f4714680caf4 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkedlist.h
5dff32bd1018e2c5c2540ea7fb571dbea596d5b1 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_regkeydatabase.h
4a098c4d09dedc33b86748d5fe9a30d097675e9f - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_list.h
5bd3706ceea585df76a75dda7f9581b91ee8f998 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_tracing.h
020194b85245bad5de4dfe372a7ccb0c247d6ede - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dptestutil/dp_testmessage.h
2f60ba753549b232e1b995046a356dbe0eced04a - NVIDIA-kernel-module-source-TempVersion/src/common/shared/nvstatus/nvstatus.c
ebccc5c2af2863509e957fe98b01d9a14d8b0367 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_list.h
48f063f09bd9b0cb6c4f47d8911643790b3ffbc8 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvUnixVersion.h
b85b49fc4ed38a241c79731a02b3b040a654a52a - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvctassert.h
764e5c4364922e3953b4db0411d1d3c3bdac99f4 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_defs.h
8f0d91e1a8f0d3474fb91dc3e6234e55d2c79fcc - NVIDIA-kernel-module-source-TempVersion/src/common/inc/rmosxfac.h
f59a2759281341e56372d3cb37b16715944dd8e1 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvPNPVendorIds.h
e015e955a05908d4a2202213353eac89f1b80ff6 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvSha1.h
b58ed1b4372a5c84d5f3755b7090b196179a2729 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_speculation_barrier.h
b4c5d759f035b540648117b1bff6b1701476a398 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvCpuUuid.h
4282574b39d1bcaf394b63aca8769bb52462b89b - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBinSegment.h
a27eb14c54c6acb647a95c264b90e25f07fc757e - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBldVer.h
5257e84f2048b01258c78cec70987f158f6b0c44 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc.h
963aebc9ec7bcb9c445eee419f72289b21680cdd - NVIDIA-kernel-module-source-TempVersion/src/common/inc/hdmi_spec.h
62e510fa46465f69e9c55fabf1c8124bee3091c4 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvHdmiFrlCommon.h
3bf0416186ee90833c727f01cc891bd568ea9d0f - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvVer.h
a346380cebac17412b4efc0aef2fad27c33b8fb5 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc2.h
d2b4cc6228c4b13ef77e47bf30326826c5662ed4 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_ref.h
06aa739230c00998e039b0104e5d73da85c322fe - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_arch.h
86a59440492fd6f869aef3509f0e64a492b4550d - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/dev_mmu.h
38edc89fd4148b5b013b9e07081ba1e9b34516ac - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/kind_macros.h
f9311a35f375c7453d99fdde3876440b54d4cb5a - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v03_00/dev_disp.h
1ea0c3d6ea0c79c01accc7b25d15b421ab49a55d - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v04_02/dev_disp.h
a26df21c3cc3eeb395428101f11da68386e0d72b - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd14.h
8159b4189c577d545c1280d7d905a2dc2ba29fa7 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd.h
96b9560d322f43a980db5d6cc5072e9e81fdb9d2 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/displayport.h
249d4f7317ce68c3ceb64e2b1ee257cc75eb002b - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd20.h
8c43da4fae8a0aeb374ce46ce19eb8c38b552ae4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/Makefile
17855f638fd09abfec7d188e49b396793a9f6106 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvkms.h
7d108165b4a7b6a44ac21460ea3bf4381fb48c5b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h
16a2e187afedf93bade7967816b0723708544e0d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-workarea.h
20213d53bb52bf9f38400e35d7963d0f4db22f96 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo-states.h
70d9251f331bbf28f5c5bbdf939ebad94db9362d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-softfloat.h
8a6f26ccf2e563b78f6e189c999ba470ed35271d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo.h
853d9005ec695cb5a1c7966a1f93fe0c9c8278cf - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hdmi.h
d4889d903bf4de06d85e55b005206ed57f28af69 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-lut.h
6b21a68e254becdd2641bc456f194f54c23abe51 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-framelock.h
c1c7047929aafc849a924c7fa9f8bc206b8e7524 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/g_nvkms-evo-states.h
71e8c5d3c4dfec6f2261654c3fc91210bff78da9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-surface.h
64af1df50d2a5b827c1c829a303844de20527522 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rm.h
260b6ef87c755e55a803adad4ce49f2d57315f9a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-event.h
4f5d723c80f607a0e5f797835d561795dbe40ada - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-cursor.h
f5f3b11c78a8b0eef40c09e1751615a47f516edb - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hal.h
d3f5bc85b538a3a1d4c2389c81001be91205ec9f - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-types.h
9c90df1fa1b6dd33a7e330c47e94b5b9194ad419 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-3dvision.h
be3a1682574426c1bf75fcdf88278c18f2783c3f - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dpy.h
8f1994f3f8d100ddcf8b23f5b24872bed939d885 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-vrr.h
75e8a8747795fad89b4d2b662477e5454863dcc7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-flip.h
d7861e2373ac04ffaf6c15caeba887f727aa41fb - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dma.h
182a47c12496b8b7da1c4fe7035d6b36d7316322 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc-types.h
c8f714e80dd4bb60ceab0c0c7e6a5b3304940946 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-types.h
ef78e73ec9c0b8341bd83306d1f3b2c35e20c43a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-utils.h
867e3091a945d3d43b2f28393b40edeb9d27597b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rmapi.h
c1904d38785649d2614563d0cd7de28a15ce4486 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset.h
118d0ea84ff81de16fbdc2c7daf249ee5c82ed6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modepool.h
412d8028a548e67e9ef85cb7d3f88385e70c56f9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-console-restore.h
33dbf734c9757c2c40adb2fb185e964870217743 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-flip-workarea.h
ebafc51b2b274cd1818e471850a5efa9618eb17d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc.h
4020b2a0d4f177c143db40b33d122017416dfa2e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo1.h
be6e0e97c1e7ffc0daa2f14ef7b05b9f9c11dc16 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-attributes.h
9dd131355ed1e25a7cee7bfef00501cf6427ae92 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-private.h
17f6fbbd5e0a75faec21347b691f44dcb65c01aa - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector.h
4625828efd425e1b29835ab91fcc3d2d85e92389 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h
a8fbb7a071c0e7b326f384fed7547e7b6ec81c3e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-timer.h
52b6c19cce320677bd3a4dfcf1698b236f29e59e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-device.h
a0cc9f36fdd73c99ad8f264efa58043d42353b0a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-sync.c
381fba24abae75d98b3ada184ed0cd57335819a9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-format.c
281fdc23f82d8bdb94b26d0093b444eb0c056f51 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-sync.h
445a409950ab8f36cfa24d1dc73e59718d335263 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api.h
2ea1436104463c5e3d177e8574c3b4298976d37e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-ioctl.h
5c4c05e5a638888babb5a8af2f0a61c94ecd150b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-format.h
910255a4d92e002463175a28e38c3f24716fb654 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api-types.h
e48c2ec8145a6f2099dddb24d2900e3ae94ec02e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h
727bd77cfbc9ac4989c2ab7eec171ceb516510aa - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h
009cd8e2b7ee8c0aeb05dac44cc84fc8f6f37c06 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi.h
fb242aa7a53983118ee019415076033e596374af - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h
f6875ef0da055900ef6ef1da5dc94cba2837e4d0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-channelevent.c
01d943d6edb0c647c2b8dbc44460948665b03e7a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c
394ea31caa5957cfb2c8bb8c3cc0e4703213fe7f - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi.c
ec97ab37cdf2cec0283657c2c04a139a1a168337 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modepool.c
85ddb19f89833ca57fd2deff2e2b4566e162a56c - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hal.c
8415bcd6ab34e356374659e965790a0715ed7971 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-prealloc.c
c98f76bcfc7c654a619762ebc3a2599f9aa89f8d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-3dvision.c
5fb73f35841c41e7376531732cb12303224e61ad - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-lut.c
e9626eee225e58ec2d5be756c5015775ca5e54b9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-vrr.c
86da3c7c09354d2c49d95562aba15cbedb543d9b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo1.c
fc8182cc1f3af77125dbfa328996bcfe0387cc41 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rm.c
05548338a73ade1b3c2ad1cebf1ab5eb16ef6c9b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-flip.c
07c2f10473e2fbe921b2781cc107b5e56e6373e3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-attributes.c
21c8184de2c9150c21ac5d6fba24e79e513a0a69 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo.c
da726d20eea99a96af4c10aace88f419e8ee2a34 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-event.c
5c79c271609ebcc739f8d73d7d47f0b376298438 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c
b55665d7bceaad04bbf29a68f44536518302c3d6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo2.c
f8bdd07a27296ef6aab86cc9dbccf8df811fff24 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modeset.c
1918ca3aa611cd9dfc79d46d038ab22706f0b1ed - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor3.c
24156462f25922c8de5b5d2558db36b2e68b28ed - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dpy.c
c2870190ca4c4d5b3a439386583d0a7c193d6263 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hw-states.c
f27f52dc428a6adeb936c8cf99e1fc2d8b0ad667 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dma.c
5acf19920d56793d96c80e8461b0d0213c871b34 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-surface.c
c2d0e6bef0c4929a3ca4adfd74bd6168fa4aa000 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-framelock.c
673ad86616f9863766bfec0e118c918297d32010 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/g_nvkms-evo-states.c
c799d52bdc792efc377fb5cd307b0eb445c44d6a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor2.c
94f4736acf7981cebfd74302a21f19cdbafa8d71 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hdmi.c
8f22c278a5839d36f74f85469b2d927d9265cb80 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-utils.c
eb09642e8b5d9333699f817caaf20483c840b376 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms.c
ab17e5b4cafa92aa03691a0c187ef8c9ae53fa59 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor.c
574b1268ff83e4e5ed4da15609247a5c0ec8f51b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-console-restore.c
45230e56d29c98ea0f10f87c1b16ba70c96f24d5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo3.c
8af6062034d464f778969e26d3bf5a9b4cdaccf0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector.cpp
69fed95ab3954dd5cb26590d02cd8ba09cdff1ac - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp
6b985fc50b5040ce1a81418bed73a60edb5d3289 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.hpp
f2a05c29383bfc8631ad31909f31a8351501eb27 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-device.cpp
31767fd551f3c89e5b00f54147b6a8e8fa3320e3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp
110ac212ee8832c3fa3c4f45d6d33eed0301e992 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-host.cpp
51af3c1ee6b74ee0c9add3fb7d50cbc502980789 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp
f96cd982b4c05351faa31d04ac30d6fa7c866bcb - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.cpp
f6c3e8bd4ee13970737e96f9d9a3e4d8afdf9695 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp
893c70c95809f463c7af6dc9c814527804fcdf53 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/Makefile
c5f16fdf43ca3d2845d120c219d1da11257072b0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/nv-kernel.ld
d1089d8ee0ffcdbf73a42d7c4edb90769aa79d8c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h
aba0bd796d932fa19e8fad55ed683ae57d68bffb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-priv.h
1d8b347e4b92c340a0e9eac77e0f63b9fb4ae977 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h
499e72dad20bcc283ee307471f8539b315211da4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h
40cb3c112bbcb6ae83a9186d0c9fa1857cf6a126 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/os-interface.h
1b53bbf5f8452b8057ff2dd7828947a047db38d0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv_escape.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h
e3679844971ecc4447259fb1bdf4fafbbdff2395 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osapi.h
4750735d6f3b334499c81d499a06a654a052713d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-caps.h
1e89b4a52a5cdc6cac511ff148c7448d53cf5d5c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/os_custom.h
fbcbb81ae14e8bfde0d665ad20f9cab9b0bbd9c3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv.h
ddfedb3b81feb09ea9daadf1a7f63f6309ee6e3b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h
9c7b09c55aabbd670c860bdaf8ec9e8ff254b5e9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h
cc3b2163238b2a8acb7e3ca213fb1ae6c5f0a409 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osfuncs.h
2f5fec803685c61c13f7955baaed056b5524652c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h
285ab886f5fad5caf3f6bd0b0c7102bd4c4300bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-reg.h
6ebda7ea5b17b7b9bfa9387fc838db9f0c3405a5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osinit.c
b5b409625fde1b640e4e93276e35248f0fccfa4c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c
9d9035afd7af31f30cdbf2d4c75e5e09180f0981 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osunix.c
21ac9d6932199ce0755dbead297eb03c9900f8c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c
49dc935d4475b572478c63324f0832c972a4277d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os.c
532366fd9a288a812eca78b92b304ba3625f8c0a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c
006e77a594ae98067059ad3d7e93821316859063 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c
f134270af5ecd7c5ba91bf5228fe3166b101dd6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/escape.c
690927567b5344c8030e2c52d91f824bb94e956c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/registry.c
5940d8e83cd0014e3222952eab29eebaaad19b86 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osapi.c
54b912b640bdcae42f38c41694eb20abcaad61a7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c
fb5272f3d0e465aedbc99ddcabb1c6c428837a6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c
0cff83f4fdcc8d025cd68e0a12faaeead09fa03b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/tmr.h
7df66a87c9498ae73c986e60fcb9cb1cbcd19e19 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objrpc.h
1feab39692ea8796ac7675f4780dfd51e6e16326 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objtmr.h
28d6a6ae495d9bc032c084980ebf5d94448bcf29 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_headers.h
31deee778df2651d3d21b4d9c8ab180b8dc1ff14 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_vgpu.h
961ed81de50e67eadf163a3a8008ce1fde1d880c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_hal_stubs.h
4db7387cc1ce08ccc62404b80b19c7f1b685e746 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc.h
e4d88af4eb51d32288f913d90e490e329884970b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_global_enums.h
35da37c070544f565d0f1de82abc7569b5df06af - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nv_firmware_types.h
df4d313c66e75fa9f4a1ff8ea2c389a6ecd6eb3d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/acpigenfuncs.h
bff92c9767308a13df1d0858d5f9c82af155679a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvacpitypes.h
db0dc6915302888de06e3aa094d961cfe25e0059 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvrm_registry.h
059c1ab76a5f097593f0f8a79203e14a9cec6287 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c
d50ff73efaf5bc7e9cb3f67ed07ede01e8fad6f6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated.h
671286de97aa63201a363fd7a22c92ee8afe4c7c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/eng_state.c
6fa4ba2da905692cd39ec09054f2bd6621aa2a7a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource_desc.c
5a97d4f8ce101908f1a67ffe9cc8ed00b6bf43b2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource.c
1653c7b99cfc86db6692d9d8d6de19f1b24b9071 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_uuid.c
caf2b80fa0f01b9a3efcd8326bf6375455f2e1b9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_access.c
4e1be780ac696a61f056933e5550040a2d42c6bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_device_mapping.c
0824d200569def5bf480f2a5127911ed0ea881e6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_share.c
f6b4e40b638faf9770b632b404170e1ceb949be5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_gspclient.c
db44a803d81d42bfaf84f7ea1e09dc53c662acef - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_timeout.c
9515ea68cdac85989e4d53d4c1251115291708dd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu.c
08be13ced6566aced2f3446bb657dae8efb41fbe - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_rmapi.c
77573c8518ac7622211c4bdd16524d369cc14b96 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_ctrl.c
fa854efc5cdf4d167dee13302ee8377191624d95 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device.c
89543f7085fbc2ca01b5a8baae33b5de921c79e9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c
0e4c2d88b61a0cf63045fe70e5ba2c81c44e37af - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c
acb2a62fb60e08eb6d16518c43c974783139813b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer.c
834efbfff64c0d01272e49a08bd6196e341985a8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c
dd0bd914c6c7bfeabdd9fe87fb984702e0765624 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_objs.c
19447ad30b3fc2ee308bcc45e3409bafa5defe0d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c
3abbef0a6fc95d6f7c7c5a16cbbbb51aaa457cc0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c
0918cada217ca1883527fe805fc30babf7b8038d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_channel.c
e1a6dfb38025abeb5adfda929f61eb6ee44b5c84 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c
ed25b1e99b860468bbf22c10177e0ba99c73894f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c
8cd12c2da71acede5046c772f14aff7cbd88af12 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/kern_disp.c
01e8b56f7677f5cb7f950d9aa9bd37d04153085b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c
629566bf98be863b12e6dc6aab53d8f5ea13988c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c
b41502d73d7781496845377cebd0d445b8ca9dc6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c
8a418dce9fbeb99d5d6e175ed8c88811866f3450 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c
e26ade846573c08f7494f17a233b8a9e14685329 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c
d6e1bd038fa0eff5d3684a5a2c766fdac77f1198 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c
d4a07d1c6beb7ddb229ed6e5374343b6ce916d84 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c
bc2b57acc8fa8644615168e3ddbaf7ac161a7a04 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c
2bb921b462c4b50d1f42b39b4728374c7433c8cb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c
086e9a51757c3989dfe0bf89ca6c0b9c7734104a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c
56be7a21457145c3c6b2df7beb4c828b7bd1a3b4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice.c
5be208cc0e1eae1f85f00bb0b502fdba74d6656c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c
a64c51c515eb76208a822f1f623d11e2edd8d7ac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c
a54628e9d2733c6d0470e1e73bca1573e6486ab3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c
1f4d15f959df38f4f6ea48c7b10fc859c6e04b12 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c
ef2a3848e0302c09869a34eba1333d19a17acc56 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c
2c66e086bb149fb1b9ca8f860566a3f5e391b2f3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client.c
f89e982b0e31a1898e1e4749c9a8ae9f0bb59a0c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.c
d92267a3394ded5d7d218530fd16ce00a920b1d6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/alloc_free.c
2279fd14aab9b5f20b8fc21f04dd0fca41e418c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_notification.c
11a547cbfdbce000a6e5edf48492f5b930ddbdca - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rs_utils.c
81f66675295315cfc52be225c2e9ee912b56fbac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/sharing.c
569f56831cde7bdc528ac2e543eea485025ec6f0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client.c
05669e008dfd89e5c81381e6c60230c1fe17a876 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.c
820b6e63c2b11b0764305c483142f626b6f72038 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rpc_common.c
bc83726df04c30d02a1852a10a22c77fdb3ef7a7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.h
5f194ba056b018a8194c16b0bbb6e49c1b80a996 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/param_copy.c
e40f6742084cd04252f3ec8b8499a26547b478bc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping.c
ac6a5b3adf15eac4a7bd9ae24981f6f5fc727097 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.h
3a0f999e390d93b0db8272f55fbec56f6b055fe4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_utils.c
78f1e379c3d1df9e34baba77f78f48b8585bdc74 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_buffer.c
8e40d2f35828468f34cf6863f9bf99c20dbfc827 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_cache.c
b441ee824e9c15c82956254704949317024ceb41 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.h
277441b3da96fc01199f1d2f5102490e2e6cd830 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/control.c
38d0205b68ea2c82709b42eb7e8b9cf92cec8828 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_stubs.c
2f89b9059467e7f67a6a52c46aecae5cb0364ab6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/binary_api.c
46aa43b18480d2eb7519b2dcd0fe6a68c79b8881 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource.c
f2c7d77e4183994d7ee414e2a87745fcd23d995e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping_cpu.c
6f46dd43e4b3f2ad803a4c9492cb927aebffc1f0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client_resource.c
59d42b6a123b062237b3b6ca382211e35057ef1e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_list.h
ddaf2b8e424df9147a4e2fecf3942b64b1d2b001 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.c
68cc7b258f934097e9dc31a38e7e3bf2ce2fe5d1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event.c
c3820fa4bb1192a9317ca834aeee3434c7eb8059 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi.c
ea7be8a55a3310aa1c3926ed69c86a6491925e08 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog.c
70507a8d43797eb3cdc13408ae8635f4a2eebce0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog_printf.c
b3a29311cc22e2dae686f8ed2df6bc828aa826cf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/profiler.c
af4ffa4b423e07cf40eb863c11dbf515c7104874 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_timer.c
1793e056a0afcc5e1f5bb58b207b49c5f1556eca - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_stubs.c
63e5e17280d865ace8cdd8eb8a2598d3d7830ad7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_sanity.c
8e5af753de1725dd919185c29d03ccb0934fab6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_init.c
fe91b43c37b64472450cc25329d2dea74d2a9fcf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_minimal.c
c0822891f614e6ec847acb971e68aad8847e0cd7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_common.c
c68f2c96bfc6fce483a332a5824656d72986a145 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/system.c
37000b419d23a8b052fc1218f09815fafb1d89c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal_mgr.c
7b9c95f912b203c68b6ba1f62470dffee4b4efe3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/thread_state.c
677c655b0b8e86bdab13cdd4044de38647b00eec - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hal.c
8eac3ea49f9a53063f7106211e5236372d87bdaf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/info_block.c
b9eabee9140c62385d070628948af0dcda3b0b1a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hals_all.c
003e3012e87b8f8f655749db88141d74660e8d8e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c
a5a31b9b62e6d19b934411995c315d4fdac71ca0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_db.c
37d1e3dd86e6409b8e461f90386e013194c9e4d1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c
ed24c0406c85dc27f0fca1bac8b0dcb7a60dca2d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_group.c
6aa752ae480e883d077de842f02444151947f82f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c
956b7871a267b7d381d1cd7d4689ef1aec1da415 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem.c
9d9fcd87d784a758659b6cc8a522eaf9beac4b6c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/standard_mem.c
15f3290908931a9e4d74b0c0ec9e460956e39089 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/system_mem.c
623dad3ec0172ed7b3818caece0db5687d587ff3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c
64bd2007101cbf718beb707898e85f40071ae405 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c
94acdcebee0cdcbf359b15803ec841e5284e1ff2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/vaspace.c
079893039c2802e1b0e6fcab5d0ee0e4dc608c84 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/io_vaspace.c
5b9048e62581a3fbb0227d1a46c4ee8d8397bf5b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h
78cbb6428372c25eba0ccf8c08e7d36d18e4bae8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/base_utils.c
6d5915924b4e26a5e7592427e34b77596162d0fe - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/zlib/inflate.c
cade0f7049cdb2ab423a073887ed20ba1abdb17e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/utils/nvassert.c
8a4e2aec6fc01ce1133cfc7ef80b6363c5394208 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvoc/src/runtime.c
8ed5171254e51e59fc5586e729793831165b8c0c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/tls/tls.c
206dda159ecbc0340ac9329250302c76a504e5a8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c
d48d51a880fced52ad6e323d984e872ccf9ef3bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_client.c
d0ae6d7a363db3fdf54ae1a760630b52a2019637 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_resource.c
883ad1cf4ed1714eb74d44d3b9a41d6a4723b650 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_server.c
0c9581aa68a77cb9977a7fbcfd2077ccb618206e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_rights.c
dac54d97b38ad722198ec918668f175dc5122e4e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_map.c
1f2e9d09e658474b36d0b0ecd9380d0d2bcc86b2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_domain.c
d3e5f13be70c8e458401ec9bdad007dfadedcc11 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvbitvector/nvbitvector.c
836ba8b401fb6b6fcf4ccde1b644ebaefc3d8ee1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/ioaccess/ioaccess.c
9c40bfebe2c57b972683e45dc15f358aaa2280f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c
8f41e7127a65102f0035c03536c701b7ecdaa909 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/string/string_generic.c
b528ef8e238dd2c22c6549057b54fe33039c6473 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_tracking.c
b6d6074ca77856fc5fe4ff1534c08c023ee592a4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c
caff00b37e7f58fde886abcc2737c08526fa089e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_generic.h
66e79047600e0a40c50e709c6c82402d9b205ad0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c
da86b765702196eb0011ac9d14873fbc1589d48b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c
7cdc50ee31b9cde14c0ce6fcd390c5d4564e433d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.c
a305654bafc883ad28a134a04e83bbd409e0fc06 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.h
2fa76d2d5ba7212f826b656aa683223a470e484c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/core/core.c
6f6c83e9ee6d91fc8700e5015440f2bc72e6600b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_rwlock.c
9b69fbf3efea6ba58f9ba7cb0189c9264c994657 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_common.h
b55b7b59f35d848d5a3b43d63da4d2f7b0af5d3e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c
7416712aa964befcf8fede86e5a604871a2d00b8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_rwlock_def.h
6dd0c5f2384610ea075642d8e403ddd8c8db371a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h
87ac95cf569bb550adb3577c6a6658d094c59999 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c
a045a19d750d48387640ab659bb30f724c34b8c8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c
f0c486c1ad0f7d9516b13a02d52b4d857d8865b1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_compiler_switch.c
595a6238b9f04887dd418be43ff31f3e7ca6b121 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/map.c
4418c0344b64740050ff8ef6ee085f0687a323d4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/list.c
057ad074f6252f7809a88f918986d7d5aacff568 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/queue.c
2389c9dd3b13fd2ff26d2d1342c515579079bc71 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/multimap.c
2975e5cecee2c1fd5f69a8ffc20a49016e83025c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/btree/btree.c
f0ce913eb568f85e6e1c1b8965f2cd2b98e81928 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/eheap/eheap_old.c
cba2c17804f6f2062dc5d75583e4a03e03016d1d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.h
133e94f73c781709f407b03d8cdfdd8865c39b4b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.c
801eb295d07258ad70b99cb0fe85f3421690e0c4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_private.h
46c1a2066ead316ea69c60dc323bdb649bc11c0f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.c
f9bdef39159a8475626a0edcbc3a53505a0ff80a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_hal.h
958d9a2cddc91edfafb5c2f3d9622443ac49a6ef - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.c
d405e01478d26ea99cc0012fa2d6e0021bbe6213 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.c
182602832a033b3e2d5f88d4ba8febe63eeb2f9e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.c
376572489e0d4211663da22d5b0de7c7e740fb29 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.h
e3c4822ac998ab5c7946919c85011f6172dc35ee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.c
fa5e1c6001e60f77415d0a8f87c8b548b12e1217 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.c
ddc0ac4e1d8b8aef15e147f1f85f8df37c196763 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_register.h
4fe5357eabd0c5e351fb965ceead308240f68eb1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.h
4f4acfdefc7b9a0cdfe2d5840cc18c9c33366053 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.h
1d66bab50a7d39faa2b0fec469a4512d2c7610d5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.c
fbcbeb92e46ba11ac26c04c9688b3ffcf10f5c53 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.h
e449382e19e4dcfcf0aec0babe5a1c8ce2f4249b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.c
87a5ae8e07103074020ba052ca45ab39e918d3bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.c
47b7744ddd01b821bf2fd25fdb25c8d6d55ee01d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.c
c46cae4a17181c48bafc01237b83537df61c41ae - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.c
f42bfa3b5a801358d30f852625d8456290550f46 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.h
59a87763c6abdc54828f2785a7d90e43e607bc87 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.c
da3cc08f12ccee23bcb1c0d0c757b8bbcb81e4fd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.h
6fd6953e4ae0af707376a40ea0e4f3e70872be7b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.h
162777624d03af2f17dfdc28bc35143e2ec6cdee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.c
b82e5db65ad41764f456d6f924c89d76c165e48d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.h
63e9d0416d5ca1fdf547b5fba9ec76e54690c9dc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_ref_count_nvoc.h
b5ddae1e6960b13101aa38b2edc0610aed438ede - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.c
499a3d9c61a86b667cc77cf8653a71f7fe85078a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_name_released.h
ac842d9de5eae74ef02b0a75259fb016b80c6eac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.c
88d336f88c9b72ec2c1352d4ebe00c0831eafbca - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_sdk-structures.h
fb78615cde6323784f51d33f2acd61fd4030fee0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.c
213ebb4fdfa3c2f64b5f998e2ad990e448d4a104 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_debug_dump_nvoc.h
a6174ad345cfdf926cbb4c86c7e8eeadfccb0ddf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_class_list.c
fa785f8138598af783aefecf10b141d524e6bb42 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.c
de97c5afdc34cb9aff23c3ba166e21f660cf1f47 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal.h
f9bdef39159a8475626a0edcbc3a53505a0ff80a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_private.h
53b2c39666e1da206d44d69d54009f20440503bc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.h
93f9738c0e8aa715592306ddf023adf6b548dcc4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nvh_state.h
2b49950ba8f540ed4231c3334810edbb212bb859 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.h
d614f90730e2ee78bc3aae47b4e7976500e166e7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.h
4302502637f5c4146cb963801258444f2d8173e1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_allclasses.h
7bb406aa863430507bdf07b5f3e519c0d756220a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.c
6f3fc9676df77fa24c49140331b87ed5988ed57c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/rmconfig.h
cb02e66e5fc06aa340ab460c977961701e9ba295 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.c
079ac6d2a90bd2fc9413e092a729202dbc5f724a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.h
65d1ace1e68c9b39cce6db61aa8b86ee47a0ae4b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.c
e0988b45cf712f1a7662b6f822eaed3ffd9938f3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h
40c937ca657bda9c0b67bd24c5047d39e596c16c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.c
f8e842add67dc070cc011ea103fc56cfd81c8b9a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.c
3a5457a216d197af8f120c660690a55ee44bdd8e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.c
21e3cf689d84b1a28e11f66cc68a0bc6713108b0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.c
edead99d125425ddf8f2fa4e4261b8cc3bf566fc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.c
b07c2c5e8df4de2bb9d242fd1606f1a57b8a742d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.c
bfabd5155af3172e1c0a5a0b66721ff830c7b68f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hypervisor_nvoc.h
cc635daf3d7a9a176580951841b82e9eb0d6f5ad - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.c
757b3ecf94d0c8914a32c4bd302f8ccfa4027856 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.c
6263c1ceca0797d34a102f9846acd1fdef06fb60 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resserv_nvoc.h
3b0e038829647cfe0d8807579db33416a420d1d2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec.h
abda8536d885be1422810c184b936bbc880972eb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.c
f6f40d568bcf2ae89547ad054f9b5357bac366ab - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.h
ceb4dd72148dfe4a0581631147e8d7636abfd61f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.h
41784541b2e9ee778b52e686288fe492c0276fec - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.c
d32d0b65f5f76cb56ca7cd83c0adfe5cb5330924 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.h
d04adc777f547ae6d1369cf4c94963e5abf90b86 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.c
ac3965eea078f1998c3a3041f14212578682e599 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.c
0dae533422e24d91a29c82d7be619160bbb6f6be - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.h
3f5a391895fc900396bae68761fe9b4dcb382ec0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.h
c3b4c6a1b90a1547e229bb2973eb19c01e1d0055 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.h
285af0d0517cb191387a05ad596f74291ec81737 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_desc_nvoc.h
9646d1c4d472ad800c7c93eec15cc03dd9201073 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.h
c370a103a4c1c9cf2df3763988e77ef8f7bc6afb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.h
2239839c8a780a87e786439a49ab63e25d25001a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.h
09597f23d6a5440258656be81e7e6709390128f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_private.h
8e0e60f6d30bbed679c43b4997875989314ee88c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.c
dec0f585ca46dc8e1aae49c8ea58db5a415de65c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-message-header.h
871fd0260ab9c164b8f6a7d1aba4563af622f1ac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.h
205490d6651110f28009e752fa286f818bed22fb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.h
07a37ff685e68a703455e0ed7db7940697487ed2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.c
cc71518b4151dc2ee0592bbd2866d437043d0e1a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.h
2c28d729456749f16ae03fb48b1e416706762805 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_fwd_decls_nvoc.h
59c3612a596ad6b996c9d1506f9893bd1b5effee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.c
81a6a28692f50efeebecad125de0585dd711ff36 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.h
3f581df19314b273244c4c42ea915ec8ef0d8ce2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.h
e839f8a5ebef5f28818bb5824bd7c52320db9a74 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.h
e0b8f64c042dcbb6340552cb3517dabdeb490f1b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.h
7523c2ee9228ad0e2fb3566b23b9720d7896afae - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.c
ad50b3dbe1685eefe51c4fc296f3eade70789dfb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.h
ca042cfcdfe8cc8a141f8bb5c9e6c05d8a71b707 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.h
2ab6933e07a84c64dfcbeef3b3f4e3f14249d8c8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.h
ffd4f01212709e321d4097e424fe5d32038f5d8b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c
12776c69191b583ffcf0914697cf41802f52ef01 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_archimpl.h
05cb2fed8648f07b54dc2e8bacbafb323ea8262e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.h
0b15dd4515c5e436a659883a48e62bf3c68bf439 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.h
0269da77a8db8efde1debc8236f2b3de2cd2597e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_desc_nvoc.h
1bdccdbabf5ae52fd65b829c35079bb7a8734939 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.c
410a759c949904b7ae1eecafb31143fad579c0a1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.c
73c598515eb7985c8f4cace0946ec9613960be6c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.c
73a37ad59b9b13b61eb944748b6c2ba3cad7b630 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.h
8915f69e67e1f3a809a5479e36280df06ce8dd90 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.c
d792fbb20b6ca5f2d62addf6a94b0c5027ae15fe - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.h
6124890a54e529dff8b9d6ecf8f4bebe1e10a8a2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.h
cb03502bf603c88b709ec803b60efd1d6f8e5ee1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-structures.h
b378d336af4d5cb4b1fb13b85042fad1fe02f4cc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_journal_nvoc.h
7c1b36cca9e8bf1fe18284685a6a80620df348cb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.h
cd833a822c1ce96c79135ba7221d24f347ceadb1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.h
a016a7d8e07389736c388cb973f3b2a177ea917d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.c
42d784e8b478bbf48293a805aa227f0abdf1923b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.c
b29061454e7d8daa0cef0787f12726d105faf5c4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.c
4b9f2ee66b59181f226e1af5087db6ea80f1ee27 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.h
23d16b4534103f24fac5bb86eb8bab40e5bcba57 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.c
e48b8b6ba9da5630a7ade526acbb94e50d9b636d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.h
b86536778197748c707c3e9e4c73c5fbcb037e32 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.h
07fd5f5534a6d751107f582ba187c7a53a139954 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.h
f4a5684d5a877b90c7ae7b66436117c6feb65f91 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.h
ab79a1418b65b9d65081456583169f516dd510c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.c
bd048add5f0781d90b55a5293881a2f59ace3070 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.h
e50c91a674508b23b072e0dd2edbf743f24b333d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.c
df070e15630a11b2f4b64d52228fa5a6e7ab2aa9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.h
0f3140b5eae77a6055f32a91cb13b026bbb23905 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.h
76b1f545e3712a2f8e7c31b101acd9dd682c52f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.c
14450b18d002d4e1786d4630ef4f1994c07ef188 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_odb.h
7b0201852361118f277ee7cc6dd16212c0192f71 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.h
3d3385445934719abda1fefd4eb0762937be0e61 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.c
c4fde03d5939b0eef108fde9c2f10661568f22a9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.h
5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/os/dce_rm_client_ipc.h
76b24227c65570898c19e16bf35b2cad143f3d05 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu.h
61c7d3ac2dc61ee81abd743a6536a439592ee162 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_child_list.h
bf894a769c46d5d173e3875cd9667bb3fe82feb9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_timeout.h
f17b704f2489ffedcc057d4a6da77c42ece42923 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource.h
0e8353854e837f0ef0fbf0d5ff5d7a25aa1eef7c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_state.h
426c6ab6cecc3b1ba540b01309d1603301a86db1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_desc.h
c33ab6494c9423c327707fce2bcb771328984a3c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_halspec.h
6b27c9edf93f29a31787d9acaaefb2cefc31e7d4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h
1938fd2511213c8003864d879cf1c41ae1169a5f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_uuid.h
cf3d1427394c425c543e253adf443192ca613762 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_access.h
ce3302c1890e2f7990434f7335cb619b12dee854 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h
97d0a067e89251672f191788abe81cf26dcb335f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/device/device.h
61711ed293ee6974a6ed9a8a3732ae5fedcdc666 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h
b39826404d84e0850aa3385691d8dde6e30d70d4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h
51a209575d3e3fe8feb7269ece7df0846e18ca2a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h
277a2719f8c063037c6a9ed55ade2b1cb17f48ae - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h
74bc902cd00b17da3a1dfa7fd3ebc058de439b76 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_channel.h
be7da8d1106ee14ff808d86abffb86794299b2df - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_objs.h
576216219d27aa887beeccefc22bcead4d1234d7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp.h
5179f01acf7e9e251552dc17c0dcd84f7d341d82 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h
9a33a37c6cea9bad513aa14c942c689f28f7c0d8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h
f758ea5f9cbd23a678290ef0b8d98d470e3499e0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h
6756126ddd616d6393037bebf371fceacaf3a9f1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h
20416f7239833dcaa743bbf988702610e9251289 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h
a29f55d5fbc90dade83df3ef3263018633675284 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h
82abc2458910250c1a912e023f37e87c1c9bbb9e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h
889ba18a43cc2b5c5e970a90ddcb770ce873b785 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h
b52e6a0499640e651aa4200b2c8a1653df04a420 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h
24d01769b39a6dd62574a95fad64443b05872151 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h
efc50bb2ff6ccf1b7715fd413ca680034920758e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h
ccca322d29ae171ee81c95d58e31f1c109429ae7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gsp/message_queue.h
1e3bebe46b7f2f542eedace554a4156b3afb51f1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h
ce4e0f7177f46f4fc507a68b635e5395a3f7dde6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h
5f60ac544252b894ac7ecc0c6dc4446e6275eae5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi.h
2baec15f4c68a9c59dd107a0db288e39914e6737 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client.h
a92dbf2870fe0df245ea8967f2f6a68f5075ecaf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h
61e3704cd51161c9804cb168d5ce4553b7311973 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource.h
99a27d87c7f1487f8df5781d284c2e9a83525892 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/binary_api.h
497492340cea19a93b62da69ca2000b811c8f5d6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event_buffer.h
f3028fbcafe73212a94d295951122b532ff5445b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rs_utils.h
b4bae9ea958b4d014908459e08c93319784c47dd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event.h
ac9288d75555180c1d5dd6dd7e0e11fb57a967f2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/exports.h
2b23f2dbd8f3f63a17a1b63ebb40a2fd7fd8801a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/alloc_size.h
c9cb08c7c73c0bdd75a320640d16bf4b4defe873 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/mapping_list.h
f19dad1746e639d866c700c2f871fcc0144f2e5e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/control.h
f1713ecc0b3e58e46c346409dbf4630aa6f7f3ed - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/param_copy.h
255c28b9bd27098382bace05af3ad7f195d12895 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi_utils.h
4453fe6463e3155063f2bdbf36f44697606a80a5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client_resource.h
7615ac3a83d0ad23b2160ff8ad90bec9eb1f3c6c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal.h
b259f23312abe56d34a8f0da36ef549ef60ba5b0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h
c6efd51b8b8447829a0867cd7fb7a5a5a2fb1e3d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/traceable.h
7e75b5d99376fba058b31996d49449f8fe62d3f0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/profiler.h
fd780f85cb1cd0fd3914fa31d1bd4933437b791d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/tracer.h
3a28bf1692efb34d2161907c3781401951cc2d4f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal_structs.h
c8496199cd808ed4c79d8e149961e721ad96714e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/capability.h
e5b881419bc00d925eba9f8493f6b36cf3ce7ca7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_stub.h
408c0340350b813c3cba17fd36171075e156df72 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os.h
cda75171ca7d8bf920aab6d56ef9aadec16fd15d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/nv_memory_type.h
af25180a08db4d5d20afd09f948b15d8c4d2d738 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_fixed_mode_timings_props.h
457c02092adfc1587d6e3cd866e28c567acbc43a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/info_block.h
bffae4da6a1f9b7dc7c879587fd674b49b46dac1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/core.h
cbfff1f06eecc99fb5a1c82d43397043058f02fc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/printf.h
f929d43974893cd155ab2f5f77606f0040fe3e39 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/locks.h
b5859c7862fb3eeb266f7213845885789801194a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/system.h
37f267155ddfc3db38f110dbb0397f0463d055ff - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/strict.h
bdc4ab675c6f6c4bd77c3aaf08aa5c865b186802 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal.h
ed496ab6e8b64d3398f929146e908c5a453a03d9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/prelude.h
b319914c97f9978488e8fb049d39c72ed64fd4d2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/thread_state.h
b00302aec7e4f4e3b89a2f699f8b1f18fc17b1ba - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal_mgr.h
8ef620afdf720259cead00d20fae73d31e59c2f7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h
2c48d7335bdb0b7ea88b78216c0aeab2e11e00c1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h
e188d9f2d042ffe029b96d8fbb16c79a0fc0fb01 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h
ea32018e3464bb1ac792e39227badf482fa2dc67 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h
5b151d0d97b83c9fb76b76c476947f9e15e774ad - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h
0ce5d6370c086d2944b2e8d31ff72a510d98dc8f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h
4c386104eaead66c66df11258c3f1182b46e96ee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h
a5f49a031db4171228a27482d091283e84632ace - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/system_mem.h
d15991bc770c5ab41fe746995294c5213efa056b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h
5ae08b2077506cbc41e40e1b3672e615ce9d910f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/vaspace.h
02d6a37ef1bb057604cb98a905fa02429f200c96 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/mem.h
1a08e83fd6f0a072d6887c60c529e29211bcd007 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h
2d4afabd63699feec3aea5e89601db009fc51a08 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/standard_mem.h
5e9928552086947b10092792db4a8c4c57a84adf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/acpi_common.h
2f05394872ffa95d700b7822489fa59f74ad5819 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/sli/sli.h
fff3ebc8527b34f8c463daad4d20ee5e33321344 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/ref_count.h
04dba2b7a6a360f3e855a7d6a7484ddcdfb90c19 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/base_utils.h
f8d9eb5f6a6883de962b63b4b7de35c01b20182f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/protobuf/prb.h
601edb7333b87349d791d430f1cac84fb6fbb919 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/zlib/inflate.h
9255fff39d7422ca4a56ba5ab60866779201d3e8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/poolalloc.h
8dd7f2d9956278ed036bbc288bff4dde86a9b509 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/eventbufferproducer.h
e53d5fc9b66dbec4c947224050866cec30b2f537 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvrange.h
398e4cd63852a18da6e42b920eacd927a2c38bc0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nv_enum.h
ba3c81e9eae32eefbf81818b48fdf6ccd7e73163 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvmacro.h
18321894aa7631b491ea39edc2d45d1028cdc9c6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvprintf.h
167f49cccc912430bb6b3cb77395f665a32cc8be - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvbitvector.h
1ed5d8ae82f37112b163187fa48d2720957e6bdf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvassert.h
62a18f19f79512ebccdf286068e0b557c7926e13 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/runtime.h
00433b51c4d6254fd4dfc3dcd9b4ad59e485e7c0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/object.h
1b28bd0ee2e560ca2854a73a3ee5fb1cf713d013 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/utility.h
5cadc87ba685991c7d4c6d453dcc9a2cca4398bf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/prelude.h
664ff0e10e893923b70425fa49c9c48ed0735573 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/rtti.h
bdb558ee8f782e6be06fc262820f6bd9ce75bd51 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/tls/tls.h
56b8bae7756ed36d0831f76f95033f74eaab01db - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h
7239704e6fe88b9d75984fb5e9f4b5706502d7f3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog_printf.h
e08146f5de1596f5337c49cfbe180e30e880dedb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog.h
d2c035e67e295b8f33f0fc52d9c30e43c5d7c2ba - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h
cd033fe116a41285a979e629a2ee7b11ec99369f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_rights.h
2dec1c73507f66736674d203cc4a00813ccb11bc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_domain.h
a0d3d164eb92280353cdc4458d2561aae8a68c1d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_server.h
89ece4711626bf1e4197c69bd5754e2798214d76 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/resserv.h
bacdb2c1a1dbf182a0a3be15efa0a5f83365118f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_resource.h
df174d6b4f718ef699ca6f38c16aaeffa111ad3c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_map.h
841ddca998b570feb1d59b50d644c8f2b59ae8e9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_client.h
b795f5cb77ecd2cc407102900b63977cfb34bbfd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/ioaccess/ioaccess.h
3dcee4e110f4c571e7f49fae2f2d0630d008a906 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/nvport.h
46345715dde843be2890b33f191b2f3b69385e0d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/memory.h
a1d93b6ec8ff01a3c2651e772a826ee11a7781d7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/util.h
b93c2532babf176f7b91735682e7d7cdc41f96f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/debug.h
147d47ef4bd860394d1d8ae82c68d97887e2898b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/core.h
6d698ca4fc5e48c525f214a57e1de0cc4aa9e36b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/thread.h
3e656d5ed1f5df898ec444921ce77a40ead66b28 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/atomic.h
3ac7ddf3d402f3fd20cffe9d4e93f457de319605 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/sync.h
2487ffc1eb1e50b27ba07e0581da543d80bdaa72 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/safe.h
22420ad669a9809602f111385b7840556e58ecff - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/cpu.h
6ad1beaa2783a57330240d47b373930cd36ca5d0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/crypto.h
2805fad632acad045044e0b8417de88032177300 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/string.h
23afbd04f4e4b3301edcfdec003c8e936d898e38 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h
eedda5c4b0611c3b95f726b0a2db4b0a23b7b1cf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h
a8c9b83169aceb5f97d9f7a411db449496dc18f6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_generic.h
aafca30178f49676f640be9c6d34f623a3e3a9a4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/safe_generic.h
600ad8781585e87df49ab1aaa39a07c8e8de74f5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h
0747ee16c7e6c726f568867d0fbbad411c8795c8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h
2a76929dc6b0e8624d02002600bc454cc851dee4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h
1d6a239ed6c8dab1397f056a81ff456141ec7f9c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_valist.h
31f2042e852f074970644903335af5ffa2b59c38 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h
65a237b66732aafe39bc4a14d87debd2b094fb83 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/map.h
c9e75f7b02241ededa5328a4f559e70dec60d159 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/type_safety.h
3924b67e6d63e9a15876331c695daaf679454b05 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/list.h
a28ab42de95e4878fb46e19d7b965c23f92b3213 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/btree.h
4cd6b110470da3aee29e999e096ca582104fab21 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/queue.h
1dacc1c1efc757c12e4c64eac171474a798b86fd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/eheap_old.h
969cbac56935a80fafd7cceff157b27e623f9429 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/multimap.h

Change-Id: I0561bddf423eb47180bdf85ccb3d24cafebfb44d
This commit is contained in:
svcmobrel-release
2023-08-03 21:11:21 -07:00
parent 0872bd5b3b
commit ec8c9e6d6a
1166 changed files with 460174 additions and 0 deletions

View File

@@ -0,0 +1,179 @@
###########################################################################
# Makefile for nv-kernel.o
###########################################################################

NV_MODULE_LOGGING_NAME ?= nvidia

VERSION_MK_DIR = ../../
include ../../utils.mk
include srcs.mk

# The source files for nv-kernel.o are all SRCS and SRCS_CXX defined in srcs.mk,
# and the NVIDIA ID string
ALL_SRCS = $(SRCS) $(SRCS_CXX)
ALL_SRCS += $(NVIDSTRING)

SRC_COMMON = ../common

CONDITIONAL_CFLAGS :=

CFLAGS += -include $(SRC_COMMON)/sdk/nvidia/inc/cpuopsys.h

CFLAGS += -I kernel/inc
CFLAGS += -I interface
CFLAGS += -I $(SRC_COMMON)/sdk/nvidia/inc
CFLAGS += -I arch/nvalloc/common/inc
CFLAGS += -I arch/nvalloc/common/inc/deprecated
CFLAGS += -I arch/nvalloc/unix/include
CFLAGS += -I inc
CFLAGS += -I inc/os
CFLAGS += -I $(SRC_COMMON)/shared/inc
CFLAGS += -I $(SRC_COMMON)/shared/msgq/inc
CFLAGS += -I $(SRC_COMMON)/inc
CFLAGS += -I $(SRC_COMMON)/inc/swref/published
CFLAGS += -I generated
CFLAGS += -I $(SRC_COMMON)/nvswitch/kernel/inc
CFLAGS += -I $(SRC_COMMON)/nvswitch/interface
CFLAGS += -I $(SRC_COMMON)/nvswitch/common/inc/
CFLAGS += -I $(SRC_COMMON)/inc/displayport
CFLAGS += -I $(SRC_COMMON)/nvlink/interface/
CFLAGS += -I src/mm/uvm/interface
CFLAGS += -I $(SRC_COMMON)/cyclestats
CFLAGS += -I inc/libraries
CFLAGS += -I src/libraries
CFLAGS += -I inc/kernel
CFLAGS += -I inc/physical

# XXX TODO: review which of these we need for the build
CFLAGS += -Werror-implicit-function-declaration
CFLAGS += -Wwrite-strings
#CFLAGS += -Wformat
#CFLAGS += -Wreturn-type
#CFLAGS += -Wswitch
#CFLAGS += -Wno-multichar
#CFLAGS += -Wno-unused-local-typedefs
#CFLAGS += -Wchar-subscripts
#CFLAGS += -Wparentheses
#CFLAGS += -Wpointer-arith
#CFLAGS += -Wstack-usage=3584
#CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -Wformat-overflow=1)
#CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -Wformat-truncation=0)

CFLAGS += -fno-common
CFLAGS += -ffreestanding
CFLAGS += -fno-stack-protector

# Freestanding kernel object: no FP/SIMD, kernel code model on x86_64.
ifeq ($(TARGET_ARCH),x86_64)
  CFLAGS += -msoft-float
  CFLAGS += -mno-red-zone
  CFLAGS += -mcmodel=kernel
  CFLAGS += -mno-mmx
  CFLAGS += -mno-sse
  CFLAGS += -mno-sse2
  CFLAGS += -mno-3dnow
endif

ifeq ($(TARGET_ARCH),aarch64)
  CFLAGS += -mgeneral-regs-only
  CFLAGS += -march=armv8-a
  CFLAGS += -mstrict-align
  CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mno-outline-atomics)
endif

#CFLAGS += -ffunction-sections
#CFLAGS += -fdata-sections
#CFLAGS += -DDEVELOP

CFLAGS += -fno-pic

CFLAGS += -DGL_EXPERT
CFLAGS += -DNVPMAPI
CFLAGS += -DNVCONFIG_PROFILE=unix_global_internal_profile
CFLAGS += -D_LANGUAGE_C
CFLAGS += -D__NO_CTYPE
CFLAGS += -DNVRM
CFLAGS += -DLOCK_VAL_ENABLED=0
CFLAGS += -DPORT_ATOMIC_64_BIT_SUPPORTED=1
CFLAGS += -DPORT_IS_KERNEL_BUILD=1
CFLAGS += -DPORT_IS_CHECKED_BUILD=1
CFLAGS += -DPORT_MODULE_atomic=1
CFLAGS += -DPORT_MODULE_core=1
CFLAGS += -DPORT_MODULE_cpu=1
CFLAGS += -DPORT_MODULE_crypto=1
CFLAGS += -DPORT_MODULE_debug=1
CFLAGS += -DPORT_MODULE_memory=1
CFLAGS += -DPORT_MODULE_safe=1
CFLAGS += -DPORT_MODULE_string=1
CFLAGS += -DPORT_MODULE_sync=1
CFLAGS += -DPORT_MODULE_thread=1
CFLAGS += -DPORT_MODULE_util=1
CFLAGS += -DPORT_MODULE_example=0
CFLAGS += -DPORT_MODULE_mmio=0
CFLAGS += -DPORT_MODULE_time=0
CFLAGS += -DRS_STANDALONE=0
CFLAGS += -DRS_STANDALONE_TEST=0
CFLAGS += -DRS_COMPATABILITY_MODE=1
CFLAGS += -DRS_PROVIDES_API_STATE=0
CFLAGS += -DNV_CONTAINERS_NO_TEMPLATES
CFLAGS += -DNV_PRINTF_STRINGS_ALLOWED=1
CFLAGS += -DNV_ASSERT_FAILED_USES_STRINGS=1
CFLAGS += -DPORT_ASSERT_FAILED_USES_STRINGS=1

ifeq ($(DEBUG),1)
  CFLAGS += -gsplit-dwarf
endif

# Define how to perform dead code elimination: place each symbol in its own
# section at compile time, and garbage collect unreachable sections at link
# time. exports_link_command.txt tells the linker which symbols need to be
# exported from nv-kernel.o so the linker can determine which symbols are
# unreachable.
CFLAGS += -ffunction-sections
CFLAGS += -fdata-sections
NV_KERNEL_O_LDFLAGS += --gc-sections
EXPORTS_LINK_COMMAND = exports_link_command.txt

CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -fcf-protection=none)

ifeq ($(TARGET_ARCH),x86_64)
  CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch-register)
  CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch=thunk-extern)
endif

CFLAGS += $(CONDITIONAL_CFLAGS)

CC_ONLY_CFLAGS += --std=gnu11
CXX_ONLY_CFLAGS += --std=gnu++11

OBJS = $(call BUILD_OBJECT_LIST,$(ALL_SRCS))

# Define how to generate the NVIDIA ID string
$(eval $(call GENERATE_NVIDSTRING, \
  NVRM_ID, \
  UNIX Open Kernel Module, $(OBJS)))

# Define how to build each object file from the corresponding source file.
$(foreach src, $(ALL_SRCS), $(eval $(call DEFINE_OBJECT_RULE,TARGET,$(src))))

NV_KERNEL_O = $(OUTPUTDIR)/nv-kernel.o

# Fixed: was ".PNONY", which left `all` and `clean` as ordinary file
# targets (a file named "clean" would have silently disabled the rule).
.PHONY: all clean

all: $(NV_KERNEL_O)

LINKER_SCRIPT = nv-kernel.ld

# Partial (-r) link of every object into a single relocatable nv-kernel.o,
# keeping only the symbols listed in $(EXPORTS_LINK_COMMAND) global; memset
# and memcpy are then localized so they cannot clash with kernel symbols.
$(NV_KERNEL_O): $(OBJS) $(EXPORTS_LINK_COMMAND) $(LINKER_SCRIPT)
	$(call quiet_cmd,LD) \
	  $(NV_KERNEL_O_LDFLAGS) \
	  -T $(LINKER_SCRIPT) \
	  -r -o $(NV_KERNEL_O) $(OBJS) @$(EXPORTS_LINK_COMMAND)
	$(call quiet_cmd,OBJCOPY) \
	  --localize-symbol=memset \
	  --localize-symbol=memcpy \
	  $@

clean:
	$(RM) -rf $(OUTPUTDIR)

View File

@@ -0,0 +1,162 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
 * @file  nvrangetypes.h
 * @brief Inclusive [min, max] range types and operator macros.
 *
 * @note  #include a header that defines the NvUxx and NvSxx scalar types
 *        (e.g. nvtypes.h) before including this file.
 */
#ifndef _NVRANGETYPES_H_
#define _NVRANGETYPES_H_

//
// Define range types by convention: NvRange<T> is a {min, max} pair of the
// corresponding Nv<T> scalar type.
//
#define __NV_DEFINE_RANGE_TYPE(T)   \
typedef struct NvRange ## T         \
{                                   \
    Nv ## T min;                    \
    Nv ## T max;                    \
} NvRange ## T;

__NV_DEFINE_RANGE_TYPE(U64)     // NvRangeU64
__NV_DEFINE_RANGE_TYPE(S64)     // NvRangeS64
__NV_DEFINE_RANGE_TYPE(U32)     // NvRangeU32
__NV_DEFINE_RANGE_TYPE(S32)     // NvRangeS32
__NV_DEFINE_RANGE_TYPE(U16)     // NvRangeU16
__NV_DEFINE_RANGE_TYPE(S16)     // NvRangeS16
__NV_DEFINE_RANGE_TYPE(U8)      // NvRangeU8
__NV_DEFINE_RANGE_TYPE(S8)      // NvRangeS8

//
// Operator macros
//
// Macros are named xxx_RANGE (rather than xxx_RANGEU32, etc.) since they work
// properly on ranges with any number of bits, signed or unsigned.
//
#define NV_EQUAL_RANGE(r1, r2)            ((r1).min == (r2).min && (r1).max == (r2).max)
#define NV_EMPTY_INCLUSIVE_RANGE(r)       ((r).min > (r).max)
#define NV_EMPTY_EXCLUSIVE_RANGE(r)       ((r).min + 1 > (r).max - 1)
#define NV_WITHIN_INCLUSIVE_RANGE(r, x)   ((r).min <= (x) && (x) <= (r).max)
#define NV_WITHIN_EXCLUSIVE_RANGE(r, x)   ((r).min < (x) && (x) < (r).max)
#define NV_IS_SUBSET_RANGE(r1, r2)        ((r1).min >= (r2).min && (r2).max >= (r1).max)
#define NV_IS_SUPERSET_RANGE(r1, r2)      ((r1).min <= (r2).min && (r2).max <= (r1).max)
#define NV_CENTER_OF_RANGE(r)             ((r).min / 2 + ((r).max + 1) / 2) // Avoid overflow and rounding anomalies.

//
// True when inclusive ranges r1 and r2 share at least one value.
// Fixed: the original only tested whether r2's endpoints fell inside r1,
// so it reported "no overlap" when r2 strictly contained r1. Testing
// containment in both directions covers all overlap cases.
//
#define NV_IS_OVERLAPPING_RANGE(r1, r2)          \
    (NV_WITHIN_INCLUSIVE_RANGE((r1), (r2).min) || \
     NV_WITHIN_INCLUSIVE_RANGE((r2), (r1).min))

// 0 when x lies inside r; otherwise the distance to the nearer endpoint.
#define NV_DISTANCE_FROM_RANGE(r, x)          ((x) < (r).min? (r).min - (x): ((x) > (r).max? (x) - (r).max: 0))
// Clamp x to [r.min, r.max].
#define NV_VALUE_WITHIN_INCLUSIVE_RANGE(r, x) ((x) < (r).min? (r).min : ((x) > (r).max? (r).max : (x)))
// Clamp x to the open interval (r.min, r.max).
#define NV_VALUE_WITHIN_EXCLUSIVE_RANGE(r, x) ((x) <= (r).min? (r).min + 1 : ((x) >= (r).max? (r).max - 1 : (x)))

#define NV_INIT_RANGE(r, x, y)  \
    do                          \
    {                           \
        (r).min = (x);          \
        (r).max = (y);          \
    } while(0)

// r = [x - d, x + d]
#define NV_ASSIGN_DELTA_RANGE(r, x, d)  \
    do                                  \
    {                                   \
        (r).min = (x) - (d);            \
        (r).max = (x) + (d);            \
    } while(0)

// r1 = r1 ∩ r2 (may produce an empty range with min > max).
#define NV_ASSIGN_INTERSECTION_RANGE(r1, r2) \
    do                                       \
    {                                        \
        if ((r1).min < (r2).min)             \
            (r1).min = (r2).min;             \
        if ((r1).max > (r2).max)             \
            (r1).max = (r2).max;             \
    } while(0)

// r1 = smallest range covering both r1 and r2.
#define NV_ASSIGN_UNION_RANGE(r1, r2)  \
    do                                 \
    {                                  \
        if ((r1).min > (r2).min)       \
            (r1).min = (r2).min;       \
        if ((r1).max < (r2).max)       \
            (r1).max = (r2).max;       \
    } while(0)

#define NV_MULTIPLY_RANGE(r, x)  \
    do                           \
    {                            \
        (r).min *= (x);          \
        (r).max *= (x);          \
    } while(0)

// Divide both endpoints, rounding both down.
#define NV_DIVIDE_FLOOR_RANGE(r, x)  \
    do                               \
    {                                \
        (r).min /= (x);              \
        (r).max /= (x);              \
    } while(0)

// Divide both endpoints, rounding both up.
#define NV_DIVIDE_CEILING_RANGE(r, x)           \
    do                                          \
    {                                           \
        (r).min = ((r).min + (x) - 1) / (x);    \
        (r).max = ((r).max + (x) - 1) / (x);    \
    } while(0)

// Divide both endpoints, rounding both to nearest.
#define NV_DIVIDE_ROUND_RANGE(r, x)             \
    do                                          \
    {                                           \
        (r).min = ((r).min + (x) / 2) / (x);    \
        (r).max = ((r).max + (x) / 2) / (x);    \
    } while(0)

// Divide, rounding min down and max up: result covers r / x.
#define NV_DIVIDE_WIDE_RANGE(r, x)              \
    do                                          \
    {                                           \
        (r).min /= (x);                         \
        (r).max = ((r).max + (x) - 1) / (x);    \
    } while(0)

// Divide, rounding min up and max down: result is covered by r / x.
#define NV_DIVIDE_NARROW_RANGE(r, x)            \
    do                                          \
    {                                           \
        (r).min = ((r).min + (x) - 1) / (x);    \
        (r).max /= (x);                         \
    } while(0)

#endif // _NVRANGETYPES_H_

View File

@@ -0,0 +1,94 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_CAPS_H_
#define _NV_CAPS_H_
#include <nv-kernel-interface-api.h>
/*
 * Opaque OS-specific capability handle; on Linux, this has member
 * 'struct proc_dir_entry'.
 */
typedef struct nv_cap nv_cap_t;
/*
 * Creates directory named "capabilities" under the provided path.
 *
 * @param[in] path Absolute path
 *
 * Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
 * Ownership: release the returned handle with nv_cap_destroy_entry().
 */
nv_cap_t* NV_API_CALL nv_cap_init(const char *path);
/*
 * Creates capability directory entry under an existing capability directory.
 *
 * @param[in] parent_cap Parent capability directory
 * @param[in] name       Capability directory's name
 * @param[in] mode       Capability directory's access mode
 *
 * Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
 * Ownership: release the returned handle with nv_cap_destroy_entry().
 */
nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap, const char *name, int mode);
/*
 * Creates capability file entry under an existing capability directory.
 *
 * @param[in] parent_cap Parent capability directory
 * @param[in] name       Capability file's name
 * @param[in] mode       Capability file's access mode
 *
 * Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
 * Ownership: release the returned handle with nv_cap_destroy_entry().
 */
nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap, const char *name, int mode);
/*
 * Destroys a capability entry previously returned by nv_cap_init(),
 * nv_cap_create_dir_entry(), or nv_cap_create_file_entry().
 *
 * @param[in] cap Capability entry
 */
void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap);
/*
 * Validates and duplicates the provided file descriptor.
 *
 * @param[in] cap Capability entry
 * @param[in] fd  File descriptor to be validated
 *
 * Returns duplicate fd upon success. Otherwise, returns -1.
 * Ownership: close the returned fd with nv_cap_close_fd().
 */
int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd);
/*
 * Closes a file descriptor.
 *
 * This function should be used to close duplicate file descriptors
 * returned by nv_cap_validate_and_dup_fd.
 *
 * @param[in] fd File descriptor to be closed
 *
 */
void NV_API_CALL nv_cap_close_fd(int fd);
#endif /* _NV_CAPS_H_ */

View File

@@ -0,0 +1,44 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_GPU_INFO_H_
#define _NV_GPU_INFO_H_
/*
* Identification of a single GPU as reported to kernel-level clients.
*/
typedef struct {
NvU32 gpu_id; /* driver-assigned GPU identifier -- NOTE(review): semantics defined by the RM; confirm against callers */
/* PCI location of the GPU */
struct {
NvU32 domain;
NvU8 bus, slot, function;
} pci_info;
/*
* opaque OS-specific pointer; on Linux, this is a pointer to the
* 'struct device' for the GPU.
*/
void *os_device_ptr;
} nv_gpu_info_t;
/* Upper bound used when sizing arrays of nv_gpu_info_t. */
#define NV_MAX_GPUS 32
#endif /* _NV_GPU_INFO_H_ */

View File

@@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_IOCTL_NUMBERS_H
#define NV_IOCTL_NUMBERS_H
/* NOTE: using an ioctl() number > 55 will overflow! */
#define NV_IOCTL_MAGIC 'F'
#define NV_IOCTL_BASE 200
/*
* Escape (ioctl request) numbers, offset from NV_IOCTL_BASE.
* NOTE(review): the gaps in the sequence (2-5, 8, 15-16) are presumably
* reserved or defined elsewhere -- do not reuse them without checking.
*/
#define NV_ESC_CARD_INFO (NV_IOCTL_BASE + 0)
#define NV_ESC_REGISTER_FD (NV_IOCTL_BASE + 1)
#define NV_ESC_ALLOC_OS_EVENT (NV_IOCTL_BASE + 6)
#define NV_ESC_FREE_OS_EVENT (NV_IOCTL_BASE + 7)
#define NV_ESC_STATUS_CODE (NV_IOCTL_BASE + 9)
#define NV_ESC_CHECK_VERSION_STR (NV_IOCTL_BASE + 10)
#define NV_ESC_IOCTL_XFER_CMD (NV_IOCTL_BASE + 11)
#define NV_ESC_ATTACH_GPUS_TO_FD (NV_IOCTL_BASE + 12)
#define NV_ESC_QUERY_DEVICE_INTR (NV_IOCTL_BASE + 13)
#define NV_ESC_SYS_PARAMS (NV_IOCTL_BASE + 14)
#define NV_ESC_EXPORT_TO_DMABUF_FD (NV_IOCTL_BASE + 17)
#endif

View File

@@ -0,0 +1,145 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_IOCTL_H
#define NV_IOCTL_H
#include <nv-ioctl-numbers.h>
#include <nvtypes.h>
/*
* PCI location and device identification for a single GPU.
*/
typedef struct {
NvU32 domain; /* PCI domain number */
NvU8 bus; /* PCI bus number */
NvU8 slot; /* PCI slot number */
NvU8 function; /* PCI function number */
NvU16 vendor_id; /* PCI vendor ID */
NvU16 device_id; /* PCI device ID */
} nv_pci_info_t;
/*
* ioctl()'s with parameter structures too large for the
* _IOC cmd layout use the nv_ioctl_xfer_t structure
* and the NV_ESC_IOCTL_XFER_CMD ioctl() to pass the actual
* size and user argument pointer into the RM, which
* will then copy it to/from kernel space in separate steps.
*/
typedef struct nv_ioctl_xfer
{
NvU32 cmd; /* the wrapped ioctl command */
NvU32 size; /* size in bytes of the buffer at 'ptr' */
NvP64 ptr NV_ALIGN_BYTES(8); /* user pointer to the actual parameter buffer */
} nv_ioctl_xfer_t;
/*
* Per-GPU card information.
* NOTE(review): presumably the payload of NV_ESC_CARD_INFO -- verify
* against the driver's ioctl dispatch.
*/
typedef struct nv_ioctl_card_info
{
NvBool valid; /* whether this entry describes a present GPU */
nv_pci_info_t pci_info; /* PCI config information */
NvU32 gpu_id;
NvU16 interrupt_line;
NvU64 reg_address NV_ALIGN_BYTES(8); /* register aperture base */
NvU64 reg_size NV_ALIGN_BYTES(8); /* register aperture size */
NvU64 fb_address NV_ALIGN_BYTES(8); /* framebuffer aperture base */
NvU64 fb_size NV_ALIGN_BYTES(8); /* framebuffer aperture size */
NvU32 minor_number;
NvU8 dev_name[10]; /* device names such as vmgfx[0-32] for vmkernel */
} nv_ioctl_card_info_t;
/* alloc event (NV_ESC_ALLOC_OS_EVENT) */
typedef struct nv_ioctl_alloc_os_event
{
NvHandle hClient;
NvHandle hDevice;
NvU32 fd; /* file descriptor the OS event is bound to */
NvU32 Status; /* result status code -- NOTE(review): presumably NV_STATUS; confirm */
} nv_ioctl_alloc_os_event_t;
/* free event (NV_ESC_FREE_OS_EVENT) */
typedef struct nv_ioctl_free_os_event
{
NvHandle hClient;
NvHandle hDevice;
NvU32 fd; /* file descriptor previously passed to alloc */
NvU32 Status;
} nv_ioctl_free_os_event_t;
/* status code (NV_ESC_STATUS_CODE): query status for the GPU at the given PCI location */
typedef struct nv_ioctl_status_code
{
NvU32 domain;
NvU8 bus;
NvU8 slot;
NvU32 status;
} nv_ioctl_status_code_t;
/* check version string (NV_ESC_CHECK_VERSION_STR) */
#define NV_RM_API_VERSION_STRING_LENGTH 64
typedef struct nv_ioctl_rm_api_version
{
NvU32 cmd; /* one of NV_RM_API_VERSION_CMD_* below */
NvU32 reply; /* one of NV_RM_API_VERSION_REPLY_* below */
char versionString[NV_RM_API_VERSION_STRING_LENGTH];
} nv_ioctl_rm_api_version_t;
/*
* NOTE(review): STRICT is the integer 0 while RELAXED/OVERRIDE are the
* character literals '1'/'2'; this asymmetry is how the values ship --
* do not "normalize" without checking userspace consumers.
*/
#define NV_RM_API_VERSION_CMD_STRICT 0
#define NV_RM_API_VERSION_CMD_RELAXED '1'
#define NV_RM_API_VERSION_CMD_OVERRIDE '2'
#define NV_RM_API_VERSION_REPLY_UNRECOGNIZED 0
#define NV_RM_API_VERSION_REPLY_RECOGNIZED 1
typedef struct nv_ioctl_query_device_intr
{
NvU32 intrStatus NV_ALIGN_BYTES(4);
NvU32 status;
} nv_ioctl_query_device_intr;
/* system parameters that the kernel driver may use for configuration */
typedef struct nv_ioctl_sys_params
{
NvU64 memblock_size NV_ALIGN_BYTES(8);
} nv_ioctl_sys_params_t;
/* NV_ESC_REGISTER_FD: associate this fd with an already-open control fd */
typedef struct nv_ioctl_register_fd
{
int ctl_fd;
} nv_ioctl_register_fd_t;
#define NV_DMABUF_EXPORT_MAX_HANDLES 128
/* NV_ESC_EXPORT_TO_DMABUF_FD: export RM memory objects through a dma-buf fd */
typedef struct nv_ioctl_export_to_dma_buf_fd
{
int fd; /* dma-buf file descriptor */
NvHandle hClient;
NvU32 totalObjects; /* total number of objects across all calls */
NvU32 numObjects; /* number of valid entries in the arrays below */
NvU32 index; /* starting object index for this batch */
NvU64 totalSize NV_ALIGN_BYTES(8);
NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES];
NvU64 offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
NvU64 sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
NvU32 status;
} nv_ioctl_export_to_dma_buf_fd_t;
#endif

View File

@@ -0,0 +1,61 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_KERNEL_RMAPI_OPS_H_
#define _NV_KERNEL_RMAPI_OPS_H_
/*
* Define the RMAPI provided to kernel-level RM clients.
*
* Kernel-level RM clients should populate nvidia_kernel_rmapi_ops_t
* by assigning nvidia_kernel_rmapi_ops_t::op and the corresponding
* parameter structure in nvidia_kernel_rmapi_ops_t's params union.
* Then, pass a pointer to the nvidia_kernel_rmapi_ops_t to
* rm_kernel_rmapi_op().
*/
#include "nvtypes.h"
#include "nvos.h"
typedef struct {
NvU32 op; /* One of the NV0[14]_XXXX operations listed below. */
/* Parameter block for the selected op; only the member matching 'op' is valid. */
union {
NVOS00_PARAMETERS free; /* NV01_FREE */
NVOS02_PARAMETERS allocMemory64; /* NV01_ALLOC_MEMORY */
NVOS21_PARAMETERS alloc; /* NV04_ALLOC */
/* NOTE: unlike the other members, vid-heap-control params are passed by pointer. */
NVOS32_PARAMETERS *pVidHeapControl; /* NV04_VID_HEAP_CONTROL */
NVOS33_PARAMETERS mapMemory; /* NV04_MAP_MEMORY */
NVOS34_PARAMETERS unmapMemory; /* NV04_UNMAP_MEMORY */
NVOS39_PARAMETERS allocContextDma2; /* NV04_ALLOC_CONTEXT_DMA */
NVOS46_PARAMETERS mapMemoryDma; /* NV04_MAP_MEMORY_DMA */
NVOS47_PARAMETERS unmapMemoryDma; /* NV04_UNMAP_MEMORY_DMA */
NVOS49_PARAMETERS bindContextDma; /* NV04_BIND_CONTEXT_DMA */
NVOS54_PARAMETERS control; /* NV04_CONTROL*/
NVOS55_PARAMETERS dupObject; /* NV04_DUP_OBJECT */
NVOS57_PARAMETERS share; /* NV04_SHARE */
NVOS61_PARAMETERS addVblankCallback; /* NV04_ADD_VBLANK_CALLBACK */
} params;
} nvidia_kernel_rmapi_ops_t;
#endif /* _NV_KERNEL_RMAPI_OPS_H_ */

View File

@@ -0,0 +1,367 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PRIV_H_
#define _NV_PRIV_H_
#include <nv.h>
#include <os/os.h>
#include <ctrl/ctrl402c.h>
#include <gpu/disp/kern_disp_max.h>
#include <gpu/disp/kern_disp_type.h>
/*
* Raw register accessors: write/read an 8/16/32-bit value at byte offset
* 'o' within the mapped aperture 'b'; the offset is scaled down to index
* the appropriately-sized RegNNN element array.
*/
#define NV_PRIV_REG_WR08(b,o,d) (*((volatile NvV8*)&(b)->Reg008[(o)/1])=(NvV8)(d))
#define NV_PRIV_REG_WR16(b,o,d) (*((volatile NvV16*)&(b)->Reg016[(o)/2])=(NvV16)(d))
#define NV_PRIV_REG_WR32(b,o,d) (*((volatile NvV32*)&(b)->Reg032[(o)/4])=(NvV32)(d))
#define NV_PRIV_REG_RD08(b,o) ((b)->Reg008[(o)/1])
#define NV_PRIV_REG_RD16(b,o) ((b)->Reg016[(o)/2])
#define NV_PRIV_REG_RD32(b,o) ((b)->Reg032[(o)/4])
struct OBJGPU;
/* Saved VGA console state (VGAADDRDESC is declared elsewhere). */
typedef struct
{
NvBool baseValid; /* whether 'base' holds a valid address */
VGAADDRDESC base;
NvBool workspaceBaseValid; /* whether 'workspaceBase' holds a valid address */
VGAADDRDESC workspaceBase;
NvU32 vesaMode;
} nv_vga_t;
/*
* device state during Power Management
*/
typedef struct nv_pm_state_s
{
NvU32 IntrEn; /* saved interrupt-enable state -- NOTE(review): inferred from name; confirm */
NvBool InHibernate; /* true while hibernating -- NOTE(review): inferred from name; confirm */
} nv_pm_state_t;
/*
* data structure for the UNIX workqueues
*/
typedef struct nv_work_item_s
{
NvU32 flags; /* bitmask of NV_WORK_ITEM_FLAGS_* below */
NvU32 gpuInstance;
/* NOTE(review): which member is valid presumably follows from
* NV_WORK_ITEM_FLAGS_REQUIRES_GPU in 'flags' -- confirm at call sites. */
union
{
OSWorkItemFunction *pGpuFunction;
OSSystemWorkItemFunction *pSystemFunction;
} func;
void *pData; /* opaque argument passed to the work function */
} nv_work_item_t;
#define NV_WORK_ITEM_FLAGS_NONE 0x0
#define NV_WORK_ITEM_FLAGS_REQUIRES_GPU 0x1
#define NV_WORK_ITEM_FLAGS_DONT_FREE_DATA 0x2
/*
* pseudo-registry data structure
*/
typedef enum
{
NV_REGISTRY_ENTRY_TYPE_UNKNOWN = 0,
NV_REGISTRY_ENTRY_TYPE_DWORD,
NV_REGISTRY_ENTRY_TYPE_BINARY,
NV_REGISTRY_ENTRY_TYPE_STRING
} nv_reg_type_t;
/* One registry key/value; entries form a singly-linked list via 'next'. */
typedef struct nv_reg_entry_s
{
char *regParmStr; /* registry key name */
NvU32 type; /* an nv_reg_type_t value */
NvU32 data; // used when type == NV_REGISTRY_ENTRY_TYPE_DWORD
NvU8 *pdata; // used when type == NV_REGISTRY_ENTRY_TYPE_{BINARY,STRING}
NvU32 len; // used when type == NV_REGISTRY_ENTRY_TYPE_{BINARY,STRING}
struct nv_reg_entry_s *next; /* next entry in the list, or NULL */
} nv_reg_entry_t;
#define INVALID_DISP_ID 0xFFFFFFFF
#define MAX_DISP_ID_PER_ADAPTER 0x2
/* Association between an OS I2C adapter and the displays behind it. */
typedef struct nv_i2c_adapter_entry_s
{
void *pOsAdapter; /* opaque OS adapter handle */
NvU32 port; /* I2C port number */
NvU32 displayId[MAX_DISP_ID_PER_ADAPTER]; /* INVALID_DISP_ID marks unused slots -- NOTE(review): inferred; confirm */
} nv_i2c_adapter_entry_t;
/*
* NOTE(review): bitmask flags that appear to record which initialization
* stages have completed (presumably consulted during teardown) -- confirm
* against the code that sets/tests them.
*/
#define NV_INIT_FLAG_HAL 0x0001
#define NV_INIT_FLAG_HAL_COMPONENTS 0x0002
#define NV_INIT_FLAG_GPU_STATE 0x0004
#define NV_INIT_FLAG_GPU_STATE_LOAD 0x0008
#define NV_INIT_FLAG_FIFO_WATCHDOG 0x0010
#define NV_INIT_FLAG_CORE_LOGIC 0x0020
#define NV_INIT_FLAG_GPUMGR_ATTACH 0x0040
#define NV_INIT_FLAG_PUBLIC_I2C 0x0080
#define NV_INIT_FLAG_SCALABILITY 0x0100
#define NV_INIT_FLAG_DMA 0x0200
#define MAX_I2C_ADAPTERS NV402C_CTRL_NUM_I2C_PORTS
/*
* GPU dynamic power state machine.
*
* The GPU is in exactly one of these states at a time. Only certain state
* transitions are valid, as documented by the DAGs below.
*
* When in "instant idle" or COARSE mode:
*
* +----------------------+
* v |
* +---------+ +----------------+ +--------+
* | UNKNOWN | --> | IDLE_INDICATED | --> | IN_USE |
* +---------+ +----------------+ +--------+
*
* The transition from UNKNOWN to IDLE_INDICATED happens in
* rm_init_dynamic_power_management().
*
* Thereafter, transitions from IDLE_INDICATED to IN_USE happen when
* os_ref_dynamic_power() is called and the refcount transitions from 0 to 1;
* transitions from IN_USE to IDLE_INDICATED happen when
* os_unref_dynamic_power() is called and the refcount transitions from 1 to 0.
* Note that only calls to os_(un)ref_dynamic_power() with the mode == COARSE
* are considered in this mode; calls with mode == FINE are ignored. Since
* COARSE calls are placed only in rm_init_adapter/rm_shutdown_adapter, the GPU
* effectively stays in the IN_USE state any time any client has initialized
* it.
*
*
* When in "deferred idle" or FINE mode:
*
* +----------------------------------------------------------------+
* | |
* | |
* | +-------------------------------------------+----------------------+
* | | | v
* | +---------+ +----------------+ +--------------+ +----------------+ +--------+
* | | UNKNOWN | --> | IDLE_INDICATED | --> | | --> | IDLE_SUSTAINED | --> | IN_USE | -+
* | +---------+ +----------------+ | | +----------------+ +--------+ |
* | ^ | | | ^ |
* +--------------------+ | IDLE_INSTANT | ------+----------------------+ |
* | | | |
* | | | |
* | | <-----+ |
* +--------------+ |
* ^ |
* +-----------------------------------------------------+
*
* As before, the transition from UNKNOWN to IDLE_INDICATED happens in
* rm_init_dynamic_power_management(). This is not ideal: it means the GPU may
* be powered down immediately upon loading the RM module, even if
* rm_init_adapter() is going to be called soon thereafter. However, we can't
* rely on deferred idle callbacks yet, since those currently rely on core RM
* being initialized.
*
* At the beginning of rm_init_adapter(), the GPU transitions to the IN_USE
* state; during the rm_init_adapter() sequence,
* RmInitDeferredDynamicPowerManagement() will be called which will schedule
* timer callbacks and set the "deferred_idle_enabled" boolean.
*
* While in "deferred idle" mode, one of the callbacks
* timerCallbackForIdlePreConditions(), timerCallbackToIndicateIdle(), or
* RmIndicateIdle() should be scheduled when in the states:
* - IN_USE
* - IDLE_INSTANT
* - IDLE_SUSTAINED
* Note that since we may transition from IN_USE to IDLE_INSTANT rapidly (e.g.,
* for a series of RM calls), we don't attempt to schedule the callbacks and
* cancel them on each of these transitions. The
* timerCallbackForIdlePreConditions() callback will simply exit early if in
* the IN_USE state.
*
* As before, the GPU will remain in the IN_USE state until
* os_unref_dynamic_power() is called and the count transitions from 1 to 0
* (calls with mode == FINE are honored, in this mode, and these transitions
* can happen frequently). When the refcount reaches 0, rather than going
* directly to the IDLE_INDICATED state, it transitions to the IDLE_INSTANT
* state.
*
* Then, when the next timerCallbackForIdlePreConditions() callback executes,
* if all preconditions are met, the state will transition to IDLE_SUSTAINED.
*
* If, when in the IDLE_SUSTAINED state, os_ref_dynamic_power() is called, the
* GPU will transition back to the IN_USE state and return to the IDLE_INSTANT
* state. This ensures that there is a suitable delay between any activity
* that requires bumping the refcount and indicating idleness.
*
* If the timerCallbackForIdlePreConditions() callback executes again and the
* GPU is still in the IDLE_SUSTAINED state, userspace mappings will be revoked
* and the timerCallbackToIndicateIdle() callback will be scheduled.
*
* If, before the timerCallbackToIndicateIdle() callback executes, either
* os_ref_dynamic_power() is called or a mapping which has been revoked is
* accessed (which triggers the RmForceGpuNotIdle() callback), the GPU will
* transition back to the IN_USE or IDLE_INSTANT state, respectively.
*
* Then, when the timerCallbackToIndicateIdle() callback executes, if all
* mappings are still revoked, and the GPU is still in the IDLE_SUSTAINED
* state, and all GPU idleness preconditions remain satisfied, the
* RmIndicateIdle() work item will be enqueued. (Else, the GPU will transition
* back to the IDLE_INSTANT state and the callback for preconditions is
* scheduled again.)
*
* Finally, once the RmIndicateIdle() work item is called, if all of the same
* conditions still hold, the state will transition to IDLE_INDICATED. No
* callbacks will be scheduled from here; the callbacks for preconditions
* should be re-scheduled when transitioning out of the IDLE_INDICATED state.
*
* Once in the IDLE_INDICATED state, the kernel is free to call the RM to
* perform the GC6 entry sequence then turn off power to the GPU (although it
* may not, if the audio function is being used for example).
*
* There are two paths to exit the IDLE_INDICATED state:
* (a) If os_ref_dynamic_power() is called, in which case it transitions
* directly to the IN_USE state;
* (b) If RmForceGpuNotIdle() is called, in which case it transitions back to
* the IDLE_INSTANT state.
*/
/* States of the dynamic power state machine documented in the DAGs above. */
typedef enum
{
NV_DYNAMIC_POWER_STATE_UNKNOWN = 0, /* before rm_init_dynamic_power_management() runs */
NV_DYNAMIC_POWER_STATE_IN_USE, /* refcount > 0 */
NV_DYNAMIC_POWER_STATE_IDLE_INSTANT, /* refcount reached 0; deferred-idle only */
NV_DYNAMIC_POWER_STATE_IDLE_SUSTAINED, /* idle preconditions held across a callback; deferred-idle only */
NV_DYNAMIC_POWER_STATE_IDLE_INDICATED, /* idleness reported; GPU may be powered down */
} nv_dynamic_power_state_t;
/* Per-GPU bookkeeping for the dynamic power state machine described above. */
typedef struct nv_dynamic_power_s
{
/*
* mode is read without the mutex -- should be read-only outside of
* rm_init_dynamic_power_management, called during probe only.
*/
nv_dynamic_power_mode_t mode;
/*
* Whether to indicate idle immediately when the refcount reaches 0, or
* only go to the IDLE_INSTANT state, and expect timer callbacks to
* transition through IDLE_SUSTAINED -> IDLE_INDICATED.
*/
NvBool deferred_idle_enabled;
/* Current position in the state machine documented above. */
nv_dynamic_power_state_t state;
/* Outstanding os_ref_dynamic_power() references; 0<->1 transitions drive the state machine. */
NvS32 refcount;
/*
* A word on lock ordering. These locks must be taken in the order:
*
* RM API lock > this dynamic_power mutex > RM GPUs lock
*
* Skipping any of those locks is fine (if they aren't required to protect
* whatever state is being accessed or modified), so long as the order is
* not violated.
*/
PORT_MUTEX *mutex;
/*
* callback handles for deferred dynamic power management.
*/
NvP64 idle_precondition_check_event;
NvP64 indicate_idle_event;
NvBool idle_precondition_check_callback_scheduled;
/*
* callback handle for kernel initiated gc6 entry/exit.
* these will be protected by the gpu lock.
*/
NvP64 remove_idle_holdoff;
NvBool b_idle_holdoff;
/*
* flag set if the platform does not support fine grain dynamic power
* management.
*/
NvBool b_fine_not_supported;
/*
* Counter to track clients disallowing GCOFF.
*/
NvU32 clients_gcoff_disallow_refcount;
/*
* Maximum FB allocation size which can be saved in system memory
* while doing GCOFF based dynamic PM.
*/
NvU64 gcoff_max_fb_size;
/*
* NVreg_DynamicPowerManagement regkey value set by the user
*/
NvU32 dynamic_power_regkey;
} nv_dynamic_power_t;
/* Per-GPU private state attached to the nv handle (see NV_SET_NV_PRIV below). */
typedef struct
{
OBJGPU *pGpu; /* core RM GPU object */
NvU32 pmc_boot_0; /* cached PMC_BOOT_0 register value -- NOTE(review): inferred from name; confirm */
nv_vga_t vga; /* saved VGA console state */
NvU32 flags; /* bitmask of NV_INIT_FLAG_* */
NvU32 status;
nv_i2c_adapter_entry_t i2c_adapters[MAX_I2C_ADAPTERS];
void *pVbiosCopy; /* in-memory copy of the video BIOS */
NvU32 vbiosSize; /* size in bytes of pVbiosCopy */
nv_pm_state_t pm_state;
nv_reg_entry_t *pRegistry; /* per-GPU pseudo-registry list */
nv_dynamic_power_t dynamic_power;
/* Flag to check if the GPU needs 4K page isolation. */
NvBool b_4k_page_isolation_required;
/* Flag to check if GPU mobile config is enabled */
NvBool b_mobile_config_enabled;
/* Flag to check if S0ix-based power management is enabled. */
NvBool s0ix_pm_enabled;
/*
* Maximum FB allocation size which can be saved in system memory
* during system suspend with S0ix-based power management.
*/
NvU64 s0ix_gcoff_max_fb_size;
NvU32 pmc_boot_42;
} nv_priv_t;
/* Attach/fetch the nv_priv_t pointer stored on the nv handle. */
#define NV_SET_NV_PRIV(nv,p) ((nv)->priv = (p))
#define NV_GET_NV_PRIV(nv) ((nv) ? (nv)->priv : NULL)
/*
* Make sure that your stack has taken API Lock before using this macro.
*/
#define NV_GET_NV_PRIV_PGPU(nv) \
(NV_GET_NV_PRIV(nv) ? ((nv_priv_t *)NV_GET_NV_PRIV(nv))->pGpu : NULL)
#endif // _NV_PRIV_H_

View File

@@ -0,0 +1,927 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RM_REG_H_
#define _RM_REG_H_
#include "nvtypes.h"
/*
* use NV_REG_STRING to stringify a registry key when using that registry key
*/
#define __NV_REG_STRING(regkey) #regkey
#define NV_REG_STRING(regkey) __NV_REG_STRING(regkey)
/*
* use NV_DEFINE_REG_ENTRY and NV_DEFINE_PARAMS_TABLE_ENTRY to simplify definition
* of registry keys in the kernel module source code.
*/
#define __NV_REG_VAR(regkey) NVreg_##regkey
/* When NV_MODULE_PARAMETER is available, also expose the regkey as a module parameter. */
#if defined(NV_MODULE_PARAMETER)
#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
static NvU32 __NV_REG_VAR(regkey) = (default_value); \
NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
NvU32 __NV_REG_VAR(regkey) = (default_value); \
NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
#else
#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
static NvU32 __NV_REG_VAR(regkey) = (default_value)
#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
NvU32 __NV_REG_VAR(regkey) = (default_value)
#endif
#if defined(NV_MODULE_STRING_PARAMETER)
#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
char *__NV_REG_VAR(regkey) = (default_value); \
NV_MODULE_STRING_PARAMETER(__NV_REG_VAR(regkey))
#else
#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
char *__NV_REG_VAR(regkey) = (default_value)
#endif
/* Expands to a { name-string, &variable } initializer for the parameter table. */
#define NV_DEFINE_PARAMS_TABLE_ENTRY(regkey) \
{ NV_REG_STRING(regkey), &__NV_REG_VAR(regkey) }
/*
* Like NV_DEFINE_PARAMS_TABLE_ENTRY, but allows a mismatch between the name of
* the regkey and the name of the module parameter. When using this macro, the
* name of the parameter is passed to the extra "parameter" argument, and it is
* this name that must be used in the NV_DEFINE_REG_ENTRY() macro.
*/
#define NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(regkey, parameter) \
{ NV_REG_STRING(regkey), &__NV_REG_VAR(parameter)}
/*
*----------------- registry key definitions--------------------------
*/
/*
* Option: ModifyDeviceFiles
*
* Description:
*
* When this option is enabled, the NVIDIA driver will verify the validity
* of the NVIDIA device files in /dev and attempt to dynamically modify
* and/or (re-)create them, if necessary. If you don't wish for the NVIDIA
* driver to touch the device files, you can use this registry key.
*
* This module parameter is only honored by the NVIDIA GPU driver and NVIDIA
* capability driver. Furthermore, the NVIDIA capability driver provides
* modifiable /proc file entry (DeviceFileModify=0/1) to alter the behavior of
* this module parameter per device file.
*
* Possible Values:
* 0 = disable dynamic device file management
* 1 = enable dynamic device file management (default)
*/
#define __NV_MODIFY_DEVICE_FILES ModifyDeviceFiles
#define NV_REG_MODIFY_DEVICE_FILES NV_REG_STRING(__NV_MODIFY_DEVICE_FILES)
/*
* Option: DeviceFileUID
*
* Description:
*
* This registry key specifies the UID assigned to the NVIDIA device files
* created and/or modified by the NVIDIA driver when dynamic device file
* management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default UID is 0 ('root').
*/
#define __NV_DEVICE_FILE_UID DeviceFileUID
#define NV_REG_DEVICE_FILE_UID NV_REG_STRING(__NV_DEVICE_FILE_UID)
/*
* Option: DeviceFileGID
*
* Description:
*
* This registry key specifies the GID assigned to the NVIDIA device files
* created and/or modified by the NVIDIA driver when dynamic device file
* management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default GID is 0 ('root').
*/
#define __NV_DEVICE_FILE_GID DeviceFileGID
#define NV_REG_DEVICE_FILE_GID NV_REG_STRING(__NV_DEVICE_FILE_GID)
/*
* Option: DeviceFileMode
*
* Description:
*
* This registry key specifies the device file mode assigned to the NVIDIA
* device files created and/or modified by the NVIDIA driver when dynamic
* device file management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default mode is 0666 (octal, rw-rw-rw-).
*/
#define __NV_DEVICE_FILE_MODE DeviceFileMode
#define NV_REG_DEVICE_FILE_MODE NV_REG_STRING(__NV_DEVICE_FILE_MODE)
/*
* Option: ResmanDebugLevel
*
* Default value: ~0
*/
#define __NV_RESMAN_DEBUG_LEVEL ResmanDebugLevel
#define NV_REG_RESMAN_DEBUG_LEVEL NV_REG_STRING(__NV_RESMAN_DEBUG_LEVEL)
/*
* Option: RmLogonRC
*
* Default value: 1
*/
#define __NV_RM_LOGON_RC RmLogonRC
#define NV_REG_RM_LOGON_RC NV_REG_STRING(__NV_RM_LOGON_RC)
/*
* Option: InitializeSystemMemoryAllocations
*
* Description:
*
* The NVIDIA Linux driver normally clears system memory it allocates
* for use with GPUs or within the driver stack. This is to ensure
* that potentially sensitive data is not rendered accessible by
* arbitrary user applications.
*
* Owners of single-user systems or similar trusted configurations may
* choose to disable the aforementioned clears using this option and
* potentially improve performance.
*
* Possible values:
*
* 1 = zero out system memory allocations (default)
* 0 = do not perform memory clears
*/
#define __NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
InitializeSystemMemoryAllocations
#define NV_REG_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
NV_REG_STRING(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS)
/*
* Option: RegistryDwords
*
* Description:
*
* This option accepts a semicolon-separated list of key=value pairs. Each
* key name is checked against the table of static options; if a match is
* found, the static option value is overridden, but invalid options remain
* invalid. Pairs that do not match an entry in the static option table
* are passed on to the RM directly.
*
* Format:
*
* NVreg_RegistryDwords="<key=value>;<key=value>;..."
*/
#define __NV_REGISTRY_DWORDS RegistryDwords
#define NV_REG_REGISTRY_DWORDS NV_REG_STRING(__NV_REGISTRY_DWORDS)
/*
* Option: RegistryDwordsPerDevice
*
* Description:
*
* This option allows specifying registry keys per GPU device, giving
* control of the registry at per-GPU granularity. It accepts a semicolon
* separated list of key=value pairs. The first key value pair MUST be
* "pci=DDDD:BB:DD.F;" where DDDD is Domain, BB is Bus Id, DD is device slot
* number and F is the Function. This PCI BDF is used to identify which GPU to
* assign the registry keys that follow.
* If a GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is NOT
* found, then all the registry keys that follow are skipped, until the next
* valid pci identifier "pci=DDDD:BB:DD.F;" is found. Following are the valid formats for
* the value of the "pci" string:
* 1) bus:slot : Domain and function defaults to 0.
* 2) domain:bus:slot : Function defaults to 0.
* 3) domain:bus:slot.func : Complete PCI dev id string.
*
* For each of the registry keys that follows, key name is checked against the
* table of static options; if a match is found, the static option value is
* overridden, but invalid options remain invalid. Pairs that do not match an
* entry in the static option table are passed on to the RM directly.
*
* Format:
*
* NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;<key=value>;<key=value>;..; \
* pci=DDDD:BB:DD.F;<key=value>;..;"
*/
#define __NV_REGISTRY_DWORDS_PER_DEVICE RegistryDwordsPerDevice
#define NV_REG_REGISTRY_DWORDS_PER_DEVICE NV_REG_STRING(__NV_REGISTRY_DWORDS_PER_DEVICE)
#define __NV_RM_MSG RmMsg
#define NV_RM_MSG NV_REG_STRING(__NV_RM_MSG)
/*
* Option: UsePageAttributeTable
*
* Description:
*
* Enable/disable use of the page attribute table (PAT) available in
* modern x86/x86-64 processors to set the effective memory type of memory
* mappings to write-combining (WC).
*
* If enabled, an x86 processor with PAT support is present and the host
* system's Linux kernel did not configure one of the PAT entries to
* indicate the WC memory type, the driver will change the second entry in
* the PAT from its default (write-through (WT)) to WC at module load
* time. If the kernel did update one of the PAT entries, the driver will
* not modify the PAT.
*
* In both cases, the driver will honor attempts to map memory with the WC
* memory type by selecting the appropriate PAT entry using the correct
* set of PTE flags.
*
* Possible values:
*
* ~0 = use the NVIDIA driver's default logic (default)
* 1 = enable use of the PAT for WC mappings.
* 0 = disable use of the PAT for WC mappings.
*/
#define __NV_USE_PAGE_ATTRIBUTE_TABLE UsePageAttributeTable
#define NV_USE_PAGE_ATTRIBUTE_TABLE NV_REG_STRING(__NV_USE_PAGE_ATTRIBUTE_TABLE)
/*
* Option: EnableMSI
*
* Description:
*
* When this option is enabled and the host kernel supports the MSI feature,
* the NVIDIA driver will enable the PCI-E MSI capability of GPUs with the
* support for this feature instead of using PCI-E wired interrupt.
*
* Possible Values:
*
* 0 = disable MSI interrupt
* 1 = enable MSI interrupt (default)
*
*/
#define __NV_ENABLE_MSI EnableMSI
#define NV_REG_ENABLE_MSI NV_REG_STRING(__NV_ENABLE_MSI)
/*
* Option: EnablePCIeGen3
*
* Description:
*
* Due to interoperability problems seen with Kepler PCIe Gen3 capable GPUs
* when configured on SandyBridge E desktop platforms, NVIDIA feels that
* delivering a reliable, high-quality experience is not currently possible in
* PCIe Gen3 mode on all PCIe Gen3 platforms. Therefore, Quadro, Tesla and
* NVS Kepler products operate in PCIe Gen2 mode by default. You may use this
* option to enable PCIe Gen3 support.
*
* This is completely unsupported!
*
* Possible Values:
*
* 0: disable PCIe Gen3 support (default)
* 1: enable PCIe Gen3 support
*/
#define __NV_ENABLE_PCIE_GEN3 EnablePCIeGen3
#define NV_REG_ENABLE_PCIE_GEN3 NV_REG_STRING(__NV_ENABLE_PCIE_GEN3)
/*
* Option: MemoryPoolSize
*
* Description:
*
* When set to a non-zero value, this option specifies the size of the
* memory pool, given as a multiple of 1 GB, created on VMware ESXi to
* satisfy any system memory allocations requested by the NVIDIA kernel
* module.
*/
#define __NV_MEMORY_POOL_SIZE MemoryPoolSize
#define NV_REG_MEMORY_POOL_SIZE NV_REG_STRING(__NV_MEMORY_POOL_SIZE)
/*
* Option: KMallocHeapMaxSize
*
* Description:
*
* When set to a non-zero value, this option specifies the maximum size of the
* heap memory space reserved for kmalloc operations. Given as a
* multiple of 1 MB created on VMware ESXi to satisfy any system memory
* allocations requested by the NVIDIA kernel module.
*/
#define __NV_KMALLOC_HEAP_MAX_SIZE KMallocHeapMaxSize
#define NV_KMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_KMALLOC_HEAP_MAX_SIZE)
/*
* Option: VMallocHeapMaxSize
*
* Description:
*
* When set to a non-zero value, this option specifies the maximum size of the
* heap memory space reserved for vmalloc operations. Given as a
* multiple of 1 MB created on VMware ESXi to satisfy any system memory
* allocations requested by the NVIDIA kernel module.
*/
#define __NV_VMALLOC_HEAP_MAX_SIZE VMallocHeapMaxSize
#define NV_VMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_VMALLOC_HEAP_MAX_SIZE)
/*
* Option: IgnoreMMIOCheck
*
* Description:
*
 * When this option is enabled, the NVIDIA kernel module will ignore the
 * MMIO limit check during device probe on the VMware ESXi kernel. This is
 * typically necessary when the VMware ESXi MMIO limit differs between a
 * base version and its updates. Customers using updates can set this
 * regkey to avoid probe failure.
*/
#define __NV_IGNORE_MMIO_CHECK IgnoreMMIOCheck
#define NV_REG_IGNORE_MMIO_CHECK NV_REG_STRING(__NV_IGNORE_MMIO_CHECK)
/*
* Option: TCEBypassMode
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will attempt to setup
* all GPUs in "TCE bypass mode", in which DMA mappings of system memory bypass
* the IOMMU/TCE remapping hardware on IBM POWER systems. This is typically
* necessary for CUDA applications in which large system memory mappings may
* exceed the default TCE remapping capacity when operated in non-bypass mode.
*
* This option has no effect on non-POWER platforms.
*
* Possible Values:
*
* 0: system default TCE mode on all GPUs
* 1: enable TCE bypass mode on all GPUs
* 2: disable TCE bypass mode on all GPUs
*/
#define __NV_TCE_BYPASS_MODE TCEBypassMode
#define NV_REG_TCE_BYPASS_MODE NV_REG_STRING(__NV_TCE_BYPASS_MODE)
#define NV_TCE_BYPASS_MODE_DEFAULT 0
#define NV_TCE_BYPASS_MODE_ENABLE 1
#define NV_TCE_BYPASS_MODE_DISABLE 2
/*
* Option: pci
*
* Description:
*
* On Unix platforms, per GPU based registry key can be specified as:
* NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F,<per-gpu registry keys>".
* where DDDD:BB:DD.F refers to Domain:Bus:Device.Function.
* We need this key "pci" to identify what follows next is a PCI BDF identifier,
* for which the registry keys are to be applied.
*
* This define is not used on non-UNIX platforms.
*
* Possible Formats for value:
*
* 1) bus:slot : Domain and function defaults to 0.
* 2) domain:bus:slot : Function defaults to 0.
* 3) domain:bus:slot.func : Complete PCI BDF identifier string.
*/
#define __NV_PCI_DEVICE_BDF pci
#define NV_REG_PCI_DEVICE_BDF NV_REG_STRING(__NV_PCI_DEVICE_BDF)
/*
* Option: EnableStreamMemOPs
*
* Description:
*
* When this option is enabled, the CUDA driver will enable support for
* CUDA Stream Memory Operations in user-mode applications, which are so
* far required to be disabled by default due to limited support in
* devtools.
*
* Note: this is treated as a hint. MemOPs may still be left disabled by CUDA
* driver for other reasons.
*
* Possible Values:
*
* 0 = disable feature (default)
* 1 = enable feature
*/
#define __NV_ENABLE_STREAM_MEMOPS EnableStreamMemOPs
#define NV_REG_ENABLE_STREAM_MEMOPS NV_REG_STRING(__NV_ENABLE_STREAM_MEMOPS)
/*
* Option: EnableUserNUMAManagement
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will require the
* user-mode NVIDIA Persistence daemon to manage the onlining and offlining
* of its NUMA device memory.
*
* This option has no effect on platforms that do not support onlining
* device memory to a NUMA node (this feature is only supported on certain
* POWER9 systems).
*
* Possible Values:
*
* 0: disable user-mode NUMA management
* 1: enable user-mode NUMA management (default)
*/
#define __NV_ENABLE_USER_NUMA_MANAGEMENT EnableUserNUMAManagement
#define NV_REG_ENABLE_USER_NUMA_MANAGEMENT NV_REG_STRING(__NV_ENABLE_USER_NUMA_MANAGEMENT)
/*
* Option: GpuBlacklist
*
* Description:
*
* This option accepts a list of blacklisted GPUs, separated by commas, that
* cannot be attached or used. Each blacklisted GPU is identified by a UUID in
* the ASCII format with leading "GPU-". An exact match is required; no partial
* UUIDs. This regkey is deprecated and will be removed in the future. Use
* NV_REG_EXCLUDED_GPUS instead.
*/
#define __NV_GPU_BLACKLIST GpuBlacklist
#define NV_REG_GPU_BLACKLIST NV_REG_STRING(__NV_GPU_BLACKLIST)
/*
* Option: ExcludedGpus
*
* Description:
*
* This option accepts a list of excluded GPUs, separated by commas, that
* cannot be attached or used. Each excluded GPU is identified by a UUID in
* the ASCII format with leading "GPU-". An exact match is required; no partial
* UUIDs.
*/
#define __NV_EXCLUDED_GPUS ExcludedGpus
#define NV_REG_EXCLUDED_GPUS NV_REG_STRING(__NV_EXCLUDED_GPUS)
/*
* Option: NvLinkDisable
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will not attempt to
* initialize or train NVLink connections for any GPUs. System reboot is required
 * for changes to take effect.
*
* This option has no effect if no GPUs support NVLink.
*
* Possible Values:
*
* 0: Do not disable NVLink (default)
* 1: Disable NVLink
*/
#define __NV_NVLINK_DISABLE NvLinkDisable
#define NV_REG_NVLINK_DISABLE NV_REG_STRING(__NV_NVLINK_DISABLE)
/*
* Option: RestrictProfilingToAdminUsers
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will prevent users
* without administrative access (i.e., the CAP_SYS_ADMIN capability) from
* using GPU performance counters.
*
* Possible Values:
*
 * 0: Do not restrict GPU counters (default)
 * 1: Restrict GPU counters to system administrators only
 *
 * NOTE(review): the parameter declaration table below initializes
 * RestrictProfilingToAdminUsers to 1, which contradicts the "(default)"
 * marker on value 0 above — confirm which default is intended.
*/
#define __NV_RM_PROFILING_ADMIN_ONLY RmProfilingAdminOnly
#define __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER RestrictProfilingToAdminUsers
#define NV_REG_RM_PROFILING_ADMIN_ONLY NV_REG_STRING(__NV_RM_PROFILING_ADMIN_ONLY)
/*
* Option: TemporaryFilePath
*
* Description:
*
* When specified, this option changes the location in which the
* NVIDIA kernel module will create unnamed temporary files (e.g. to
* save the contents of video memory in). The indicated file must
* be a directory. By default, temporary files are created in /tmp.
*/
#define __NV_TEMPORARY_FILE_PATH TemporaryFilePath
#define NV_REG_TEMPORARY_FILE_PATH NV_REG_STRING(__NV_TEMPORARY_FILE_PATH)
/*
* Option: PreserveVideoMemoryAllocations
*
* If enabled, this option prompts the NVIDIA kernel module to save and
* restore all video memory allocations across system power management
* cycles, i.e. suspend/resume and hibernate/restore. Otherwise,
* only select allocations are preserved.
*
* Possible Values:
*
* 0: Preserve only select video memory allocations (default)
* 1: Preserve all video memory allocations
*/
#define __NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS PreserveVideoMemoryAllocations
#define NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS \
NV_REG_STRING(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS)
/*
* Option: EnableS0ixPowerManagement
*
* When this option is enabled, the NVIDIA driver will use S0ix-based
* power management for system suspend/resume, if both the platform and
* the GPU support S0ix.
*
* During system suspend, if S0ix is enabled and
* video memory usage is above the threshold configured by
* 'S0ixPowerManagementVideoMemoryThreshold', video memory will be kept
* in self-refresh mode while the rest of the GPU is powered down.
*
* Otherwise, the driver will copy video memory contents to system memory
* and power off the video memory along with the GPU.
*
* Possible Values:
*
* 0: Disable S0ix based power management (default)
* 1: Enable S0ix based power management
*/
#define __NV_ENABLE_S0IX_POWER_MANAGEMENT EnableS0ixPowerManagement
#define NV_REG_ENABLE_S0IX_POWER_MANAGEMENT \
NV_REG_STRING(__NV_ENABLE_S0IX_POWER_MANAGEMENT)
/*
* Option: S0ixPowerManagementVideoMemoryThreshold
*
* This option controls the threshold that the NVIDIA driver will use during
* S0ix-based system power management.
*
* When S0ix is enabled and the system is suspended, the driver will
* compare the amount of video memory in use with this threshold,
* to decide whether to keep video memory in self-refresh or copy video
* memory content to system memory.
*
* See the 'EnableS0ixPowerManagement' option.
*
* Values are expressed in Megabytes (1048576 bytes).
*
* Default value for this option is 256MB.
*
*/
#define __NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
S0ixPowerManagementVideoMemoryThreshold
#define NV_REG_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
NV_REG_STRING(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
/*
* Option: DynamicPowerManagement
*
* This option controls how aggressively the NVIDIA kernel module will manage
* GPU power through kernel interfaces.
*
* Possible Values:
*
 * 0: Never allow the GPU to be powered down.
* 1: Power down the GPU when it is not initialized.
* 2: Power down the GPU after it has been inactive for some time.
* 3: (Default) Power down the GPU after a period of inactivity (i.e.,
* mode 2) on Ampere or later notebooks. Otherwise, do not power down
* the GPU.
*/
#define __NV_DYNAMIC_POWER_MANAGEMENT DynamicPowerManagement
#define NV_REG_DYNAMIC_POWER_MANAGEMENT \
NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT)
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_NEVER 0
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_COARSE 1
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_FINE 2
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_DEFAULT 3
/*
* Option: DynamicPowerManagementVideoMemoryThreshold
*
* This option controls the threshold that the NVIDIA driver will use
* when selecting the dynamic power management scheme.
*
* When the driver detects that the GPU is idle, it will compare the amount
* of video memory in use with this threshold.
*
* If the current video memory usage is less than the threshold, the
* driver may preserve video memory contents in system memory and power off
* the video memory along with the GPU itself, if supported. Otherwise,
* the video memory will be kept in self-refresh mode while powering down
* the rest of the GPU, if supported.
*
* Values are expressed in Megabytes (1048576 bytes).
*
* If the requested value is greater than 200MB (the default), then it
* will be capped to 200MB.
*/
#define __NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
DynamicPowerManagementVideoMemoryThreshold
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
/*
* Option: RegisterPCIDriver
*
* Description:
*
* When this option is enabled, the NVIDIA driver will register with
* PCI subsystem.
*
* Possible values:
*
 * 1 - register as PCI driver (default)
 * 0 - do not register as PCI driver
 *
 * NOTE(review): the parameter declaration table below initializes
 * RegisterPCIDriver to 0, which contradicts the "(default)" marker on
 * value 1 above — confirm which default is intended.
*/
#define __NV_REGISTER_PCI_DRIVER RegisterPCIDriver
#define NV_REG_REGISTER_PCI_DRIVER NV_REG_STRING(__NV_REGISTER_PCI_DRIVER)
/*
* Option: EnablePCIERelaxedOrderingMode
*
* Description:
*
* When this option is enabled, the registry key RmSetPCIERelaxedOrdering will
* be set to NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE, causing
* every device to set the relaxed ordering bit to 1 in all outbound MWr
* transaction-layer packets. This is equivalent to setting the regkey to
* FORCE_ENABLE as a non-per-device registry key.
*
* Possible values:
* 0 - Do not enable PCIe TLP relaxed ordering bit-setting (default)
* 1 - Enable PCIe TLP relaxed ordering bit-setting
*/
#define __NV_ENABLE_PCIE_RELAXED_ORDERING_MODE EnablePCIERelaxedOrderingMode
#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \
NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE)
/*
* Option: EnableGpuFirmware
*
* Description:
*
* When this option is enabled, the NVIDIA driver will enable use of GPU
* firmware.
*
* Possible mode values:
* 0 - Do not enable GPU firmware
* 1 - Enable GPU firmware
* 2 - (Default) Use the default enablement policy for GPU firmware
*
* Setting this to anything other than 2 will alter driver firmware-
* enablement policies, possibly disabling GPU firmware where it would
* have otherwise been enabled by default.
*
* If this key is set globally to the system, the driver may still attempt
* to apply some policies to maintain uniform firmware modes across all
 * GPUs. This may result in the driver failing initialization on some GPUs
* to maintain such a policy.
*
* If this key is set using NVreg_RegistryDwordsPerDevice, then the driver
* will attempt to honor whatever configuration is specified without applying
 * additional policies. This may also result in failed GPU initializations if
* the configuration is not possible (for example if the firmware is missing
* from the filesystem, or the GPU is not capable).
*
* Policy bits:
*
* POLICY_ALLOW_FALLBACK:
* As the normal behavior is to fail GPU initialization if this registry
* entry is set in such a way that results in an invalid configuration, if
* instead the user would like the driver to automatically try to fallback
* to initializing the failing GPU with firmware disabled, then this bit can
* be set (ex: 0x11 means try to enable GPU firmware but fall back if needed).
* Note that this can result in a mixed mode configuration (ex: GPU0 has
* firmware enabled, but GPU1 does not).
*
*/
#define __NV_ENABLE_GPU_FIRMWARE EnableGpuFirmware
#define NV_REG_ENABLE_GPU_FIRMWARE NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE)
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK 0x0000000F
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED 0x00000000
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012
#define NV_REG_ENABLE_GPU_FIRMWARE_INVALID_VALUE 0xFFFFFFFF
/*
* Option: EnableGpuFirmwareLogs
*
* When this option is enabled, the NVIDIA driver will send GPU firmware logs
* to the system log, when possible.
*
* Possible values:
* 0 - Do not send GPU firmware logs to the system log
* 1 - Enable sending of GPU firmware logs to the system log
* 2 - (Default) Enable sending of GPU firmware logs to the system log for
* the debug kernel driver build only
*/
#define __NV_ENABLE_GPU_FIRMWARE_LOGS EnableGpuFirmwareLogs
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE_LOGS)
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_DISABLE 0x00000000
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE 0x00000001
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG 0x00000002
/*
* Option: EnableDbgBreakpoint
*
* When this option is set to a non-zero value, and the kernel is configured
* appropriately, assertions within resman will trigger a CPU breakpoint (e.g.,
* INT3 on x86_64), assumed to be caught by an attached debugger.
*
* When this option is set to the value zero (the default), assertions within
* resman will print to the system log, but no CPU breakpoint will be triggered.
*/
#define __NV_ENABLE_DBG_BREAKPOINT EnableDbgBreakpoint
/*
* Option: OpenRmEnableUnsupportedGpus
*
* Open nvidia.ko support for features beyond what is used on Data Center GPUs
* is still fairly immature, so for now require users to opt into use of open
* nvidia.ko with a special registry key, if not on a Data Center GPU.
*/
#define __NV_OPENRM_ENABLE_UNSUPPORTED_GPUS OpenRmEnableUnsupportedGpus
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS NV_REG_STRING(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS)
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE 0x00000000
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_ENABLE 0x00000001
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE
/*
* Option: NVreg_DmaRemapPeerMmio
*
* Description:
*
* When this option is enabled, the NVIDIA driver will use device driver
* APIs provided by the Linux kernel for DMA-remapping part of a device's
* MMIO region to another device, creating e.g., IOMMU mappings as necessary.
* When this option is disabled, the NVIDIA driver will instead only apply a
* fixed offset, which may be zero, to CPU physical addresses to produce the
* DMA address for the peer's MMIO region, and no IOMMU mappings will be
* created.
*
* This option only affects peer MMIO DMA mappings, and not system memory
* mappings.
*
* Possible Values:
* 0 = disable dynamic DMA remapping of peer MMIO regions
* 1 = enable dynamic DMA remapping of peer MMIO regions (default)
*/
#define __NV_DMA_REMAP_PEER_MMIO DmaRemapPeerMmio
#define NV_DMA_REMAP_PEER_MMIO NV_REG_STRING(__NV_DMA_REMAP_PEER_MMIO)
#define NV_DMA_REMAP_PEER_MMIO_DISABLE 0x00000000
#define NV_DMA_REMAP_PEER_MMIO_ENABLE 0x00000001
#if defined(NV_DEFINE_REGISTRY_KEY_TABLE)
/*
*---------registry key parameter declarations--------------
*/
/*
 * DWORD-valued registry keys; the second argument is the compiled-in
 * default used when no module parameter overrides the key.
 */
NV_DEFINE_REG_ENTRY(__NV_RESMAN_DEBUG_LEVEL, ~0);
NV_DEFINE_REG_ENTRY(__NV_RM_LOGON_RC, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MODIFY_DEVICE_FILES, 1);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_UID, 0);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_GID, 0);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_MODE, 0666);
NV_DEFINE_REG_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS, 1);
NV_DEFINE_REG_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE, ~0);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_PCIE_GEN3, 0);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_MSI, 1);
NV_DEFINE_REG_ENTRY(__NV_TCE_BYPASS_MODE, NV_TCE_BYPASS_MODE_DEFAULT);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_STREAM_MEMOPS, 0);
NV_DEFINE_REG_ENTRY(__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER, 1);
NV_DEFINE_REG_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, 0);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT, 0);
NV_DEFINE_REG_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 256);
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT, 3);
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 200);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE, NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS, NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG);
NV_DEFINE_REG_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS, NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT);
/* _GLOBAL entries apply system-wide rather than per-device. */
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_USER_NUMA_MANAGEMENT, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MEMORY_POOL_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_KMALLOC_HEAP_MAX_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_VMALLOC_HEAP_MAX_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0);
/* String-valued registry keys; NULL means "unset" by default. */
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS_PER_DEVICE, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_RM_MSG, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL);
NV_DEFINE_REG_ENTRY(__NV_DMA_REMAP_PEER_MMIO, NV_DMA_REMAP_PEER_MMIO_ENABLE);
/*
*----------------registry database definition----------------------
*/
/*
* You can enable any of the registry options disabled by default by
* editing their respective entries in the table below. The last field
* determines if the option is considered valid - in order for the
* changes to take effect, you need to recompile and reload the NVIDIA
* kernel module.
*/
/*
 * Registry parameter table: maps each registry key to the parameter
 * entry declared above. Terminated by a {NULL, NULL} sentinel.
 */
nv_parm_t nv_parms[] = {
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RESMAN_DEBUG_LEVEL),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RM_LOGON_RC),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MODIFY_DEVICE_FILES),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_UID),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_GID),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_MODE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_GEN3),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MEMORY_POOL_SIZE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_KMALLOC_HEAP_MAX_SIZE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_VMALLOC_HEAP_MAX_SIZE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IGNORE_MMIO_CHECK),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_TCE_BYPASS_MODE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_STREAM_MEMOPS),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_USER_NUMA_MANAGEMENT),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_NVLINK_DISABLE),
    /*
     * The registry key RmProfilingAdminOnly is exposed under the
     * user-visible parameter name RestrictProfilingToAdminUsers
     * (see the __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER define above).
     */
    NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(__NV_RM_PROFILING_ADMIN_ONLY,
                                             __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS),
    NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DMA_REMAP_PEER_MMIO),
    {NULL, NULL} /* sentinel: marks end of table */
};
#elif defined(NVRM)
extern nv_parm_t nv_parms[];
#endif /* NV_DEFINE_REGISTRY_KEY_TABLE */
#endif /* _RM_REG_H_ */

View File

@@ -0,0 +1,49 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_
#define _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_
#include <nvos.h>
/*
* This is a wrapper for NVOS02_PARAMETERS with file descriptor
*/
typedef struct
{
    NVOS02_PARAMETERS params; /* RM NVOS02 (allocate memory) parameter block */
    int fd;                   /* file descriptor accompanying the request */
} nv_ioctl_nvos02_parameters_with_fd;
/*
* This is a wrapper for NVOS33_PARAMETERS with file descriptor
*/
typedef struct
{
    NVOS33_PARAMETERS params; /* RM NVOS33 (map memory) parameter block */
    int fd;                   /* file descriptor accompanying the request */
} nv_ioctl_nvos33_parameters_with_fd;
#endif // _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,54 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_ESCAPE_H_INCLUDED
#define NV_ESCAPE_H_INCLUDED
/*
 * RM ioctl escape command numbers. These values are part of the
 * user/kernel interface — do not renumber existing entries; gaps in the
 * sequence are reserved/retired codes.
 */
#define NV_ESC_RM_ALLOC_MEMORY 0x27
#define NV_ESC_RM_ALLOC_OBJECT 0x28
#define NV_ESC_RM_FREE 0x29
#define NV_ESC_RM_CONTROL 0x2A
#define NV_ESC_RM_ALLOC 0x2B
#define NV_ESC_RM_CONFIG_GET 0x32
#define NV_ESC_RM_CONFIG_SET 0x33
#define NV_ESC_RM_DUP_OBJECT 0x34
#define NV_ESC_RM_SHARE 0x35
#define NV_ESC_RM_CONFIG_GET_EX 0x37
#define NV_ESC_RM_CONFIG_SET_EX 0x38
#define NV_ESC_RM_I2C_ACCESS 0x39
#define NV_ESC_RM_IDLE_CHANNELS 0x41
#define NV_ESC_RM_VID_HEAP_CONTROL 0x4A
#define NV_ESC_RM_ACCESS_REGISTRY 0x4D
#define NV_ESC_RM_MAP_MEMORY 0x4E
#define NV_ESC_RM_UNMAP_MEMORY 0x4F
#define NV_ESC_RM_GET_EVENT_DATA 0x52
#define NV_ESC_RM_ALLOC_CONTEXT_DMA2 0x54
#define NV_ESC_RM_ADD_VBLANK_CALLBACK 0x56
#define NV_ESC_RM_MAP_MEMORY_DMA 0x57
#define NV_ESC_RM_UNMAP_MEMORY_DMA 0x58
#define NV_ESC_RM_BIND_CONTEXT_DMA 0x59
#define NV_ESC_RM_EXPORT_OBJECT_TO_FD 0x5C
#define NV_ESC_RM_IMPORT_OBJECT_FROM_FD 0x5D
#define NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO 0x5E
#endif // NV_ESCAPE_H_INCLUDED

View File

@@ -0,0 +1,241 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Os interface definitions needed by os-interface.c
*/
#ifndef OS_INTERFACE_H
#define OS_INTERFACE_H
/******************* Operating System Interface Routines *******************\
* *
* Operating system wrapper functions used to abstract the OS. *
* *
\***************************************************************************/
#include <nvtypes.h>
#include <nvstatus.h>
#include "nv_stdarg.h"
#include <nv-kernel-interface-api.h>
#include <os/nv_memory_type.h>
#include <nv-caps.h>
/*
 * OS/kernel version information filled in by os_get_version_info().
 * String members are assumed to point at storage owned by the OS layer;
 * callers should not free them — TODO confirm against the implementation.
 */
typedef struct
{
    NvU32 os_major_version;              /* major version number */
    NvU32 os_minor_version;              /* minor version number */
    NvU32 os_build_number;               /* build/patch number */
    const char * os_build_version_str;   /* human-readable version string */
    const char * os_build_date_plus_str; /* build date/extra info string */
}os_version_info;
/* Each OS defines its own version of this opaque type */
struct os_work_queue;
/* Each OS defines its own version of this opaque type */
typedef struct os_wait_queue os_wait_queue;
/*
* ---------------------------------------------------------------------------
*
* Function prototypes for OS interface.
*
* ---------------------------------------------------------------------------
*/
NvU64 NV_API_CALL os_get_num_phys_pages (void);
NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64);
void NV_API_CALL os_free_mem (void *);
NV_STATUS NV_API_CALL os_get_current_time (NvU32 *, NvU32 *);
NvU64 NV_API_CALL os_get_current_tick (void);
NvU64 NV_API_CALL os_get_current_tick_hr (void);
NvU64 NV_API_CALL os_get_tick_resolution (void);
NV_STATUS NV_API_CALL os_delay (NvU32);
NV_STATUS NV_API_CALL os_delay_us (NvU32);
NvU64 NV_API_CALL os_get_cpu_frequency (void);
NvU32 NV_API_CALL os_get_current_process (void);
void NV_API_CALL os_get_current_process_name (char *, NvU32);
NV_STATUS NV_API_CALL os_get_current_thread (NvU64 *);
char* NV_API_CALL os_string_copy (char *, const char *);
NvU32 NV_API_CALL os_string_length (const char *);
NvU32 NV_API_CALL os_strtoul (const char *, char **, NvU32);
NvS32 NV_API_CALL os_string_compare (const char *, const char *);
NvS32 NV_API_CALL os_snprintf (char *, NvU32, const char *, ...);
NvS32 NV_API_CALL os_vsnprintf (char *, NvU32, const char *, va_list);
void NV_API_CALL os_log_error (const char *, va_list);
void* NV_API_CALL os_mem_copy (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_from_user (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_to_user (void *, const void *, NvU32);
void* NV_API_CALL os_mem_set (void *, NvU8, NvU32);
NvS32 NV_API_CALL os_mem_cmp (const NvU8 *, const NvU8 *, NvU32);
void* NV_API_CALL os_pci_init_handle (NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_byte (void *, NvU32, NvU8 *);
NV_STATUS NV_API_CALL os_pci_read_word (void *, NvU32, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_dword (void *, NvU32, NvU32 *);
NV_STATUS NV_API_CALL os_pci_write_byte (void *, NvU32, NvU8);
NV_STATUS NV_API_CALL os_pci_write_word (void *, NvU32, NvU16);
NV_STATUS NV_API_CALL os_pci_write_dword (void *, NvU32, NvU32);
NvBool NV_API_CALL os_pci_remove_supported (void);
void NV_API_CALL os_pci_remove (void *);
void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32);
void NV_API_CALL os_unmap_kernel_space (void *, NvU64);
void* NV_API_CALL os_map_user_space (NvU64, NvU64, NvU32, NvU32, void **);
void NV_API_CALL os_unmap_user_space (void *, NvU64, void *);
NV_STATUS NV_API_CALL os_flush_cpu_cache (void);
NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void);
NV_STATUS NV_API_CALL os_flush_user_cache (void);
void NV_API_CALL os_flush_cpu_write_combine_buffer(void);
NvU8 NV_API_CALL os_io_read_byte (NvU32);
NvU16 NV_API_CALL os_io_read_word (NvU32);
NvU32 NV_API_CALL os_io_read_dword (NvU32);
void NV_API_CALL os_io_write_byte (NvU32, NvU8);
void NV_API_CALL os_io_write_word (NvU32, NvU16);
void NV_API_CALL os_io_write_dword (NvU32, NvU32);
NvBool NV_API_CALL os_is_administrator (void);
NvBool NV_API_CALL os_allow_priority_override (void);
void NV_API_CALL os_dbg_init (void);
void NV_API_CALL os_dbg_breakpoint (void);
void NV_API_CALL os_dbg_set_level (NvU32);
NvU32 NV_API_CALL os_get_cpu_count (void);
NvU32 NV_API_CALL os_get_cpu_number (void);
void NV_API_CALL os_disable_console_access (void);
void NV_API_CALL os_enable_console_access (void);
NV_STATUS NV_API_CALL os_registry_init (void);
NV_STATUS NV_API_CALL os_schedule (void);
NV_STATUS NV_API_CALL os_alloc_spinlock (void **);
void NV_API_CALL os_free_spinlock (void *);
NvU64 NV_API_CALL os_acquire_spinlock (void *);
void NV_API_CALL os_release_spinlock (void *, NvU64);
NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *);
NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *);
NV_STATUS NV_API_CALL os_alloc_mutex (void **);
void NV_API_CALL os_free_mutex (void *);
NV_STATUS NV_API_CALL os_acquire_mutex (void *);
NV_STATUS NV_API_CALL os_cond_acquire_mutex (void *);
void NV_API_CALL os_release_mutex (void *);
void* NV_API_CALL os_alloc_semaphore (NvU32);
void NV_API_CALL os_free_semaphore (void *);
NV_STATUS NV_API_CALL os_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_release_semaphore (void *);
NvBool NV_API_CALL os_semaphore_may_sleep (void);
NV_STATUS NV_API_CALL os_get_version_info (os_version_info*);
NvBool NV_API_CALL os_is_isr (void);
NvBool NV_API_CALL os_pat_supported (void);
void NV_API_CALL os_dump_stack (void);
NvBool NV_API_CALL os_is_efi_enabled (void);
NvBool NV_API_CALL os_is_xen_dom0 (void);
NvBool NV_API_CALL os_is_vgx_hyper (void);
NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32);
NvBool NV_API_CALL os_is_grid_supported (void);
NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_get_screen_info (NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64, NvU64);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **, void**);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *);
NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid (NvU32 *);
NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr);
NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *);
void NV_API_CALL os_add_record_for_crashLog (void *, NvU32);
void NV_API_CALL os_delete_record_for_crashLog (void *);
NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32);
NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *);
NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *);
NV_STATUS NV_API_CALL os_get_page (NvU64 address);
NV_STATUS NV_API_CALL os_put_page (NvU64 address);
NvU32 NV_API_CALL os_get_page_refcount (NvU64 address);
NvU32 NV_API_CALL os_count_tail_pages (NvU64 address);
void NV_API_CALL os_free_pages_phys (NvU64, NvU32);
NV_STATUS NV_API_CALL os_call_nv_vmbus (NvU32, void *);
NV_STATUS NV_API_CALL os_open_temporary_file (void **);
void NV_API_CALL os_close_file (void *);
NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **);
NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64);
NvBool NV_API_CALL os_is_nvswitch_present (void);
void NV_API_CALL os_get_random_bytes (NvU8 *, NvU16);
NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **);
void NV_API_CALL os_free_wait_queue (os_wait_queue *);
void NV_API_CALL os_wait_uninterruptible (os_wait_queue *);
void NV_API_CALL os_wait_interruptible (os_wait_queue *);
void NV_API_CALL os_wake_up (os_wait_queue *);
nv_cap_t* NV_API_CALL os_nv_cap_init (const char *);
nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry (nv_cap_t *, const char *, int);
nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *, int);
void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *);
int NV_API_CALL os_nv_cap_validate_and_dup_fd(const nv_cap_t *, int);
void NV_API_CALL os_nv_cap_close_fd (int);
NV_STATUS NV_API_CALL os_get_tegra_platform (NvU32 *);
extern NvU32 os_page_size;
extern NvU64 os_page_mask;
extern NvU8 os_page_shift;
extern NvU32 os_sev_status;
extern NvBool os_sev_enabled;
extern NvBool os_dma_buf_enabled;
/*
* ---------------------------------------------------------------------------
*
* Debug macros.
*
* ---------------------------------------------------------------------------
*/
#define NV_DBG_INFO 0x0
#define NV_DBG_SETUP 0x1
#define NV_DBG_USERERRORS 0x2
#define NV_DBG_WARNINGS 0x3
#define NV_DBG_ERRORS 0x4
void NV_API_CALL out_string(const char *str);
int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...);
// Print a message at the given debug level, prefixed with "NVRM: GPU "
// and the GPU's PCI identifier (NV_PCI_DEV_FMT expanded from (nv)).
#define NV_DEV_PRINTF(debuglevel, nv, format, ... ) \
nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format, NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__)
// Same as NV_DEV_PRINTF, but additionally appends the NV_STATUS value
// in hexadecimal and a trailing newline.
#define NV_DEV_PRINTF_STATUS(debuglevel, nv, status, format, ... ) \
nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format " (0x%x)\n", NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__, status)
/*
* Fields for os_lock_user_pages flags parameter
*/
#define NV_LOCK_USER_PAGES_FLAGS_WRITE 0:0
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_NO 0x00000000
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_YES 0x00000001
// NV OS Tegra platform type defines
#define NV_OS_TEGRA_PLATFORM_SIM 0
#define NV_OS_TEGRA_PLATFORM_FPGA 1
#define NV_OS_TEGRA_PLATFORM_SILICON 2
#endif /* OS_INTERFACE_H */

View File

@@ -0,0 +1,61 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _OS_CUSTOM_H_
#define _OS_CUSTOM_H_
/*!
* @file os_custom.h
* @brief OS module specific definitions for this OS
*/
#include <os-interface.h>
#include <osfuncs.h>
// File modes, added for NVIDIA capabilities.
#define OS_RUSR 00400 // read permission, owner
#define OS_WUSR 00200 // write permission, owner
#define OS_XUSR 00100 // execute/search permission, owner
#define OS_RWXU (OS_RUSR | OS_WUSR | OS_XUSR) // read, write, execute/search, owner
#define OS_RGRP 00040 // read permission, group
#define OS_WGRP 00020 // write permission, group
#define OS_XGRP 00010 // execute/search permission, group
#define OS_RWXG (OS_RGRP | OS_WGRP | OS_XGRP) // read, write, execute/search, group
#define OS_ROTH 00004 // read permission, other
#define OS_WOTH 00002 // write permission, other
#define OS_XOTH 00001 // execute/search permission, other
#define OS_RWXO (OS_ROTH | OS_WOTH | OS_XOTH) // read, write, execute/search, other
#define OS_RUGO (OS_RUSR | OS_RGRP | OS_ROTH)
#define OS_WUGO (OS_WUSR | OS_WGRP | OS_WOTH)
#define OS_XUGO (OS_XUSR | OS_XGRP | OS_XOTH)
// Trigger for collecting GPU state for later extraction.
NV_STATUS RmLogGpuCrash(OBJGPU *);
// This is callback function in the miniport.
// The argument is a device extension, and must be cast as such to be useful.
typedef void (*MINIPORT_CALLBACK)(void*);
NV_STATUS osPackageRegistry(OBJGPU *pGpu, PACKED_REGISTRY_TABLE *, NvU32 *);
#endif // _OS_CUSTOM_H_

View File

@@ -0,0 +1,192 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _OSAPI_H_
#define _OSAPI_H_
#include "core/system.h"
#include "gpu/gpu.h"
#include <os-interface.h> // NV_DBG_ERRORS
#include <rmapi/rmapi.h>
#include <core/thread_state.h>
#if defined(__use_altstack__)
#if defined(QA_BUILD)
//---------------------------------------------------------------------------
//
// 32 bit debug marker values.
//
//---------------------------------------------------------------------------
#define NV_MARKER1 (NvU32)(('M' << 24) | ('R' << 16) | ('V' << 8) | 'N')
#define NV_MARKER2 (NvU32)(('N' << 24) | ('V' << 16) | ('R' << 8) | 'M')
//
// The two macros below implement a simple alternate stack usage sanity
// check for QA_BUILD RM builds. NV_ALTSTACK_WRITE_MARKERS() fills
// altstacks with NV_MARKER1, which enables NV_ALTSTACK_CHECK_MARKERS()
// to determine the stack usage fairly reliably by looking for the
// first clobbered marker. If more than 7/8 of the alternate stack were
// used, NV_ALTSTACK_CHECK_MARKERS() prints an error and asserts.
//
#define NV_ALTSTACK_WRITE_MARKERS(sp) \
{ \
NvU32 i, *stack = (void *)(sp)->stack; \
for (i = 0; i < ((sp)->size / sizeof(NvU32)); i++) \
stack[i] = NV_MARKER1; \
}
#define NV_ALTSTACK_CHECK_MARKERS(sp) \
{ \
NvU32 i, *stack = (void *)(sp)->stack; \
for (i = 0; i < ((sp)->size / sizeof(NvU32)); i++) \
{ \
if (stack[i] != NV_MARKER1) \
break; \
} \
if ((i * sizeof(NvU32)) < ((sp)->size / 8)) \
{ \
nv_printf(NV_DBG_ERRORS, "NVRM: altstack: used %d of %d bytes!\n", \
((sp)->size - (i * sizeof(NvU32))), (sp)->size); \
NV_ASSERT_PRECOMP((i * sizeof(NvU32)) >= ((sp)->size / 8)); \
} \
}
#else
#define NV_ALTSTACK_WRITE_MARKERS(sp)
#define NV_ALTSTACK_CHECK_MARKERS(sp)
#endif
#if defined(NVCPU_X86_64)
// Enter the RM runtime on the alternate stack: fill the altstack with
// usage markers (QA builds only), save the current frame pointer (%rbp)
// in fp, then point %rbp at the top of the alternate stack (sp)->top.
#define NV_ENTER_RM_RUNTIME(sp,fp) \
{ \
NV_ALTSTACK_WRITE_MARKERS(sp); \
__asm__ __volatile__ ("movq %%rbp,%0" : "=r" (fp)); /* save %rbp */ \
__asm__ __volatile__ ("movq %0,%%rbp" :: "r" ((sp)->top)); \
}
// Leave the RM runtime: verify %rbp still points at (sp)->top (printing
// an error and asserting if the runtime stack looks corrupted), check
// the altstack usage markers (QA builds only), and restore the frame
// pointer previously saved in fp by NV_ENTER_RM_RUNTIME.
#define NV_EXIT_RM_RUNTIME(sp,fp) \
{ \
register void *__rbp __asm__ ("rbp"); \
if (__rbp != (sp)->top) \
{ \
nv_printf(NV_DBG_ERRORS, "NVRM: detected corrupted runtime stack!\n"); \
NV_ASSERT_PRECOMP(__rbp == (sp)->top); \
} \
NV_ALTSTACK_CHECK_MARKERS(sp); \
__asm__ __volatile__ ("movq %0,%%rbp" :: "r" (fp)); /* restore %rbp */ \
}
#else
#error "gcc \"altstacks\" support is not implemented on this platform!"
#endif
#else
// Altstacks disabled: entering/leaving the RM runtime is a no-op.
#define NV_ENTER_RM_RUNTIME(sp,fp) { (void)sp; (void)fp; }
#define NV_EXIT_RM_RUNTIME(sp,fp)
#endif
void RmShutdownRm (void);
NvBool RmInitPrivateState (nv_state_t *);
void RmFreePrivateState (nv_state_t *);
NvBool RmInitAdapter (nv_state_t *);
NvBool RmPartiallyInitAdapter (nv_state_t *);
void RmShutdownAdapter (nv_state_t *);
void RmDisableAdapter (nv_state_t *);
void RmPartiallyDisableAdapter(nv_state_t *);
NV_STATUS RmGetAdapterStatus (nv_state_t *, NvU32 *);
NV_STATUS RmExcludeAdapter (nv_state_t *);
NvBool RmGpuHasIOSpaceEnabled (nv_state_t *);
void RmFreeUnusedClients (nv_state_t *, nv_file_private_t *);
NV_STATUS RmIoctl (nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32);
NV_STATUS RmAllocOsEvent (NvHandle, nv_file_private_t *, NvU32);
NV_STATUS RmFreeOsEvent (NvHandle, NvU32);
void RmI2cAddGpuPorts(nv_state_t *);
NV_STATUS RmInitX86EmuState(OBJGPU *);
void RmFreeX86EmuState(OBJGPU *);
NV_STATUS RmSystemEvent(nv_state_t *, NvU32, NvU32);
const NvU8 *RmGetGpuUuidRaw(nv_state_t *);
NV_STATUS nv_vbios_call(OBJGPU *, NvU32 *, NvU32 *);
int amd_adv_spec_cache_feature(OBJOS *);
int amd_msr_c0011022_incompatible(OBJOS *);
NV_STATUS rm_get_adapter_status (nv_state_t *, NvU32 *);
NV_STATUS rm_alloc_os_event (NvHandle, nv_file_private_t *, NvU32);
NV_STATUS rm_free_os_event (NvHandle, NvU32);
NV_STATUS rm_get_event_data (nv_file_private_t *, NvP64, NvU32 *);
void rm_client_free_os_events (NvHandle);
NV_STATUS rm_create_mmap_context (nv_state_t *, NvHandle, NvHandle, NvHandle, NvP64, NvU64, NvU64, NvU32, NvU32);
NV_STATUS rm_update_device_mapping_info (NvHandle, NvHandle, NvHandle, void *, void *);
NV_STATUS rm_access_registry (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvP64, NvU32, NvP64, NvU32 *, NvU32 *, NvU32 *);
// registry management
NV_STATUS RmInitRegistry (void);
NV_STATUS RmDestroyRegistry (nv_state_t *);
NV_STATUS RmWriteRegistryDword (nv_state_t *, const char *, NvU32 );
NV_STATUS RmReadRegistryDword (nv_state_t *, const char *, NvU32 *);
NV_STATUS RmWriteRegistryString (nv_state_t *, const char *, const char *, NvU32);
NV_STATUS RmReadRegistryBinary (nv_state_t *, const char *, NvU8 *, NvU32 *);
NV_STATUS RmWriteRegistryBinary (nv_state_t *, const char *, NvU8 *, NvU32);
NV_STATUS RmReadRegistryString (nv_state_t *, const char *, NvU8 *, NvU32 *);
NV_STATUS RmPackageRegistry (nv_state_t *, PACKED_REGISTRY_TABLE *, NvU32 *);
NvBool RmIsNvifFunctionSupported(NvU32, NvU32);
void RmInitAcpiMethods (OBJOS *, OBJSYS *, OBJGPU *);
void RmUnInitAcpiMethods (OBJSYS *);
void RmInflateOsToRmPageArray (RmPhysAddr *, NvU64);
void RmDeflateRmToOsPageArray (RmPhysAddr *, NvU64);
void RmInitS0ixPowerManagement (nv_state_t *);
void RmInitDeferredDynamicPowerManagement (nv_state_t *);
void RmDestroyDeferredDynamicPowerManagement(nv_state_t *);
NV_STATUS os_ref_dynamic_power (nv_state_t *, nv_dynamic_power_mode_t);
void os_unref_dynamic_power (nv_state_t *, nv_dynamic_power_mode_t);
void RmHandleDisplayChange (nvidia_stack_t *, nv_state_t *);
void RmUpdateGc6ConsoleRefCount (nv_state_t *, NvBool);
NvBool rm_get_uefi_console_status (nv_state_t *);
NvU64 rm_get_uefi_console_size (nv_state_t *, NvU64 *);
RM_API *RmUnixRmApiPrologue (nv_state_t *, THREAD_STATE_NODE *, NvU32 module);
void RmUnixRmApiEpilogue (nv_state_t *, THREAD_STATE_NODE *);
static inline NvBool rm_is_system_notebook(void)
{
return (nv_is_chassis_notebook() || nv_acpi_is_battery_present());
}
#endif // _OSAPI_H_

View File

@@ -0,0 +1,55 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef OSFUNCS_H
#define OSFUNCS_H
/**************** Resource Manager Defines and Structures ******************\
* *
* Declarations for the Operating System Specific Functions. *
* *
\***************************************************************************/
#include <os/os.h>
OSQueueWorkItem osQueueWorkItem;
OSQueueWorkItemWithFlags osQueueWorkItemWithFlags;
OSQueueSystemWorkItem osQueueSystemWorkItem;
OSDbgBreakpointEnabled osDbgBreakpointEnabled;
void* osGetStereoDongleInterface(void);
OSCallACPI_DSM osCallACPI_DSM;
OSCallACPI_DDC osCallACPI_DDC;
OSCallACPI_NVHG_ROM osCallACPI_NVHG_ROM;
OSCallACPI_DOD osCallACPI_DOD;
OSCallACPI_MXDS osCallACPI_MXDS;
OSCallACPI_MXDM osCallACPI_MXDM;
#if defined(NVCPU_X86_64)
OSnv_rdcr4 nv_rdcr4;
NvU64 nv_rdcr3(OBJOS *);
OSnv_cpuid nv_cpuid;
#endif
#endif // OSFUNCS_H

View File

@@ -0,0 +1,42 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RMOBJEXPORTIMPORT_H_
#define _RMOBJEXPORTIMPORT_H_
#include "nvstatus.h"
typedef NvHandle RmObjExportHandle;
NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject,
RmObjExportHandle *pDstObject, NvU32 *pDeviceInstance);
void RmFreeObjExportHandle(RmObjExportHandle hObject);
NV_STATUS RmImportObject(NvHandle hDstClient, NvHandle hDstParent,
NvHandle *phDstObject, RmObjExportHandle hSrcObject,
NvU8 *pObjectType);
NV_STATUS RmGetExportObjectInfo(RmObjExportHandle hSrcObject, NvU32 *deviceInstance);
#endif // _RMOBJEXPORTIMPORT_H_

View File

@@ -0,0 +1,859 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
//***************************** Module Header **********************************
//
// This code is linked into the resource manager proper. It receives the
// ioctl from the resource manager's customer, unbundles the args and
// calls the correct resman routines.
//
//******************************************************************************
#include <core/prelude.h>
#include <core/locks.h>
#include <nv.h>
#include <nv_escape.h>
#include <osapi.h>
#include <rmapi/exports.h>
#include <nv-unix-nvos-params-wrappers.h>
#include <nvos.h>
#include <class/cl0000.h> // NV01_ROOT
#include <class/cl0001.h> // NV01_ROOT_NON_PRIV
#include <class/cl0005.h> // NV01_EVENT
#include <class/cl003e.h> // NV01_MEMORY_SYSTEM
#include <class/cl0071.h> // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
// Restrict the current ioctl case to the control device node: if (nv)
// does not have NV_FLAG_CONTROL set, fail with NV_ERR_INVALID_ARGUMENT.
// NOTE: relies on an "rmStatus" variable and a "done" label existing in
// the calling scope (as in RmIoctl below).
#define NV_CTL_DEVICE_ONLY(nv) \
{ \
if (((nv)->flags & NV_FLAG_CONTROL) == 0) \
{ \
rmStatus = NV_ERR_INVALID_ARGUMENT; \
goto done; \
} \
}
// Restrict the current ioctl case to an actual (non-control) device
// node: the inverse check of NV_CTL_DEVICE_ONLY. Same rmStatus/done
// scope requirements.
#define NV_ACTUAL_DEVICE_ONLY(nv) \
{ \
if (((nv)->flags & NV_FLAG_CONTROL) != 0) \
{ \
rmStatus = NV_ERR_INVALID_ARGUMENT; \
goto done; \
} \
}
//
// Report whether the given RM control command requires holding a
// reference on a device file descriptor for the duration of the call.
// When a reference is needed the fd is returned through *fd; otherwise
// *fd is set to -1. No commands currently require a device reference.
//
static NvBool RmIsDeviceRefNeeded(NVOS54_PARAMETERS *pApi, NvS32 *fd)
{
    NvBool bRefNeeded;

    switch (pApi->cmd)
    {
        default:
            *fd = -1;
            bRefNeeded = NV_FALSE;
            break;
    }

    return bRefNeeded;
}
// Pin or look up the user memory described by pApi->data.AllocOsDesc
// and hand the resulting descriptor to the video heap. Only returns
// errors through pApi->status.
static void RmCreateOsDescriptor(NVOS32_PARAMETERS *pApi, API_SECURITY_INFO secInfo)
{
NV_STATUS rmStatus;
NvBool writable;
NvU32 flags = 0;
NvU64 allocSize, pageCount, *pPteArray = NULL;
void *pDescriptor, *pPageArray = NULL;
pDescriptor = NvP64_VALUE(pApi->data.AllocOsDesc.descriptor);
// The user virtual address must be page-aligned.
if (((NvUPtr)pDescriptor & ~os_page_mask) != 0)
{
rmStatus = NV_ERR_NOT_SUPPORTED;
goto done;
}
// Check to prevent an NvU64 overflow
if ((pApi->data.AllocOsDesc.limit + 1) == 0)
{
rmStatus = NV_ERR_INVALID_LIMIT;
goto done;
}
// limit is inclusive, so the size is limit + 1; round up to whole pages.
allocSize = (pApi->data.AllocOsDesc.limit + 1);
pageCount = (1 + ((allocSize - 1) / os_page_size));
// Request write access to the pages only if the caller asked for a
// user-writable mapping in attr2.
writable = FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_WRITE, pApi->data.AllocOsDesc.attr2);
flags = FLD_SET_DRF_NUM(_LOCK_USER_PAGES, _FLAGS, _WRITE, writable, flags);
// First attempt: treat the range as regular user memory and pin it.
rmStatus = os_lock_user_pages(pDescriptor, pageCount, &pPageArray, flags);
if (rmStatus == NV_OK)
{
pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPageArray;
pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY;
}
else if (rmStatus == NV_ERR_INVALID_ADDRESS)
{
// Not pinnable user memory; see if it is user-mapped I/O memory.
rmStatus = os_lookup_user_io_memory(pDescriptor, pageCount,
&pPteArray, &pPageArray);
if (rmStatus == NV_OK)
{
// The lookup may yield either a page array or a PTE array;
// record whichever was produced along with its descriptor type.
if (pPageArray != NULL)
{
pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPageArray;
pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY;
}
else if (pPteArray != NULL)
{
pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPteArray;
pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY;
}
else
{
NV_ASSERT_FAILED("unknown memory import type");
rmStatus = NV_ERR_NOT_SUPPORTED;
}
}
}
if (rmStatus != NV_OK)
goto done;
// Hand the prepared descriptor to the video heap; status is reported
// through pApi->status by the call itself.
Nv04VidHeapControlWithSecInfo(pApi, secInfo);
if (pApi->status != NV_OK)
{
// On failure, undo the pinning for page-array descriptors. I/O
// memory descriptors have nothing to release here.
switch (pApi->data.AllocOsDesc.descriptorType)
{
default:
break;
case NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY:
os_unlock_user_pages(pageCount, pPageArray);
break;
}
}
done:
if (rmStatus != NV_OK)
pApi->status = rmStatus;
}
// Service NV_ESC_RM_ALLOC_MEMORY for the OS-descriptor class: translate
// the caller's NVOS02 flags into NVOS32 attr/attr2 equivalents and
// forward to RmCreateOsDescriptor. Only returns errors through
// pApi->status.
static void RmAllocOsDescriptor(NVOS02_PARAMETERS *pApi, API_SECURITY_INFO secInfo)
{
NV_STATUS rmStatus = NV_OK;
NvU32 flags, attr, attr2;
NVOS32_PARAMETERS *pVidHeapParams;
// OS descriptors must be system (PCI) memory and must not request an
// immediate mapping.
if (!FLD_TEST_DRF(OS02, _FLAGS, _LOCATION, _PCI, pApi->flags) ||
!FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, pApi->flags))
{
rmStatus = NV_ERR_INVALID_FLAGS;
goto done;
}
attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI);
// Map the OS02 coherency flags onto the OS32 coherency attribute;
// anything other than cached/write-back/uncached is rejected.
if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _CACHED, pApi->flags) ||
FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK, pApi->flags))
{
attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, attr);
}
else if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, pApi->flags))
attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED, attr);
else {
rmStatus = NV_ERR_INVALID_FLAGS;
goto done;
}
if (FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, pApi->flags))
attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, attr);
else
attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, attr);
if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, pApi->flags))
attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _YES);
else
attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO);
pVidHeapParams = portMemAllocNonPaged(sizeof(NVOS32_PARAMETERS));
if (pVidHeapParams == NULL)
{
rmStatus = NV_ERR_NO_MEMORY;
goto done;
}
portMemSet(pVidHeapParams, 0, sizeof(NVOS32_PARAMETERS));
pVidHeapParams->hRoot = pApi->hRoot;
pVidHeapParams->hObjectParent = pApi->hObjectParent;
pVidHeapParams->function = NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR;
flags = (NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED |
NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED);
if (DRF_VAL(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, pApi->flags))
attr2 = FLD_SET_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, attr2);
// currently CPU-RO memory implies GPU-RO as well
if (DRF_VAL(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, pApi->flags) ||
DRF_VAL(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, pApi->flags))
attr2 = FLD_SET_DRF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY, attr2);
pVidHeapParams->data.AllocOsDesc.hMemory = pApi->hObjectNew;
pVidHeapParams->data.AllocOsDesc.flags = flags;
pVidHeapParams->data.AllocOsDesc.attr = attr;
pVidHeapParams->data.AllocOsDesc.attr2 = attr2;
pVidHeapParams->data.AllocOsDesc.descriptor = pApi->pMemory;
pVidHeapParams->data.AllocOsDesc.limit = pApi->limit;
pVidHeapParams->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_VIRTUAL_ADDRESS;
// Forward the translated request; its result is reported through
// pVidHeapParams->status, which we copy back to the caller.
RmCreateOsDescriptor(pVidHeapParams, secInfo);
pApi->status = pVidHeapParams->status;
portMemFree(pVidHeapParams);
done:
if (rmStatus != NV_OK)
pApi->status = rmStatus;
}
//
// NVOS21_PARAMETERS and NVOS64_PARAMETERS must share a common initial
// layout: the NV_ESC_RM_ALLOC handler views the same ioctl buffer
// through both types and reads these fields before it knows (from
// dataSize) which structure the client actually passed.
//
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hRoot) == NV_OFFSETOF(NVOS64_PARAMETERS, hRoot));
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hObjectParent) == NV_OFFSETOF(NVOS64_PARAMETERS, hObjectParent));
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hObjectNew) == NV_OFFSETOF(NVOS64_PARAMETERS, hObjectNew));
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hClass) == NV_OFFSETOF(NVOS64_PARAMETERS, hClass));
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, pAllocParms) == NV_OFFSETOF(NVOS64_PARAMETERS, pAllocParms));
NV_STATUS RmIoctl(
nv_state_t *nv,
nv_file_private_t *nvfp,
NvU32 cmd,
void *data,
NvU32 dataSize
)
{
NV_STATUS rmStatus = NV_ERR_GENERIC;
API_SECURITY_INFO secInfo = { };
secInfo.privLevel = osIsAdministrator() ? RS_PRIV_LEVEL_USER_ROOT : RS_PRIV_LEVEL_USER;
secInfo.paramLocation = PARAM_LOCATION_USER;
secInfo.pProcessToken = NULL;
secInfo.gpuOsInfo = NULL;
secInfo.clientOSInfo = nvfp->ctl_nvfp;
if (secInfo.clientOSInfo == NULL)
secInfo.clientOSInfo = nvfp;
switch (cmd)
{
case NV_ESC_RM_ALLOC_MEMORY:
{
nv_ioctl_nvos02_parameters_with_fd *pApi;
NVOS02_PARAMETERS *pParms;
pApi = data;
pParms = &pApi->params;
NV_ACTUAL_DEVICE_ONLY(nv);
if (dataSize != sizeof(nv_ioctl_nvos02_parameters_with_fd))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
if (pParms->hClass == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR)
RmAllocOsDescriptor(pParms, secInfo);
else
{
NvU32 flags = pParms->flags;
Nv01AllocMemoryWithSecInfo(pParms, secInfo);
//
// If the system memory is going to be mapped immediately,
// create the mmap context for it now.
//
if ((pParms->hClass == NV01_MEMORY_SYSTEM) &&
(!FLD_TEST_DRF(OS02, _FLAGS, _ALLOC, _NONE, flags)) &&
(!FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, flags)) &&
(pParms->status == NV_OK))
{
if (rm_create_mmap_context(nv, pParms->hRoot,
pParms->hObjectParent, pParms->hObjectNew,
pParms->pMemory, pParms->limit + 1, 0,
NV_MEMORY_DEFAULT,
pApi->fd) != NV_OK)
{
NV_PRINTF(LEVEL_WARNING,
"could not create mmap context for %p\n",
NvP64_VALUE(pParms->pMemory));
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
}
}
break;
}
case NV_ESC_RM_ALLOC_OBJECT:
{
NVOS05_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS05_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv01AllocObjectWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_ALLOC:
{
NVOS21_PARAMETERS *pApi = data;
NVOS64_PARAMETERS *pApiAccess = data;
NvBool bAccessApi = (dataSize == sizeof(NVOS64_PARAMETERS));
if ((dataSize != sizeof(NVOS21_PARAMETERS)) &&
(dataSize != sizeof(NVOS64_PARAMETERS)))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
switch (pApi->hClass)
{
case NV01_ROOT:
case NV01_ROOT_CLIENT:
case NV01_ROOT_NON_PRIV:
{
NV_CTL_DEVICE_ONLY(nv);
// Force userspace client allocations to be the _CLIENT class.
pApi->hClass = NV01_ROOT_CLIENT;
break;
}
case NV01_EVENT:
case NV01_EVENT_OS_EVENT:
case NV01_EVENT_KERNEL_CALLBACK:
case NV01_EVENT_KERNEL_CALLBACK_EX:
{
break;
}
default:
{
NV_CTL_DEVICE_ONLY(nv);
break;
}
}
if (!bAccessApi)
{
Nv04AllocWithSecInfo(pApi, secInfo);
}
else
{
Nv04AllocWithAccessSecInfo(pApiAccess, secInfo);
}
break;
}
case NV_ESC_RM_FREE:
{
NVOS00_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS00_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv01FreeWithSecInfo(pApi, secInfo);
if (pApi->status == NV_OK &&
pApi->hObjectOld == pApi->hRoot)
{
rm_client_free_os_events(pApi->hRoot);
}
break;
}
case NV_ESC_RM_VID_HEAP_CONTROL:
{
NVOS32_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS32_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
if (pApi->function == NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR)
RmCreateOsDescriptor(pApi, secInfo);
else
Nv04VidHeapControlWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_I2C_ACCESS:
{
NVOS_I2C_ACCESS_PARAMS *pApi = data;
NV_ACTUAL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS_I2C_ACCESS_PARAMS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04I2CAccessWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_IDLE_CHANNELS:
{
NVOS30_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS30_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04IdleChannelsWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_MAP_MEMORY:
{
nv_ioctl_nvos33_parameters_with_fd *pApi;
NVOS33_PARAMETERS *pParms;
pApi = data;
pParms = &pApi->params;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(nv_ioctl_nvos33_parameters_with_fd))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
// Don't allow userspace to override the caching type
pParms->flags = FLD_SET_DRF(OS33, _FLAGS, _CACHING_TYPE, _DEFAULT, pParms->flags);
Nv04MapMemoryWithSecInfo(pParms, secInfo);
if (pParms->status == NV_OK)
{
pParms->status = rm_create_mmap_context(nv, pParms->hClient,
pParms->hDevice, pParms->hMemory,
pParms->pLinearAddress, pParms->length,
pParms->offset,
DRF_VAL(OS33, _FLAGS, _CACHING_TYPE, pParms->flags),
pApi->fd);
if (pParms->status != NV_OK)
{
NVOS34_PARAMETERS params;
portMemSet(&params, 0, sizeof(NVOS34_PARAMETERS));
params.hClient = pParms->hClient;
params.hDevice = pParms->hDevice;
params.hMemory = pParms->hMemory;
params.pLinearAddress = pParms->pLinearAddress;
params.flags = pParms->flags;
Nv04UnmapMemoryWithSecInfo(&params, secInfo);
}
}
break;
}
case NV_ESC_RM_UNMAP_MEMORY:
{
NVOS34_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS34_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04UnmapMemoryWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_ACCESS_REGISTRY:
{
NVOS38_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS38_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->status = rm_access_registry(pApi->hClient,
pApi->hObject,
pApi->AccessType,
pApi->pDevNode,
pApi->DevNodeLength,
pApi->pParmStr,
pApi->ParmStrLength,
pApi->pBinaryData,
&pApi->BinaryDataLength,
&pApi->Data,
&pApi->Entry);
break;
}
case NV_ESC_RM_ALLOC_CONTEXT_DMA2:
{
NVOS39_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS39_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04AllocContextDmaWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_BIND_CONTEXT_DMA:
{
NVOS49_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS49_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04BindContextDmaWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_MAP_MEMORY_DMA:
{
NVOS46_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS46_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04MapMemoryDmaWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_UNMAP_MEMORY_DMA:
{
NVOS47_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS47_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04UnmapMemoryDmaWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_DUP_OBJECT:
{
NVOS55_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS55_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04DupObjectWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_SHARE:
{
NVOS57_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS57_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04ShareWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_ALLOC_OS_EVENT:
{
nv_ioctl_alloc_os_event_t *pApi = data;
if (dataSize != sizeof(nv_ioctl_alloc_os_event_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->Status = rm_alloc_os_event(pApi->hClient,
nvfp,
pApi->fd);
break;
}
case NV_ESC_FREE_OS_EVENT:
{
nv_ioctl_free_os_event_t *pApi = data;
if (dataSize != sizeof(nv_ioctl_free_os_event_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->Status = rm_free_os_event(pApi->hClient, pApi->fd);
break;
}
case NV_ESC_RM_GET_EVENT_DATA:
{
NVOS41_PARAMETERS *pApi = data;
if (dataSize != sizeof(NVOS41_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->status = rm_get_event_data(nvfp,
pApi->pEvent,
&pApi->MoreEvents);
break;
}
case NV_ESC_STATUS_CODE:
{
nv_state_t *pNv;
nv_ioctl_status_code_t *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(nv_ioctl_status_code_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pNv = nv_get_adapter_state(pApi->domain, pApi->bus, pApi->slot);
if (pNv == NULL)
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
rmStatus = rm_get_adapter_status(pNv, &pApi->status);
if (rmStatus != NV_OK)
goto done;
break;
}
case NV_ESC_RM_CONTROL:
{
NVOS54_PARAMETERS *pApi = data;
void *priv = NULL;
nv_file_private_t *nvfp;
NvS32 fd;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS54_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
if (RmIsDeviceRefNeeded(pApi, &fd))
{
nvfp = nv_get_file_private(fd, NV_FALSE, &priv);
if (nvfp == NULL)
{
rmStatus = NV_ERR_INVALID_DEVICE;
goto done;
}
secInfo.gpuOsInfo = priv;
}
Nv04ControlWithSecInfo(pApi, secInfo);
if ((pApi->status != NV_OK) && (priv != NULL))
{
nv_put_file_private(priv);
secInfo.gpuOsInfo = NULL;
}
break;
}
case NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO:
{
NVOS56_PARAMETERS *pApi = data;
void *pOldCpuAddress;
void *pNewCpuAddress;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS56_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pOldCpuAddress = NvP64_VALUE(pApi->pOldCpuAddress);
pNewCpuAddress = NvP64_VALUE(pApi->pNewCpuAddress);
pApi->status = rm_update_device_mapping_info(pApi->hClient,
pApi->hDevice,
pApi->hMemory,
pOldCpuAddress,
pNewCpuAddress);
break;
}
case NV_ESC_REGISTER_FD:
{
nv_ioctl_register_fd_t *params = data;
void *priv = NULL;
nv_file_private_t *ctl_nvfp;
if (dataSize != sizeof(nv_ioctl_register_fd_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
// LOCK: acquire API lock
rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
if (rmStatus != NV_OK)
goto done;
// If there is already a ctl fd registered on this nvfp, fail.
if (nvfp->ctl_nvfp != NULL)
{
// UNLOCK: release API lock
rmApiLockRelease();
rmStatus = NV_ERR_INVALID_STATE;
goto done;
}
//
// Note that this call is valid for both "actual" devices and ctrl
// devices. In particular, NV_ESC_ALLOC_OS_EVENT can be used with
// both types of devices.
// But, the ctl_fd passed in should always correspond to a control FD.
//
ctl_nvfp = nv_get_file_private(params->ctl_fd,
NV_TRUE, /* require ctl fd */
&priv);
if (ctl_nvfp == NULL)
{
// UNLOCK: release API lock
rmApiLockRelease();
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
// Disallow self-referential links, and disallow links to FDs that
// themselves have a link.
if ((ctl_nvfp == nvfp) || (ctl_nvfp->ctl_nvfp != NULL))
{
nv_put_file_private(priv);
// UNLOCK: release API lock
rmApiLockRelease();
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
//
// nvfp->ctl_nvfp is read outside the lock, so set it atomically.
// Note that once set, this can never be removed until the fd
// associated with nvfp is closed. We hold on to 'priv' until the
// fd is closed, too, to ensure that the fd associated with
// ctl_nvfp remains valid.
//
portAtomicSetSize(&nvfp->ctl_nvfp, ctl_nvfp);
nvfp->ctl_nvfp_priv = priv;
// UNLOCK: release API lock
rmApiLockRelease();
// NOTE: nv_put_file_private(priv) is not called here. It MUST be
// called during cleanup of this nvfp.
rmStatus = NV_OK;
break;
}
default:
{
NV_PRINTF(LEVEL_ERROR, "unknown NVRM ioctl command: 0x%x\n", cmd);
goto done;
}
}
rmStatus = NV_OK;
done:
return rmStatus;
}

View File

@@ -0,0 +1,299 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h>
#include <rmconfig.h>
#include <gpu/subdevice/subdevice.h>
#include <ctrl/ctrl0080/ctrl0080unix.h>
#include <ctrl/ctrl2080/ctrl2080unix.h>
//
// Stub implementations of the RM dynamic power management entry points.
// Dynamic power management is not supported in this build; query-style
// calls report success or "?" so callers can proceed without it.
//

// Stub: no deferred GPU wakeup to schedule; report success.
NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(
    nvidia_stack_t *sp,
    nv_state_t *nv
)
{
    return NV_OK;
}

// Stub: dynamic power management is never initialized here.
void NV_API_CALL rm_init_dynamic_power_management(
    nvidia_stack_t *sp,
    nv_state_t *nv,
    NvBool bPr3AcpiMethodPresent
)
{
}

// Stub: nothing to tear down (see rm_init_dynamic_power_management above).
void NV_API_CALL rm_cleanup_dynamic_power_management(
    nvidia_stack_t *sp,
    nv_state_t *nv
)
{
}

// Stub: taking a dynamic-power reference always succeeds (it is a no-op).
NV_STATUS NV_API_CALL rm_ref_dynamic_power(
    nvidia_stack_t *sp,
    nv_state_t *nv,
    nv_dynamic_power_mode_t mode
)
{
    return NV_OK;
}

// Stub: releasing a dynamic-power reference is a no-op.
void NV_API_CALL rm_unref_dynamic_power(
    nvidia_stack_t *sp,
    nv_state_t *nv,
    nv_dynamic_power_mode_t mode
)
{
}

// Stub: power transitions (enter/exit) succeed without doing anything.
NV_STATUS NV_API_CALL rm_transition_dynamic_power(
    nvidia_stack_t *sp,
    nv_state_t *nv,
    NvBool bEnter
)
{
    return NV_OK;
}

// Stub: video memory power status is unknown ("?") on this platform.
const char* NV_API_CALL rm_get_vidmem_power_status(
    nvidia_stack_t *sp,
    nv_state_t *pNv
)
{
    return "?";
}

// Stub: dynamic power management status is unknown ("?") on this platform.
const char* NV_API_CALL rm_get_dynamic_power_management_status(
    nvidia_stack_t *sp,
    nv_state_t *pNv
)
{
    return "?";
}

// Stub: GCx (GC6/GCOFF) support level is unknown ("?") on this platform.
const char* NV_API_CALL rm_get_gpu_gcx_support(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    NvBool bGcxTypeGC6
)
{
    return "?";
}
// Stub control call: GC6 blocker refcount is accepted but not tracked.
// NOTE(review): pParams is not inspected or filled in — confirm callers
// treat the output fields as optional for this control.
NV_STATUS
subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams
)
{
    return NV_OK;
}

// Stub control call: GCOFF allow/disallow requests are accepted as no-ops.
NV_STATUS
subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *pParams
)
{
    return NV_OK;
}

// Stub control call: audio dynamic power requests are accepted as no-ops.
NV_STATUS
subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *pParams
)
{
    return NV_OK;
}

// Stub: no GC6 console refcount is maintained in this build.
void
RmUpdateGc6ConsoleRefCount
(
    nv_state_t *nv,
    NvBool bIncrease
)
{
}

// Stub: S0ix-based power management is not initialized here.
void
RmInitS0ixPowerManagement
(
    nv_state_t *nv
)
{
}

// Stub: deferred dynamic power management is not initialized here.
void
RmInitDeferredDynamicPowerManagement
(
    nv_state_t *nv
)
{
}

// Stub: nothing to destroy (see RmInitDeferredDynamicPowerManagement).
void
RmDestroyDeferredDynamicPowerManagement
(
    nv_state_t *nv
)
{
}

// Stub: display change events require no handling in this build.
void RmHandleDisplayChange
(
    nvidia_stack_t *sp,
    nv_state_t *nv
)
{
}
// Stub: OS-level dynamic-power reference always succeeds (no-op).
NV_STATUS
os_ref_dynamic_power
(
    nv_state_t *nv,
    nv_dynamic_power_mode_t mode
)
{
    return NV_OK;
}

// Stub: OS-level dynamic-power unreference is a no-op.
void
os_unref_dynamic_power
(
    nv_state_t *nv,
    nv_dynamic_power_mode_t mode
)
{
}

// Stub: NVPCF client power limits are not available on this platform.
// NOTE(review): limitRated/limitCurr are left unmodified — callers must
// check the NV_ERR_NOT_SUPPORTED status before reading them.
NV_STATUS NV_API_CALL rm_get_clientnvpcf_power_limits(
    nvidia_stack_t *sp,
    nv_state_t *nv,
    NvU32 *limitRated,
    NvU32 *limitCurr
)
{
    return NV_ERR_NOT_SUPPORTED;
}

// Stub control call: VT switch requests are accepted as no-ops.
NV_STATUS
deviceCtrlCmdOsUnixVTSwitch_IMPL
(
    Device *pDevice,
    NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams
)
{
    return NV_OK;
}

// Stub control call: reports success without populating framebuffer info.
// NOTE(review): pParams is not written — confirm callers zero-initialize it.
NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL(Device *pDevice,
                                              NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams)
{
    return NV_OK;
}

// Stub: saving a low-resolution console mode is not supported here.
NV_STATUS NV_API_CALL rm_save_low_res_mode(
    nvidia_stack_t *sp,
    nv_state_t *pNv
)
{
    return NV_ERR_NOT_SUPPORTED;
}

// Stub top-half ISR: claims the interrupt (NV_TRUE) and never requests
// bottom-half processing.
// NOTE(review): returning NV_TRUE unconditionally means this build always
// reports the interrupt as handled — confirm that is intended for shared
// interrupt lines.
NvBool NV_API_CALL rm_isr(
    nvidia_stack_t *sp,
    nv_state_t *nv,
    NvU32 *NeedBottomHalf
)
{
    *NeedBottomHalf = NV_FALSE;
    return NV_TRUE;
}

// Stub: locked bottom-half ISR does nothing (never requested by rm_isr).
void NV_API_CALL rm_isr_bh(
    nvidia_stack_t *sp,
    nv_state_t *pNv
)
{
}

// Stub: unlocked bottom-half ISR does nothing.
void NV_API_CALL rm_isr_bh_unlocked(
    nvidia_stack_t *sp,
    nv_state_t *pNv
)
{
}

// Stub: MMU fault copying succeeds without copying anything.
// NOTE(review): *faultsCopied is left unmodified in all three fault stubs
// below — confirm callers initialize it before the call.
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(
    nvidia_stack_t *sp,
    nv_state_t *nv,
    NvU32 *faultsCopied
)
{
    return NV_OK;
}

// Stub: MMU fault handling succeeds without doing anything.
NV_STATUS NV_API_CALL rm_gpu_handle_mmu_faults(
    nvidia_stack_t *sp,
    nv_state_t *nv,
    NvU32 *faultsCopied
)
{
    return NV_OK;
}

// Stub: lock-free MMU fault copy succeeds without copying anything.
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked(
    nvidia_stack_t *sp,
    nv_state_t *nv,
    NvU32 *faultsCopied
)
{
    return NV_OK;
}

// Stub: the chipset is reported as not I/O coherent.
NvBool NV_API_CALL rm_is_chipset_io_coherent
(
    nvidia_stack_t *sp
)
{
    return NV_FALSE;
}

// Stub: write-combined I/O mappings are not disabled.
NvBool NV_API_CALL rm_disable_iomap_wc(void)
{
    return NV_FALSE;
}

// Stub: no x86 emulation state is needed; init trivially succeeds.
NV_STATUS RmInitX86EmuState(OBJGPU *pGpu)
{
    return NV_OK;
}

// Stub: nothing to free (see RmInitX86EmuState).
void RmFreeX86EmuState(OBJGPU *pGpu)
{
}

View File

@@ -0,0 +1,35 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvtypes.h>
#include <os-interface.h>
/*
 * Freestanding memset shim backed by the OS layer.
 *
 * os_mem_set() takes an NvU32 length, but memset's length is an NvUPtr
 * (pointer-width). The previous single `(NvU32)n` cast silently truncated
 * lengths above 4 GiB on 64-bit builds, leaving the tail of the region
 * unwritten. Fill in NvU32-sized chunks so any length is handled.
 *
 * Returns s, per the C standard memset contract.
 */
void* memset(void* s, int c, NvUPtr n)
{
    NvU8 *p = (NvU8 *)s;

    while (n > 0)
    {
        // Largest span os_mem_set() can take in one call.
        NvU32 chunk = (n > 0xFFFFFFFFU) ? 0xFFFFFFFFU : (NvU32)n;
        os_mem_set(p, (NvU8)c, chunk);
        p += chunk;
        n -= chunk;
    }
    return s;
}
/*
 * Freestanding memcpy shim backed by the OS layer.
 *
 * os_mem_copy() takes an NvU32 length; the previous single `(NvU32)n` cast
 * truncated copies larger than 4 GiB on 64-bit builds. Copy in NvU32-sized
 * chunks instead. Regions must not overlap (standard memcpy contract).
 *
 * Returns dest, per the C standard memcpy contract.
 */
void* memcpy(void* dest, const void* src, NvUPtr n)
{
    NvU8 *d = (NvU8 *)dest;
    const NvU8 *s = (const NvU8 *)src;

    while (n > 0)
    {
        // Largest span os_mem_copy() can take in one call.
        NvU32 chunk = (n > 0xFFFFFFFFU) ? 0xFFFFFFFFU : (NvU32)n;
        os_mem_copy(d, s, chunk);
        d += chunk;
        s += chunk;
        n -= chunk;
    }
    return dest;
}

View File

@@ -0,0 +1,150 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvstatus.h"
#include "os/os.h"
#include "nv.h"
#include "nv-hypervisor.h"
//
// Stub implementations of the vGPU / hypervisor entry points.
// Virtualization is not supported in this build: the hypervisor type is
// unknown and every vGPU operation reports NV_ERR_NOT_SUPPORTED.
//

// Stub: no hypervisor detection is performed.
HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void)
{
    return OS_HYPERVISOR_UNKNOWN;
}

// Stub: vGPU type enumeration is not supported.
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    NvU32 *numVgpuTypes,
    NvU32 **vgpuTypeIds,
    NvBool isVirtfn
)
{
    return NV_ERR_NOT_SUPPORTED;
}

// Stub: SR-IOV virtual-function info processing is not supported.
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    NvU8 cmd,
    NvU32 domain,
    NvU8 bus,
    NvU8 slot,
    NvU8 function,
    NvBool isMdevAttached,
    void *vf_pci_info
)
{
    return NV_ERR_NOT_SUPPORTED;
}

// Stub: vGPU type queries are not supported.
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    NvU32 vgpuTypeId,
    char *buffer,
    int type_info,
    NvU8 devfn
)
{
    return NV_ERR_NOT_SUPPORTED;
}

// Stub: vGPU device creation is not supported.
NV_STATUS NV_API_CALL nv_vgpu_create_request(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    const NvU8 *pMdevUuid,
    NvU32 vgpuTypeId,
    NvU16 *vgpuId,
    NvU32 gpuPciBdf
)
{
    return NV_ERR_NOT_SUPPORTED;
}

// Stub: vGPU device state updates are not supported.
NV_STATUS NV_API_CALL nv_vgpu_update_request(
    nvidia_stack_t *sp ,
    const NvU8 *pMdevUuid,
    VGPU_DEVICE_STATE deviceState,
    NvU64 *offsets,
    NvU64 *sizes,
    const char *configParams
)
{
    return NV_ERR_NOT_SUPPORTED;
}

// Stub: sparse-mmap region queries are not supported.
NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(
    nvidia_stack_t *sp ,
    nv_state_t *pNv,
    const NvU8 *pMdevUuid,
    NvU64 **offsets,
    NvU64 **sizes,
    NvU32 *numAreas
)
{
    return NV_ERR_NOT_SUPPORTED;
}

// Stub: GPU event binding for virtualization is not supported.
NV_STATUS NV_API_CALL nv_gpu_bind_event(
    nvidia_stack_t *sp
)
{
    return NV_ERR_NOT_SUPPORTED;
}

// Stub: starting a vGPU instance is not supported.
NV_STATUS NV_API_CALL nv_vgpu_start(
    nvidia_stack_t *sp,
    const NvU8 *pMdevUuid,
    void *waitQueue,
    NvS32 *returnStatus,
    NvU8 *vmName,
    NvU32 qemuPid
)
{
    return NV_ERR_NOT_SUPPORTED;
}

// Stub: deleting a vGPU instance is not supported.
NV_STATUS NV_API_CALL nv_vgpu_delete(
    nvidia_stack_t *sp,
    const NvU8 *pMdevUuid,
    NvU16 vgpuId
)
{
    return NV_ERR_NOT_SUPPORTED;
}

// Stub: vGPU BAR information queries are not supported.
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    const NvU8 *pMdevUuid,
    NvU64 *size,
    NvU32 regionIndex,
    void *pVgpuVfioRef
)
{
    return NV_ERR_NOT_SUPPORTED;
}

// Stub: no VGX-specific registry keys are applied in this build.
void initVGXSpecificRegistry(OBJGPU *pGpu)
{}

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,88 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/***************************** HW State Routines ***************************\
* *
* Fills in os specific function pointers for the Unix OS object. *
* *
\***************************************************************************/
#include <osfuncs.h>
#include <os/os.h>
// Forward declarations for the OBJOS initialization helpers below.
static void initOSSpecificFunctionPointers(OBJOS *);
static void initMiscOSFunctionPointers(OBJOS *);
static void initUnixOSFunctionPointers(OBJOS *);
static void initOSSpecificProperties(OBJOS *);

//
// Initialize the Unix OBJOS instance: install the OS-specific function
// pointers, then set the Unix default PDB properties.
//
void
osInitObjOS(OBJOS *pOS)
{
    initOSSpecificFunctionPointers(pOS);
    initOSSpecificProperties(pOS);
}

// Install all OS-specific function pointer groups on pOS.
static void
initOSSpecificFunctionPointers(OBJOS *pOS)
{
    initMiscOSFunctionPointers(pOS);
    initUnixOSFunctionPointers(pOS);
}

// Install the work-item queueing entry points.
static void
initMiscOSFunctionPointers(OBJOS *pOS)
{
    pOS->osQueueWorkItem = osQueueWorkItem;
    pOS->osQueueWorkItemWithFlags = osQueueWorkItemWithFlags;
    pOS->osQueueSystemWorkItem = osQueueSystemWorkItem;
}

// Install the Unix-specific entry points: x86-64 CPU helpers (where
// applicable), the ACPI method wrappers, and the debug-breakpoint query.
static void
initUnixOSFunctionPointers(OBJOS *pOS)
{
#if defined(NVCPU_X86_64)
    // Raw CPU access helpers are only meaningful on x86-64.
    pOS->osNv_rdcr4 = nv_rdcr4;
    pOS->osNv_cpuid = nv_cpuid;
#endif
    pOS->osCallACPI_DSM = osCallACPI_DSM;
    pOS->osCallACPI_DDC = osCallACPI_DDC;
    pOS->osCallACPI_NVHG_ROM = osCallACPI_NVHG_ROM;
    pOS->osCallACPI_DOD = osCallACPI_DOD;
    pOS->osCallACPI_MXDM = osCallACPI_MXDM;
    pOS->osCallACPI_MXDS = osCallACPI_MXDS;
    pOS->osDbgBreakpointEnabled = osDbgBreakpointEnabled;
}

// Set the PDB properties that differ from RM defaults on Unix.
static void
initOSSpecificProperties
(
    OBJOS *pOS
)
{
    pOS->setProperty(pOS, PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT, NV_TRUE);
    pOS->setProperty(pOS, PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE, NV_TRUE);
    pOS->setProperty(pOS, PDB_PROP_OS_LIMIT_GPU_RESET, NV_TRUE);
}

View File

@@ -0,0 +1,145 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h>
#include <nv-priv.h>
#include <osapi.h>
#include <core/locks.h>
//
// Perform the GPU state transition for a system power-management action
// (standby/hibernate entry, or resume) on Tegra. Caller must hold the API
// and GPU locks (see rm_power_management).
//
static NV_STATUS
RmPowerManagementInternalTegra(
    OBJGPU *pGpu,
    nv_pm_action_t pmAction
)
{
    //
    // Default to NV_OK. there may cases where resman is loaded, but
    // no devices are allocated (we're still at the console). in these
    // cases, it's fine to let the system do whatever it wants.
    //
    NV_STATUS rmStatus = NV_OK;
    if (pGpu)
    {
        nv_state_t *nv = NV_GET_NV_STATE(pGpu);
        nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
        switch (pmAction)
        {
            case NV_PM_ACTION_HIBERNATE:
                nvp->pm_state.InHibernate = NV_TRUE;
                //
                // NOTE(review): there is no break here, so control falls
                // through into the STANDBY case, which immediately overwrites
                // InHibernate with NV_FALSE — the NV_TRUE store above is dead.
                // As written, hibernate is handled identically to standby
                // (including taking the gpuStateLoad path on resume). Confirm
                // whether a `break;` was intended or hibernate==standby is
                // deliberate on this platform. /* fall through */
                //
            case NV_PM_ACTION_STANDBY:
                nvp->pm_state.InHibernate = NV_FALSE;
                pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH, NV_TRUE);
                // Preserving unload: keep allocations, quiesce hardware. Add
                // the GC6 flag when a GC6 entry is in progress.
                rmStatus = gpuStateUnload(pGpu,
                                          IS_GPU_GC6_STATE_ENTERING(pGpu) ?
                                          GPU_STATE_FLAGS_PRESERVING | GPU_STATE_FLAGS_PM_TRANSITION | GPU_STATE_FLAGS_GC6_TRANSITION :
                                          GPU_STATE_FLAGS_PRESERVING | GPU_STATE_FLAGS_PM_TRANSITION);
                pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_STANDBY, NV_TRUE);
                break;
            case NV_PM_ACTION_RESUME:
                // Only the standby path is resumed here; with the fallthrough
                // above, InHibernate is always NV_FALSE at this point.
                if (!nvp->pm_state.InHibernate)
                {
                    pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH, NV_TRUE);
                    rmStatus = gpuStateLoad(pGpu,
                                            IS_GPU_GC6_STATE_ENTERING(pGpu) ?
                                            GPU_STATE_FLAGS_PRESERVING | GPU_STATE_FLAGS_PM_TRANSITION | GPU_STATE_FLAGS_GC6_TRANSITION :
                                            GPU_STATE_FLAGS_PRESERVING | GPU_STATE_FLAGS_PM_TRANSITION);
                    pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_STANDBY, NV_FALSE);
                    pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH, NV_FALSE);
                    pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH, NV_FALSE);
                }
                break;
            default:
                rmStatus = NV_ERR_INVALID_ARGUMENT;
                break;
        }
    }
    return rmStatus;
}
//
// OS-layer entry point for system power-management events. Drains the work
// queue, takes the API and GPU locks in order, and delegates the actual
// state transition to RmPowerManagementInternalTegra(). On a failed
// suspend, a best-effort resume is attempted to bring the GPU back up.
//
NV_STATUS NV_API_CALL rm_power_management(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    nv_pm_action_t pmAction
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus = NV_OK;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
    // Flush pending work items before changing power state so none run
    // against a suspended GPU.
    NV_ASSERT_OK(os_flush_work_queue(pNv->queue));
    // LOCK: acquire API lock
    if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DYN_POWER)) == NV_OK)
    {
        OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
        // pGpu may be NULL if the adapter was never initialized; that is
        // not an error for a PM notification.
        if (pGpu != NULL)
        {
            // LOCK: acquire GPUs lock
            if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DYN_POWER)) == NV_OK)
            {
                rmStatus = RmPowerManagementInternalTegra(pGpu, pmAction);
                //
                // RmPowerManagementInternalTegra() is most likely to fail due to
                // gpuStateUnload() failures deep in the RM's GPU power
                // management paths. However, those paths make no
                // attempt to unwind in case of errors. Rather, they
                // soldier on and simply report an error at the very end.
                // GPU software state meanwhile will indicate the GPU
                // has been suspended.
                //
                // Sadly, in case of an error during suspend/hibernate,
                // the only path forward here is to attempt to resume the
                // GPU, accepting that the odds of success will vary.
                //
                if (rmStatus != NV_OK && pmAction != NV_PM_ACTION_RESUME)
                {
                    // Best effort; the original failure status is returned.
                    RmPowerManagementInternalTegra(pGpu, NV_PM_ACTION_RESUME);
                }
                // UNLOCK: release GPUs lock
                rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
            }
        }
        // UNLOCK: release API lock
        rmApiLockRelease();
    }
    // Flush again so anything queued during the transition completes before
    // the OS proceeds.
    NV_ASSERT_OK(os_flush_work_queue(pNv->queue));
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

View File

@@ -0,0 +1,524 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h>
#include <nv-priv.h>
#include <nvos.h>
#if defined(DEBUG_REGISTRY)
#define DBG_REG_PRINTF(a, ...) \
NV_PRINTF(LEVEL_INFO, a, ##__VA_ARGS__)
#else
#define DBG_REG_PRINTF(a, ...)
#endif
static NvS32 stringCaseCompare(
const char *string1,
const char *string2
)
{
NvU8 c1, c2;
do
{
c1 = *string1, c2 = *string2;
if (c1 >= 'A' && c1 <= 'Z')
c1 += ('a' - 'A');
if (c2 >= 'A' && c2 <= 'Z')
c2 += ('a' - 'A');
string1++, string2++;
}
while ((c1 == c2) && (c1 != '\0'));
return (c1 - c2);
}
static nv_reg_entry_t *the_registry = NULL;
//
// Allocate a new registry entry keyed by regParmStr and link it at the head
// of the per-GPU list (when nv has private state) or the global list
// otherwise. The entry's type starts as UNKNOWN; the caller fills in the
// type and payload. Returns the new entry, or NULL on allocation failure.
//
static nv_reg_entry_t* regCreateNewRegistryKey(
    nv_state_t *nv,
    const char *regParmStr
)
{
    nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
    nv_reg_entry_t *new_reg;
    char *new_ParmStr;
    NvU32 parm_size;

    if (regParmStr == NULL)
    {
        DBG_BREAKPOINT();
        return NULL;
    }

    new_reg = portMemAllocNonPaged(sizeof(nv_reg_entry_t));
    if (NULL == new_reg)
    {
        NV_PRINTF(LEVEL_ERROR, "failed to grow registry\n");
        return NULL;
    }
    portMemSet(new_reg, 0, sizeof(nv_reg_entry_t));

    //
    // The previous `if (regParmStr != NULL)` re-check here was dead: the
    // early return above already guarantees a non-NULL key.
    //
    parm_size = (portStringLength(regParmStr) + 1);
    new_ParmStr = portMemAllocNonPaged(parm_size);
    if (NULL == new_ParmStr)
    {
        NV_PRINTF(LEVEL_ERROR, "failed to allocate registry param string\n");
        portMemFree(new_reg);
        return NULL;
    }
    NV_ASSERT(parm_size <= NVOS38_MAX_REGISTRY_STRING_LENGTH);
    if (portMemCopy(new_ParmStr, parm_size, regParmStr, parm_size) == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "failed to copy registry param string\n");
        portMemFree(new_ParmStr);
        portMemFree(new_reg);
        return NULL;
    }

    new_reg->regParmStr = new_ParmStr;
    new_reg->type = NV_REGISTRY_ENTRY_TYPE_UNKNOWN;

    // Link at the head of whichever list owns this entry.
    if (nvp != NULL)
    {
        new_reg->next = nvp->pRegistry;
        nvp->pRegistry = new_reg;
        DBG_REG_PRINTF("local registry now at 0x%p\n", nvp->pRegistry);
    }
    else
    {
        new_reg->next = the_registry;
        the_registry = new_reg;
        DBG_REG_PRINTF("global registry now at 0x%p\n", the_registry);
    }
    return new_reg;
}
// Free one registry entry: key string, payload (if any), and the node.
// The caller is responsible for unlinking the entry from its list first.
static NV_STATUS regFreeEntry(nv_reg_entry_t *tmp)
{
    portMemFree(tmp->regParmStr);
    tmp->regParmStr = NULL;

    portMemFree(tmp->pdata);
    tmp->pdata = NULL;
    tmp->len = 0;

    portMemFree(tmp);
    return NV_OK;
}
//
// Search one registry list for an entry matching (regParmStr, type).
// The key comparison is case-insensitive. Returns NULL if not found.
//
static nv_reg_entry_t* regSearchOneList(
    nv_reg_entry_t *tmp,
    const char *regParmStr,
    NvU32 type
)
{
    while ((tmp != NULL) && (tmp->regParmStr != NULL))
    {
        DBG_REG_PRINTF(" Testing against %s\n",
                       tmp->regParmStr);
        if ((stringCaseCompare(tmp->regParmStr, regParmStr) == 0) &&
            (type == tmp->type))
        {
            DBG_REG_PRINTF(" found a match!\n");
            return tmp;
        }
        tmp = tmp->next;
    }
    return NULL;
}

//
// Look up a registry entry by key and type. The per-GPU list (if nv has
// private state) is searched first, then the global list. On a hit,
// *bGlobalEntry (when non-NULL) reports which list the entry came from.
// The two previously duplicated search loops are now shared via
// regSearchOneList().
//
static nv_reg_entry_t* regFindRegistryEntry(
    nv_state_t *nv,
    const char *regParmStr,
    NvU32 type,
    NvBool *bGlobalEntry
)
{
    nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
    nv_reg_entry_t *match;

    DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);

    if (nvp != NULL)
    {
        DBG_REG_PRINTF(" local registry at 0x%p\n", nvp->pRegistry);
        match = regSearchOneList(nvp->pRegistry, regParmStr, type);
        if (match != NULL)
        {
            if (bGlobalEntry)
                *bGlobalEntry = NV_FALSE;
            return match;
        }
    }

    DBG_REG_PRINTF(" global registry at 0x%p\n", the_registry);
    match = regSearchOneList(the_registry, regParmStr, type);
    if (match != NULL)
    {
        if (bGlobalEntry)
            *bGlobalEntry = NV_TRUE;
        return match;
    }

    DBG_REG_PRINTF(" no match\n");
    return NULL;
}
//
// Store a DWORD value under regParmStr. An existing DWORD entry of the
// matching scope (global when nv == NULL, per-GPU otherwise) is updated in
// place; otherwise a new entry is created. Writing "ResmanDebugLevel" also
// applies the value to the live debug print level.
//
NV_STATUS RmWriteRegistryDword(
    nv_state_t *nv,
    const char *regParmStr,
    NvU32 Data
)
{
    nv_reg_entry_t *tmp;
    NvBool bGlobalEntry;
    if (regParmStr == NULL)
    {
        return NV_ERR_INVALID_ARGUMENT;
    }
    DBG_REG_PRINTF("%s: %s -> 0x%x\n", __FUNCTION__, regParmStr, Data);
    tmp = regFindRegistryEntry(nv, regParmStr,
                               NV_REGISTRY_ENTRY_TYPE_DWORD, &bGlobalEntry);
    // If we found an entry and we were looking for a global entry and
    // found a global, or we were looking for a per-GPU entry and found a
    // per-GPU entry
    if (tmp != NULL &&
        ((nv == NULL && bGlobalEntry) ||
         (nv != NULL && !bGlobalEntry)))
    {
        tmp->data = Data;
        // Special key: changing the debug level takes effect immediately.
        if (stringCaseCompare(regParmStr, "ResmanDebugLevel") == 0)
        {
            os_dbg_set_level(Data);
        }
        return NV_OK;
    }
    // No scope-matching entry: create one and fill it in.
    tmp = regCreateNewRegistryKey(nv, regParmStr);
    if (tmp == NULL)
        return NV_ERR_GENERIC;
    tmp->type = NV_REGISTRY_ENTRY_TYPE_DWORD;
    tmp->data = Data;
    return NV_OK;
}
//
// Read a DWORD value stored under regParmStr. If no DWORD entry exists,
// falls back to a BINARY entry of the same key and returns its first four
// bytes. Returns NV_ERR_GENERIC if neither is found.
//
NV_STATUS RmReadRegistryDword(
    nv_state_t *nv,
    const char *regParmStr,
    NvU32 *Data
)
{
    nv_reg_entry_t *tmp;
    if ((regParmStr == NULL) || (Data == NULL))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }
    DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
    tmp = regFindRegistryEntry(nv, regParmStr,
                               NV_REGISTRY_ENTRY_TYPE_DWORD, NULL);
    if (tmp == NULL)
    {
        // Fallback: a binary blob of at least 4 bytes can serve as a DWORD.
        tmp = regFindRegistryEntry(nv, regParmStr,
                                   NV_REGISTRY_ENTRY_TYPE_BINARY, NULL);
        if ((tmp != NULL) && (tmp->len >= sizeof(NvU32)))
        {
            // NOTE(review): reads pdata through an NvU32 pointer — assumes
            // the allocation is suitably aligned (portMemAllocNonPaged
            // allocations presumably are; confirm).
            *Data = *(NvU32 *)tmp->pdata;
        }
        else
        {
            DBG_REG_PRINTF(" not found\n");
            return NV_ERR_GENERIC;
        }
    }
    else
    {
        *Data = tmp->data;
    }
    DBG_REG_PRINTF(" found in the_registry: 0x%x\n", *Data);
    return NV_OK;
}
//
// Read a binary blob stored under regParmStr into Data. On entry *cbLen is
// the capacity of Data; on success it is updated to the stored length.
// Returns NV_ERR_GENERIC if the key is absent or the buffer is too small.
//
NV_STATUS RmReadRegistryBinary(
    nv_state_t *nv,
    const char *regParmStr,
    NvU8 *Data,
    NvU32 *cbLen
)
{
    nv_reg_entry_t *entry;

    if ((regParmStr == NULL) || (Data == NULL) || (cbLen == NULL))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
    entry = regFindRegistryEntry(nv, regParmStr,
                                 NV_REGISTRY_ENTRY_TYPE_BINARY, NULL);
    if (entry == NULL)
    {
        DBG_REG_PRINTF(" not found\n");
        return NV_ERR_GENERIC;
    }

    DBG_REG_PRINTF(" found\n");
    if (*cbLen < entry->len)
    {
        NV_PRINTF(LEVEL_ERROR,
                  "buffer (length: %u) is too small (data length: %u)\n",
                  *cbLen, entry->len);
        return NV_ERR_GENERIC;
    }

    portMemCopy((NvU8 *)Data, *cbLen, (NvU8 *)entry->pdata, entry->len);
    *cbLen = entry->len;
    return NV_OK;
}
//
// Store a binary blob of cbLen bytes under regParmStr. An existing BINARY
// entry of the matching scope is replaced; otherwise a new entry is
// created. The new payload is allocated and filled BEFORE the old one is
// released, so an allocation failure leaves any existing entry intact
// (previously a failed allocation left the entry with freed data and a
// stale length). cbLen == 0 is rejected rather than passed to
// portMemAllocNonPaged(0).
//
NV_STATUS RmWriteRegistryBinary(
    nv_state_t *nv,
    const char *regParmStr,
    NvU8 *Data,
    NvU32 cbLen
)
{
    nv_reg_entry_t *tmp;
    NvBool bGlobalEntry;
    NvU8 *pNewData;

    if ((regParmStr == NULL) || (Data == NULL) || (cbLen == 0))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);

    // Build the replacement payload first (allocate-then-swap).
    pNewData = portMemAllocNonPaged(cbLen);
    if (NULL == pNewData)
    {
        NV_PRINTF(LEVEL_ERROR, "failed to write binary registry entry\n");
        return NV_ERR_GENERIC;
    }
    portMemCopy((NvU8 *)pNewData, cbLen, (NvU8 *)Data, cbLen);

    tmp = regFindRegistryEntry(nv, regParmStr,
                               NV_REGISTRY_ENTRY_TYPE_BINARY, &bGlobalEntry);
    // Reuse the entry only when its scope matches the request: global
    // lookup found a global entry, or per-GPU lookup found a per-GPU entry.
    if (tmp != NULL &&
        ((nv == NULL && bGlobalEntry) ||
         (nv != NULL && !bGlobalEntry)))
    {
        portMemFree(tmp->pdata);
    }
    else
    {
        tmp = regCreateNewRegistryKey(nv, regParmStr);
        if (tmp == NULL)
        {
            NV_PRINTF(LEVEL_ERROR, "failed to create binary registry entry\n");
            portMemFree(pNewData);
            return NV_ERR_GENERIC;
        }
    }

    tmp->type = NV_REGISTRY_ENTRY_TYPE_BINARY;
    tmp->pdata = pNewData;
    tmp->len = cbLen;
    return NV_OK;
}
//
// Store a NUL-terminated string under regParmStr. bufferLength includes the
// terminator. An existing STRING entry of the matching scope is replaced;
// otherwise a new entry is created. The payload is built BEFORE the old one
// is released, so an allocation failure leaves any existing entry intact.
//
// Fix: bufferLength == 0 is now rejected. Previously it underflowed the
// `bufferLength - 1` arithmetic (copy length wrapped to 0xFFFFFFFF) and
// wrote the terminator to pdata[-1].
//
NV_STATUS RmWriteRegistryString(
    nv_state_t *nv,
    const char *regParmStr,
    const char *buffer,
    NvU32 bufferLength
)
{
    nv_reg_entry_t *tmp;
    NvBool bGlobalEntry;
    NvU8 *pNewData;

    if ((regParmStr == NULL) || (buffer == NULL) || (bufferLength == 0))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);

    // Build the replacement payload first (allocate-then-swap).
    pNewData = portMemAllocNonPaged(bufferLength);
    if (pNewData == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "failed to write a string registry entry!\n");
        return NV_ERR_NO_MEMORY;
    }
    portMemCopy((void *)pNewData, bufferLength, buffer, (bufferLength - 1));
    pNewData[bufferLength - 1] = '\0';  // guarantee NUL termination

    tmp = regFindRegistryEntry(nv, regParmStr,
                               NV_REGISTRY_ENTRY_TYPE_STRING, &bGlobalEntry);
    // Reuse the entry only when its scope matches the request: global
    // lookup found a global entry, or per-GPU lookup found a per-GPU entry.
    if (tmp != NULL &&
        ((nv == NULL && bGlobalEntry) ||
         (nv != NULL && !bGlobalEntry)))
    {
        portMemFree(tmp->pdata);
    }
    else
    {
        tmp = regCreateNewRegistryKey(nv, regParmStr);
        if (tmp == NULL)
        {
            NV_PRINTF(LEVEL_ERROR,
                      "failed to allocate a string registry entry!\n");
            portMemFree(pNewData);
            return NV_ERR_INSUFFICIENT_RESOURCES;
        }
    }

    tmp->type = NV_REGISTRY_ENTRY_TYPE_STRING;
    tmp->pdata = pNewData;
    tmp->len = bufferLength;
    return NV_OK;
}
//
// Read a string stored under regParmStr into buffer. On entry
// *pBufferLength is the capacity of buffer; on success it is updated to the
// stored length (terminator included). Returns NV_ERR_GENERIC if the key is
// absent and NV_ERR_BUFFER_TOO_SMALL if the buffer cannot hold the value.
//
// Fix: the unconditional `*buffer = '\0'` previously wrote one byte into
// the caller's buffer even when its declared capacity was zero; a
// zero-capacity buffer is now rejected before any write.
//
NV_STATUS RmReadRegistryString(
    nv_state_t *nv,
    const char *regParmStr,
    NvU8 *buffer,
    NvU32 *pBufferLength
)
{
    NvU32 bufferLength;
    nv_reg_entry_t *tmp;

    if ((regParmStr == NULL) || (buffer == NULL) || (pBufferLength == NULL))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
    bufferLength = *pBufferLength;
    *pBufferLength = 0;

    // A zero-capacity buffer cannot hold even the terminator.
    if (bufferLength == 0)
    {
        return NV_ERR_BUFFER_TOO_SMALL;
    }
    *buffer = '\0';

    tmp = regFindRegistryEntry(nv, regParmStr,
                               NV_REGISTRY_ENTRY_TYPE_STRING, NULL);
    if (tmp == NULL)
    {
        return NV_ERR_GENERIC;
    }

    if (bufferLength < tmp->len)
    {
        NV_PRINTF(LEVEL_ERROR,
                  "buffer (length: %u) is too small (data length: %u)\n",
                  bufferLength, tmp->len);
        return NV_ERR_BUFFER_TOO_SMALL;
    }

    portMemCopy((void *)buffer, bufferLength, (void *)tmp->pdata, tmp->len);
    *pBufferLength = tmp->len;
    return NV_OK;
}
// Initialize the registry by delegating to the OS layer; log on failure so
// initialization problems are visible in the kernel log.
NV_STATUS RmInitRegistry(void)
{
    NV_STATUS status = os_registry_init();

    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "failed to initialize the OS registry!\n");
    }
    return status;
}
//
// Tear down a registry list: the per-GPU list when nv has private state,
// the global list otherwise. The list head is detached first, then each
// entry is released via regFreeEntry().
//
NV_STATUS RmDestroyRegistry(nv_state_t *nv)
{
    nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
    nv_reg_entry_t *head;

    // Detach the appropriate list so it cannot be observed mid-teardown.
    if (nvp != NULL)
    {
        head = nvp->pRegistry;
        nvp->pRegistry = NULL;
    }
    else
    {
        head = the_registry;
        the_registry = NULL;
    }

    // Walk and free; capture 'next' before the node is released.
    while (head != NULL)
    {
        nv_reg_entry_t *next = head->next;
        regFreeEntry(head);
        head = next;
    }
    return NV_OK;
}

View File

@@ -0,0 +1,628 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* @file
*
* @brief Provides RmExportObject, RmImportObject, RmFreeObjExportHandle and
* RmGetExportObjectInfo interfaces :
*
* These interfaces allow rm clients to export their objects into
* a unique RmObjExportHandle which another rm client could
* import, even if the source rm client gets destroyed.
*
* RM's device instance may get destroyed asynchronously, in which
* case exported objects residing on that device instance also get
* destroyed. This means it is not possible to import it back, but the
* RmObjExportHandle into which the object had been exported still
* remains valid but no other object could get it.
*
* There are not init/fini routines, it is the responsibility of the
* rest of RM's eco-system to make sure that all RmObjExportHandles get
* freed during driver unload.
*
* The api lock is expected to be held before calling into
* rmobjexportimport.c; do not hold gpu or any other lock.
*/
#include "rmobjexportimport.h"
#include "nvlimits.h"
#include "gpu/device/device.h"
#include "containers/map.h"
#include "rmapi/rmapi.h"
#include "rmapi/rs_utils.h"
#include "class/cl0080.h"
#include "class/cl2080.h"
#include <ctrl/ctrl0000/ctrl0000unix.h>
#include <ctrl/ctrl0000/ctrl0000client.h>
//
// A reference to an RmObjExportHandle
// generated by function RmGenerateObjExportHandle().
//
typedef struct
{
NvU32 deviceInstance;
} RmObjExportHandleRef;
MAKE_MAP(RmObjExportHandleMap, RmObjExportHandleRef);
//
// Memory allocator
//
PORT_MEM_ALLOCATOR *pMemAllocator;
//
// Map RmObjExportHandle -> RmObjExportHandleRef
//
RmObjExportHandleMap objExportHandleMap;
//
// Rm client to use to dup an object exported to RmObjExportHandle. The minimal
// requirement for duping is to have a device object allocated. This rm client
// is simply like any other external rm client and has no any special handling.
//
// We keep this rm client just like any other external rm client: if
// gpu(s)/device gets powered-down/uninitialized, rm objects allocated by
// external rm clients and located on that gpu(s)/device gets freed (the
// os-layer does that). In that way, code in this file doesn't need to worry
// about freeing exported objects located on that gpu(s)/device.
//
NvHandle hObjExportRmClient;
//
// Tracker for device and subdevice handles. For now only one subdevice
// (instance 0) is supported per device.
//
typedef struct
{
NvHandle hRmDevice;
NvHandle hRmSubDevice;
} RmObjExportDevice;
RmObjExportDevice objExportDevice[NV_MAX_DEVICES];
//
// Usage reference counter for static object in this file like rm client used to
// dup an exported object, memory allocator, map etc.
//
NvU64 objExportImportRefCount;
//
// Static functions for internal use to code in this file.
//
static NV_STATUS RmRefObjExportImport (void);
static void RmUnrefObjExportImport (void);
static RmObjExportHandle RmGenerateObjExportHandle (NvU32 deviceInstance);
static NV_STATUS RmUnrefObjExportHandle (RmObjExportHandle hObject);
//
// Drop an RmObjExportHandle: free the duped RM object behind it (if it is
// still alive) and remove the handle from the export map.
//
// Returns NV_ERR_OBJECT_NOT_FOUND if hObject is not a currently exported
// handle; NV_OK otherwise.
//
static NV_STATUS RmUnrefObjExportHandle(RmObjExportHandle hObject)
{
    RM_API               *pRmApi     = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
    RmObjExportHandleRef *pHandleRef = mapFind(&objExportHandleMap, hObject);
    NvHandle              hRmHandle;

    if (pHandleRef == NULL)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    hRmHandle = (NvHandle)mapKey(&objExportHandleMap, pHandleRef);

    //
    // The underlying object may already have been torn down together with its
    // device instance; a failed free is expected in that case, so only warn.
    //
    if (pRmApi->Free(pRmApi, hObjExportRmClient, hRmHandle) != NV_OK)
    {
        NV_PRINTF(LEVEL_WARNING,
                  "Exported object trying to free was zombie in %s\n",
                  __FUNCTION__);
    }

    mapRemove(&objExportHandleMap, pHandleRef);

    return NV_OK;
}
//
// Generate a unique RmObjExportHandle and record it in the export map.
//
// Returns 0 (the invalid handle) when no free handle exists or map
// insertion fails.
//
static RmObjExportHandle RmGenerateObjExportHandle(NvU32 deviceInstance)
{
    //
    // Valid object export handles occupy the range 1 to
    // (MAX_OBJ_EXPORT_HANDLES - 1); handle 0 is reserved as the invalid
    // handle and is never produced here.
    //
#define MAX_OBJ_EXPORT_HANDLES 0x80000

    static NvHandle   hObjExportHandleNext = 1;
    RmObjExportHandle hFirstCandidate      = hObjExportHandleNext;
    RmObjExportHandle hObject              = 0;

    do
    {
        hObject = hObjExportHandleNext++;

        // Wrap the next-handle cursor back to the first valid handle.
        if (hObjExportHandleNext == MAX_OBJ_EXPORT_HANDLES)
        {
            hObjExportHandleNext = 1;
        }

        //
        // Accept the candidate only if it neither collides with the internal
        // rm client handle nor is already present in the export map.
        //
        if ((hObject != hObjExportRmClient) &&
            (mapFind(&objExportHandleMap, hObject) == NULL))
        {
            break;
        }

        hObject = 0;
    } while (hObjExportHandleNext != hFirstCandidate);

    if (hObject != 0)
    {
        RmObjExportHandleRef *pHandleRef =
            mapInsertNew(&objExportHandleMap, hObject);

        if (pHandleRef == NULL)
        {
            hObject = 0;
        }
        else
        {
            pHandleRef->deviceInstance = deviceInstance;
        }
    }

    return hObject;
}
//
// Check that hObject does not collide with any handle used internally by this
// file (the export rm client, or any per-device device/subdevice handle).
//
// Note that mapFind(&objExportHandleMap, hObject) could still fail; checking
// that is the caller's responsibility.
//
static NvBool RmValidateHandleAgainstInternalHandles(RmObjExportHandle hObject)
{
    NvU32 i;

    //
    // No external RmObjExportHandle can be valid before the export rm client
    // exists, nor may it alias the export rm client handle itself.
    //
    if ((objExportImportRefCount == 0) ||
        (hObjExportRmClient == 0) ||
        (hObject == hObjExportRmClient))
    {
        return NV_FALSE;
    }

    for (i = 0; i < NV_ARRAY_ELEMENTS(objExportDevice); i++)
    {
        if (objExportDevice[i].hRmDevice == 0)
        {
            continue;
        }

        if ((hObject == objExportDevice[i].hRmDevice) ||
            (hObject == objExportDevice[i].hRmSubDevice))
        {
            return NV_FALSE;
        }
    }

    return NV_TRUE;
}
//
// Increment reference count of static objects internally
// used by code in this file.
//
static NV_STATUS RmRefObjExportImport(void)
{
NV_STATUS rmStatus = NV_OK;
RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
if ((objExportImportRefCount++) != 0)
{
NV_ASSERT(hObjExportRmClient != 0);
NV_ASSERT(pMemAllocator != NULL);
return NV_OK;
}
rmStatus = pRmApi->AllocWithHandle(pRmApi,
NV01_NULL_OBJECT,
NV01_NULL_OBJECT,
NV01_NULL_OBJECT,
NV01_ROOT,
&hObjExportRmClient);
if (rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Unable to alloc root in %s\n", __FUNCTION__);
goto failed;
}
pMemAllocator = portMemAllocatorCreateNonPaged();
if (pMemAllocator == NULL)
{
NV_PRINTF(LEVEL_ERROR, "Failed to alloc memory allocator in %s\n",
__FUNCTION__);
goto failed;
}
mapInit(&objExportHandleMap, pMemAllocator);
return NV_OK;
failed:
RmUnrefObjExportImport();
return rmStatus;
}
//
// Decrement reference count of static objects internally used by code in this
// file, and free them if reference count reaches to zero.
//
static void RmUnrefObjExportImport(void)
{
RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
if ((--objExportImportRefCount) != 0)
{
return;
}
if (pMemAllocator != NULL)
{
NvU32 i;
for (i = 0; i < NV_ARRAY_ELEMENTS(objExportDevice); i++)
{
if (objExportDevice[i].hRmDevice != 0)
{
RmUnrefObjExportHandle(objExportDevice[i].hRmSubDevice);
objExportDevice[i].hRmSubDevice = 0;
RmUnrefObjExportHandle(objExportDevice[i].hRmDevice);
objExportDevice[i].hRmDevice = 0;
}
}
mapDestroy(&objExportHandleMap);
portMemAllocatorRelease(pMemAllocator);
pMemAllocator = NULL;
}
if (hObjExportRmClient != 0)
{
NV_STATUS rmStatus = pRmApi->Free(pRmApi,
hObjExportRmClient,
hObjExportRmClient);
NV_ASSERT(rmStatus == NV_OK);
hObjExportRmClient = 0;
}
}
//
// Export the rm object (hSrcClient, hSrcObject) into a new, unique
// RmObjExportHandle that another rm client can import later (see
// RmImportObject), even after the source client is destroyed.
//
// The object is duped under the file-internal export rm client:
// device-parented objects are duped under a per-device-instance device (or,
// as a fallback, subdevice) object; objects with no device ancestor are
// duped directly under the export client.
//
// Parameters:
//   hSrcClient      - rm client owning the object to export
//   hSrcObject      - object to export
//   pDstObject      - (out) receives the new RmObjExportHandle; must be
//                     non-NULL
//   pDeviceInstance - (out, optional) receives the device instance the
//                     source object resides on
//
// Returns NV_OK on success; on any failure the reference taken on the
// file-static export/import state is dropped again.
//
NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject,
                         RmObjExportHandle *pDstObject, NvU32 *pDeviceInstance)
{
    RmObjExportHandle hDstObject;
    NvU32 deviceInstance = NV_MAX_DEVICES;
    NvHandle hTmpObject;
    NvBool bClientAsDstParent = NV_FALSE;
    NV_STATUS status;
    RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);

    if (pDstObject == NULL)
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    //
    // Find the device instance on which the rm object exists by walking up
    // the resource parent chain until a Device ancestor (or the root) is
    // reached.
    //
    hTmpObject = hSrcObject;
    do
    {
        RsResourceRef *pResourceRef;

        status = serverutilGetResourceRef(hSrcClient, hTmpObject, &pResourceRef);
        if (status != NV_OK)
            return status;

        Device *pDevice = dynamicCast(pResourceRef->pResource, Device);
        if (pDevice != NULL)
        {
            deviceInstance = pDevice->deviceInst;
            break;
        }

        // Step to the parent; 0 terminates the walk at the root.
        hTmpObject = pResourceRef->pParentRef ? pResourceRef->pParentRef->hResource : 0;
    } while (hTmpObject != 0);

    // If a memory object is not parented by a device, use client as a parent.
    if ((hTmpObject == 0) || (deviceInstance >= NV_MAX_DEVICES))
    {
        bClientAsDstParent = NV_TRUE;
    }

    status = RmRefObjExportImport();
    if (status != NV_OK)
    {
        return status;
    }

    //
    // NOTE(review): serverutilValidateNewResourceHandle() is treated here as
    // "handle is free for (re)use", i.e. the previously allocated device
    // object was torn down with its device instance -- confirm against its
    // definition.
    //
    if (!bClientAsDstParent &&
        ((objExportDevice[deviceInstance].hRmDevice == 0) ||
         serverutilValidateNewResourceHandle(hObjExportRmClient,
                                             objExportDevice[deviceInstance].hRmDevice)))
    {
        //
        // Device object has not been created or it got destroyed in the
        // teardown path of device instance destruction; allocate a fresh device
        // object.
        //
        NV0080_ALLOC_PARAMETERS params;
        NV2080_ALLOC_PARAMETERS subdevParams;

        if (objExportDevice[deviceInstance].hRmDevice == 0)
        {
            NV_ASSERT(objExportDevice[deviceInstance].hRmSubDevice == 0);

            // Reserve map-backed handles for the device and its subdevice 0.
            objExportDevice[deviceInstance].hRmDevice =
                RmGenerateObjExportHandle(deviceInstance);
            objExportDevice[deviceInstance].hRmSubDevice =
                RmGenerateObjExportHandle(deviceInstance);

            if (objExportDevice[deviceInstance].hRmDevice == 0 ||
                objExportDevice[deviceInstance].hRmSubDevice == 0)
            {
                NV_PRINTF(LEVEL_ERROR, "Failed to allocate object handles in %s\n",
                          __FUNCTION__);
                status = NV_ERR_NO_MEMORY;
                goto done;
            }
        }

        portMemSet(&params, 0, sizeof(NV0080_ALLOC_PARAMETERS));
        params.deviceId = deviceInstance;

        status = pRmApi->AllocWithHandle(pRmApi,
                                         hObjExportRmClient,
                                         hObjExportRmClient,
                                         objExportDevice[deviceInstance].hRmDevice,
                                         NV01_DEVICE_0,
                                         &params);
        if (status != NV_OK)
        {
            NV_PRINTF(LEVEL_ERROR, "Unable to alloc device in %s\n",
                      __FUNCTION__);
            goto done;
        }

        portMemSet(&subdevParams, 0, sizeof(NV2080_ALLOC_PARAMETERS));
        subdevParams.subDeviceId = 0;

        status = pRmApi->AllocWithHandle(pRmApi,
                                         hObjExportRmClient,
                                         objExportDevice[deviceInstance].hRmDevice,
                                         objExportDevice[deviceInstance].hRmSubDevice,
                                         NV20_SUBDEVICE_0,
                                         &subdevParams);
        if (status != NV_OK)
        {
            NV_PRINTF(LEVEL_ERROR, "Unable to alloc subdevice in %s\n",
                      __FUNCTION__);
            // Subdevice alloc failed: free the just-created device object.
            (void) pRmApi->Free(pRmApi, hObjExportRmClient,
                                objExportDevice[deviceInstance].hRmDevice);
            goto done;
        }
    }

    hDstObject = RmGenerateObjExportHandle(deviceInstance);
    if (hDstObject == 0)
    {
        NV_PRINTF(LEVEL_ERROR, "Failed to allocate object handle in %s\n",
                  __FUNCTION__);
        status = NV_ERR_NO_MEMORY;
        goto done;
    }

    // If duping under device handle fails, try subdevice handle.
    status = pRmApi->DupObject(pRmApi,
                               hObjExportRmClient,
                               bClientAsDstParent ? hObjExportRmClient :
                               objExportDevice[deviceInstance].hRmDevice,
                               &hDstObject,
                               hSrcClient,
                               hSrcObject,
                               0 /* flags */);
    if (status != NV_OK)
    {
        if (!bClientAsDstParent && (status == NV_ERR_INVALID_OBJECT_PARENT))
        {
            NV_PRINTF(LEVEL_INFO,
                      "pRmApi->DupObject(Dev, failed due to invalid parent in %s."
                      " Now attempting DupObject with Subdev handle.\n",
                      __FUNCTION__);

            status = pRmApi->DupObject(pRmApi,
                                       hObjExportRmClient,
                                       objExportDevice[deviceInstance].hRmSubDevice,
                                       &hDstObject,
                                       hSrcClient,
                                       hSrcObject,
                                       0 /* flags */);
            if (status != NV_OK)
            {
                // Release the handle generated above before bailing out.
                RmUnrefObjExportHandle(hDstObject);
                NV_PRINTF(LEVEL_ERROR,
                          "pRmApi->DupObject(Subdev, failed with error code 0x%x in %s\n",
                          status, __FUNCTION__);
                goto done;
            }
        }
        else
        {
            // Release the handle generated above before bailing out.
            RmUnrefObjExportHandle(hDstObject);
            NV_PRINTF(LEVEL_ERROR,
                      "pRmApi->DupObject(Dev, failed with error code 0x%x in %s\n",
                      status, __FUNCTION__);
            goto done;
        }
    }

    if (pDeviceInstance != NULL)
    {
        *pDeviceInstance = deviceInstance;
    }

    *pDstObject = hDstObject;

done:
    // On any failure, drop the reference taken by RmRefObjExportImport().
    if (status != NV_OK)
    {
        RmUnrefObjExportImport();
    }
    return status;
}
//
// Free an RmObjExportHandle previously returned by RmExportObject.
//
// Invalid, internal, or already-freed handles are rejected without touching
// the file-static reference count.
//
void RmFreeObjExportHandle(RmObjExportHandle hObject)
{
    if (!RmValidateHandleAgainstInternalHandles(hObject))
    {
        NV_PRINTF(LEVEL_ERROR, "Invalid handle to exported object in %s\n",
                  __FUNCTION__);
        return;
    }

    //
    // Bug fix: drop the module reference only if the handle was actually
    // found and released. Previously a stale/bogus handle (validated above
    // but absent from the map) still decremented objExportImportRefCount,
    // which could underflow it and tear down state still in use.
    //
    if (RmUnrefObjExportHandle(hObject) != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "Invalid handle to exported object in %s\n",
                  __FUNCTION__);
        return;
    }

    RmUnrefObjExportImport();
}
//
// Import an object previously exported via RmExportObject: dup it from the
// file-internal export rm client into (hDstClient, hDstParent), returning the
// new handle in *phDstObject.
//
// When pObjectType is non-NULL it is filled with the
// NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_* classification derived from
// the exported object's address space.
//
NV_STATUS RmImportObject(NvHandle hDstClient, NvHandle hDstParent,
                         NvHandle *phDstObject, RmObjExportHandle hSrcObject,
                         NvU8 *pObjectType)
{
    NV_STATUS status;
    NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS params;
    RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);

    // Reject handles that collide with internal handles or are unknown.
    if (!RmValidateHandleAgainstInternalHandles(hSrcObject) ||
        (mapFind(&objExportHandleMap, hSrcObject) == NULL))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    if (pObjectType != NULL)
    {
        params.hObject = hSrcObject;
        params.mapFlags = 0;
        params.addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID;

        status = pRmApi->Control(pRmApi, hObjExportRmClient, hObjExportRmClient,
                                 NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE,
                                 &params, sizeof(params));
        if (status != NV_OK)
        {
            NV_PRINTF(LEVEL_ERROR,
                      "GET_ADDR_SPACE_TYPE failed with error code 0x%x in %s\n",
                      status, __FUNCTION__);
            return status;
        }

        // Translate the address space into the UNIX import object type.
        switch (params.addrSpaceType)
        {
            case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_SYSMEM:
                *pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_SYSMEM;
                break;
            case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM:
                *pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_VIDMEM;
                break;
            case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC:
                *pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC;
                break;
            default:
                // Unknown address space: assert and bail out.
                NV_ASSERT_OK_OR_RETURN(NV_ERR_INVALID_ARGUMENT);
        }
    }

    status = pRmApi->DupObject(pRmApi, hDstClient, hDstParent, phDstObject,
                               hObjExportRmClient, hSrcObject,
                               0 /* flags */);
    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR,
                  "pRmApi->DupObject(pRmApi, failed with error code 0x%x in %s\n",
                  status, __FUNCTION__);
    }

    return status;
}
//
// Look up information about an exported object handle.
//
// Parameters:
//   hSrcObject     - handle previously returned by RmExportObject
//   deviceInstance - (out) receives the device instance recorded for the
//                    exported object
//
// Returns NV_ERR_INVALID_ARGUMENT for a NULL out-pointer or an internal
// handle, NV_ERR_OBJECT_NOT_FOUND if the handle is not in the export map,
// NV_OK otherwise.
//
NV_STATUS RmGetExportObjectInfo(RmObjExportHandle hSrcObject, NvU32 *deviceInstance)
{
    RmObjExportHandleRef *pHandleRef = NULL;

    //
    // Robustness fix: validate the out-pointer (RmExportObject performs the
    // analogous check on pDstObject) instead of dereferencing NULL below.
    //
    if ((deviceInstance == NULL) ||
        !RmValidateHandleAgainstInternalHandles(hSrcObject))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    pHandleRef = mapFind(&objExportHandleMap, hSrcObject);
    if (pHandleRef == NULL)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    *deviceInstance = pHandleRef->deviceInstance;
    return NV_OK;
}

View File

@@ -0,0 +1,103 @@
--undefined=rm_disable_adapter
--undefined=rm_execute_work_item
--undefined=rm_free_os_event
--undefined=rm_free_private_state
--undefined=rm_cleanup_file_private
--undefined=rm_unbind_lock
--undefined=rm_get_device_name
--undefined=rm_get_vbios_version
--undefined=rm_get_gpu_uuid
--undefined=rm_get_gpu_uuid_raw
--undefined=rm_set_rm_firmware_requested
--undefined=rm_get_firmware_version
--undefined=rm_i2c_remove_adapters
--undefined=rm_i2c_is_smbus_capable
--undefined=rm_i2c_transfer
--undefined=rm_init_adapter
--undefined=rm_init_private_state
--undefined=rm_init_rm
--undefined=rm_ioctl
--undefined=rm_is_supported_device
--undefined=rm_is_supported_pci_device
--undefined=rm_isr
--undefined=rm_isr_bh
--undefined=rm_isr_bh_unlocked
--undefined=rm_perform_version_check
--undefined=rm_power_management
--undefined=rm_stop_user_channels
--undefined=rm_restart_user_channels
--undefined=rm_read_registry_dword
--undefined=rm_run_rc_callback
--undefined=rm_run_nano_timer_callback
--undefined=rm_save_low_res_mode
--undefined=rm_shutdown_adapter
--undefined=rm_exclude_adapter
--undefined=rm_acquire_api_lock
--undefined=rm_release_api_lock
--undefined=rm_acquire_gpu_lock
--undefined=rm_release_gpu_lock
--undefined=rm_acquire_all_gpus_lock
--undefined=rm_release_all_gpus_lock
--undefined=rm_shutdown_rm
--undefined=rm_system_event
--undefined=rm_write_registry_binary
--undefined=rm_write_registry_dword
--undefined=rm_write_registry_string
--undefined=rm_parse_option_string
--undefined=rm_remove_spaces
--undefined=rm_string_token
--undefined=rm_disable_gpu_state_persistence
--undefined=pNVRM_ID
--undefined=rm_p2p_get_pages
--undefined=rm_p2p_get_pages_persistent
--undefined=rm_p2p_get_gpu_info
--undefined=rm_p2p_register_callback
--undefined=rm_p2p_put_pages
--undefined=rm_p2p_put_pages_persistent
--undefined=rm_p2p_dma_map_pages
--undefined=rm_dma_buf_dup_mem_handle
--undefined=rm_dma_buf_undup_mem_handle
--undefined=rm_dma_buf_map_mem_handle
--undefined=rm_dma_buf_unmap_mem_handle
--undefined=rm_dma_buf_get_client_and_device
--undefined=rm_dma_buf_put_client_and_device
--undefined=rm_log_gpu_crash
--undefined=rm_kernel_rmapi_op
--undefined=nv_get_hypervisor_type
--undefined=rm_gpu_copy_mmu_faults
--undefined=rm_gpu_handle_mmu_faults
--undefined=rm_gpu_copy_mmu_faults_unlocked
--undefined=rm_gpu_need_4k_page_isolation
--undefined=rm_is_chipset_io_coherent
--undefined=rm_get_device_remove_flag
--undefined=rm_init_event_locks
--undefined=rm_destroy_event_locks
--undefined=rm_get_gpu_numa_info
--undefined=rm_gpu_numa_online
--undefined=rm_gpu_numa_offline
--undefined=rm_is_device_sequestered
--undefined=nv_vgpu_create_request
--undefined=nv_vgpu_delete
--undefined=nv_vgpu_get_bar_info
--undefined=nv_vgpu_start
--undefined=nv_vgpu_get_type_ids
--undefined=nv_vgpu_get_type_info
--undefined=nv_vgpu_get_sparse_mmap
--undefined=nv_vgpu_update_request
--undefined=nv_vgpu_process_vf_info
--undefined=nv_gpu_bind_event
--undefined=rm_check_for_gpu_surprise_removal
--undefined=rm_set_external_kernel_client_count
--undefined=rm_schedule_gpu_wakeup
--undefined=rm_init_dynamic_power_management
--undefined=rm_cleanup_dynamic_power_management
--undefined=rm_ref_dynamic_power
--undefined=rm_unref_dynamic_power
--undefined=rm_transition_dynamic_power
--undefined=rm_get_vidmem_power_status
--undefined=rm_acpi_notify
--undefined=rm_get_dynamic_power_management_status
--undefined=rm_get_gpu_gcx_support
--undefined=rm_is_iommu_needed_for_sriov
--undefined=rm_disable_iomap_wc
--undefined=rm_get_clientnvpcf_power_limits

View File

@@ -0,0 +1,215 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* g_allclasses.h
*
* Pull in all class headers or class number declarations.
* The class list is generated by chip-config from Classes.pm
*
* NOTE: this file may be included multiple times
*
*/
//
// When SDK_ALL_CLASSES_INCLUDE_FULL_HEADER is defined, pull in the complete
// class headers; otherwise fall back to bare class-number #defines below.
//
#if defined(SDK_ALL_CLASSES_INCLUDE_FULL_HEADER)

#include <class/cl0000.h> // NV01_ROOT
#include <class/cl0001.h> // NV01_ROOT_NON_PRIV
#include <class/cl0041.h> // NV01_ROOT_CLIENT
#include <class/cl0020.h> // NV0020_GPU_MANAGEMENT
#include <class/cl0080.h> // NV01_DEVICE_0
#include <class/cl2080.h> // NV20_SUBDEVICE_0
#include <class/cl2081.h> // NV2081_BINAPI
#include <class/cl2082.h> // NV2082_BINAPI_PRIVILEGED
#include <class/cl0002.h> // NV01_CONTEXT_DMA
#include <class/cl003e.h> // NV01_MEMORY_SYSTEM
#include <class/cl00c3.h> // NV01_MEMORY_SYNCPOINT
#include <class/cl0071.h> // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
#include <class/cl00f2.h> // IO_VASPACE_A
#include <class/cl0005.h> // NV01_EVENT
#include <nvos.h> // NV01_EVENT_KERNEL_CALLBACK
#include <nvos.h> // NV01_EVENT_OS_EVENT
#include <nvos.h> // NV01_EVENT_KERNEL_CALLBACK_EX
#include <class/clc372sw.h> // NVC372_DISPLAY_SW
#include <class/clc673.h> // NVC673_DISP_CAPABILITIES
#include <class/cl0073.h> // NV04_DISPLAY_COMMON
#include <class/clc670.h> // NVC670_DISPLAY
#include <class/clc671.h> // NVC671_DISP_SF_USER
#include <class/clc67a.h> // NVC67A_CURSOR_IMM_CHANNEL_PIO
#include <class/clc67b.h> // NVC67B_WINDOW_IMM_CHANNEL_DMA
#include <class/clc67d.h> // NVC67D_CORE_CHANNEL_DMA
#include <class/clc67e.h> // NVC67E_WINDOW_CHANNEL_DMA
#include <class/clc77f.h> // NVC77F_ANY_CHANNEL_DMA
#include <class/cl90ec.h> // GF100_HDACODEC

#else // defined(SDK_ALL_CLASSES_INCLUDE_FULL_HEADER)

//
// Class-number-only fallback. Each #define is guarded with #ifndef so this
// header can coexist with the full class headers when both are included.
//
#ifndef NV01_ROOT
#define NV01_ROOT (0x00000000)
#endif
#ifndef NV1_ROOT
#define NV1_ROOT (0x00000000) // alias
#endif
#ifndef NV01_NULL_OBJECT
#define NV01_NULL_OBJECT (0x00000000) // alias
#endif
#ifndef NV1_NULL_OBJECT
#define NV1_NULL_OBJECT (0x00000000) // alias
#endif
#ifndef NV01_ROOT_NON_PRIV
#define NV01_ROOT_NON_PRIV (0x00000001)
#endif
#ifndef NV1_ROOT_NON_PRIV
#define NV1_ROOT_NON_PRIV (0x00000001) // alias
#endif
#ifndef NV01_ROOT_CLIENT
#define NV01_ROOT_CLIENT (0x00000041)
#endif
#ifndef NV0020_GPU_MANAGEMENT
#define NV0020_GPU_MANAGEMENT (0x00000020)
#endif
#ifndef NV01_DEVICE_0
#define NV01_DEVICE_0 (0x00000080)
#endif
#ifndef NV20_SUBDEVICE_0
#define NV20_SUBDEVICE_0 (0x00002080)
#endif
#ifndef NV2081_BINAPI
#define NV2081_BINAPI (0x00002081)
#endif
#ifndef NV2082_BINAPI_PRIVILEGED
#define NV2082_BINAPI_PRIVILEGED (0x00002082)
#endif
#ifndef NV01_CONTEXT_DMA
#define NV01_CONTEXT_DMA (0x00000002)
#endif
#ifndef NV01_MEMORY_SYSTEM
#define NV01_MEMORY_SYSTEM (0x0000003e)
#endif
#ifndef NV1_MEMORY_SYSTEM
#define NV1_MEMORY_SYSTEM (0x0000003e) // alias
#endif
#ifndef NV01_MEMORY_SYNCPOINT
#define NV01_MEMORY_SYNCPOINT (0x000000c3)
#endif
#ifndef NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
#define NV01_MEMORY_SYSTEM_OS_DESCRIPTOR (0x00000071)
#endif
#ifndef IO_VASPACE_A
#define IO_VASPACE_A (0x000000f2)
#endif
#ifndef NV01_EVENT
#define NV01_EVENT (0x00000005)
#endif
#ifndef NV1_EVENT
#define NV1_EVENT (0x00000005) // alias
#endif
#ifndef NV01_EVENT_KERNEL_CALLBACK
#define NV01_EVENT_KERNEL_CALLBACK (0x00000078)
#endif
#ifndef NV1_EVENT_KERNEL_CALLBACK
#define NV1_EVENT_KERNEL_CALLBACK (0x00000078) // alias
#endif
#ifndef NV01_EVENT_OS_EVENT
#define NV01_EVENT_OS_EVENT (0x00000079)
#endif
#ifndef NV1_EVENT_OS_EVENT
#define NV1_EVENT_OS_EVENT (0x00000079) // alias
#endif
#ifndef NV01_EVENT_WIN32_EVENT
#define NV01_EVENT_WIN32_EVENT (0x00000079) // alias
#endif
#ifndef NV1_EVENT_WIN32_EVENT
#define NV1_EVENT_WIN32_EVENT (0x00000079) // alias
#endif
#ifndef NV01_EVENT_KERNEL_CALLBACK_EX
#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e)
#endif
#ifndef NV1_EVENT_KERNEL_CALLBACK_EX
#define NV1_EVENT_KERNEL_CALLBACK_EX (0x0000007e) // alias
#endif
#ifndef NVC372_DISPLAY_SW
#define NVC372_DISPLAY_SW (0x0000c372)
#endif
#ifndef NVC673_DISP_CAPABILITIES
#define NVC673_DISP_CAPABILITIES (0x0000c673)
#endif
#ifndef NV04_DISPLAY_COMMON
#define NV04_DISPLAY_COMMON (0x00000073)
#endif
#ifndef NVC670_DISPLAY
#define NVC670_DISPLAY (0x0000c670)
#endif
#ifndef NVC671_DISP_SF_USER
#define NVC671_DISP_SF_USER (0x0000c671)
#endif
#ifndef NVC67A_CURSOR_IMM_CHANNEL_PIO
#define NVC67A_CURSOR_IMM_CHANNEL_PIO (0x0000c67a)
#endif
#ifndef NVC67B_WINDOW_IMM_CHANNEL_DMA
#define NVC67B_WINDOW_IMM_CHANNEL_DMA (0x0000c67b)
#endif
#ifndef NVC67D_CORE_CHANNEL_DMA
#define NVC67D_CORE_CHANNEL_DMA (0x0000c67d)
#endif
#ifndef NVC67E_WINDOW_CHANNEL_DMA
#define NVC67E_WINDOW_CHANNEL_DMA (0x0000c67e)
#endif
#ifndef NVC77F_ANY_CHANNEL_DMA
#define NVC77F_ANY_CHANNEL_DMA (0x0000c77f)
#endif
#ifndef GF100_HDACODEC
#define GF100_HDACODEC (0x000090ec)
#endif

#endif // defined(SDK_ALL_CLASSES_INCLUDE_FULL_HEADER)

View File

@@ -0,0 +1,659 @@
#define NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_binary_api_nvoc.h"
//
// NVOC-generated RTTI tables for BinaryApi. Generated code: comments only;
// the tables themselves are byte-exact and must not be hand-edited.
//

// Link-time uniqueness check for class id 0xb7a47c: a duplicate id in
// another translation unit would produce a duplicate symbol in DEBUG builds.
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xb7a47c = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;

// Forward declarations of the generated init/ctor/dtor entry points.
void __nvoc_init_BinaryApi(BinaryApi*);
void __nvoc_init_funcTable_BinaryApi(BinaryApi*);
NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_BinaryApi(BinaryApi*);
void __nvoc_dtor_BinaryApi(BinaryApi*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApi;

//
// Per-ancestor RTTI records: each holds the ancestor's class definition, the
// destructor to use when destructing from that base, and the byte offset of
// that base subobject within BinaryApi.
//
static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_BinaryApi = {
    /*pClassDef=*/ &__nvoc_class_def_BinaryApi,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_BinaryApi,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_RsResource = {
    /*pClassDef=*/ &__nvoc_class_def_RsResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
};

static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_RmResourceCommon = {
    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};

static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_RmResource = {
    /*pClassDef=*/ &__nvoc_class_def_RmResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource),
};

static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_GpuResource = {
    /*pClassDef=*/ &__nvoc_class_def_GpuResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource),
};

//
// Cast table listing BinaryApi itself plus all five ancestors, used by
// dynamicCast-style lookups.
//
static const struct NVOC_CASTINFO __nvoc_castinfo_BinaryApi = {
    /*numRelatives=*/ 6,
    /*relatives=*/ {
        &__nvoc_rtti_BinaryApi_BinaryApi,
        &__nvoc_rtti_BinaryApi_GpuResource,
        &__nvoc_rtti_BinaryApi_RmResource,
        &__nvoc_rtti_BinaryApi_RmResourceCommon,
        &__nvoc_rtti_BinaryApi_RsResource,
        &__nvoc_rtti_BinaryApi_Object,
    },
};
//
// Class definition for BinaryApi: object size, NVOC class id, provider,
// optional debug name, dynamic-create entry point, cast table and export
// table. Generated code -- do not hand-edit.
//
const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi = 
{
    /*classInfo=*/ {
        /*size=*/ sizeof(BinaryApi),
        /*classId=*/ classId(BinaryApi),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "BinaryApi",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_BinaryApi,
    /*pCastInfo=*/ &__nvoc_castinfo_BinaryApi,
    /*pExportInfo=*/ &__nvoc_export_info_BinaryApi
};
static NV_STATUS __nvoc_thunk_BinaryApi_gpuresControl(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return binapiControl((struct BinaryApi *)(((unsigned char *)pResource) - __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, pParams);
}
static NvBool __nvoc_thunk_GpuResource_binapiShareCallback(struct BinaryApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}
static NV_STATUS __nvoc_thunk_GpuResource_binapiUnmap(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, pCpuMapping);
}
static NV_STATUS __nvoc_thunk_RmResource_binapiGetMemInterMapParams(struct BinaryApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_binapiGetMemoryMappingDescriptor(struct BinaryApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApi_RmResource.offset), ppMemDesc);
}
static NV_STATUS __nvoc_thunk_GpuResource_binapiGetMapAddrSpace(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
}
static NvHandle __nvoc_thunk_GpuResource_binapiGetInternalObjectHandle(struct BinaryApi *pGpuResource) {
return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_binapiControlFilter(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pCallContext, pParams);
}
static void __nvoc_thunk_RsResource_binapiAddAdditionalDependants(struct RsClient *pClient, struct BinaryApi *pResource, RsResourceRef *pReference) {
resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pReference);
}
static NvU32 __nvoc_thunk_RsResource_binapiGetRefCount(struct BinaryApi *pResource) {
return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RmResource_binapiCheckMemInterUnmap(struct BinaryApi *pRmResource, NvBool bSubdeviceHandleProvided) {
return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApi_RmResource.offset), bSubdeviceHandleProvided);
}
static NV_STATUS __nvoc_thunk_RsResource_binapiMapTo(struct BinaryApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_binapiControl_Prologue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_GpuResource_binapiGetRegBaseOffsetAndSize(struct BinaryApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pGpu, pOffset, pSize);
}
//
// NVOC-generated up-cast thunks for BinaryApi.
//
// Each thunk adapts a BinaryApi* to the ancestor class that actually
// implements the virtual method (RsResource, RmResource, or GpuResource):
// it adds the compile-time offset of that base sub-object (taken from the
// corresponding __nvoc_rtti_BinaryApi_* entry) to the object pointer and
// tail-calls the base implementation with the remaining arguments unchanged.
//
static NvBool __nvoc_thunk_RsResource_binapiCanCopy(struct BinaryApi *pResource) {
return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_GpuResource_binapiInternalControlForward(struct BinaryApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), command, pParams, size);
}
static void __nvoc_thunk_RsResource_binapiPreDestruct(struct BinaryApi *pResource) {
resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_binapiUnmapFrom(struct BinaryApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pParams);
}
static void __nvoc_thunk_RmResource_binapiControl_Epilogue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RsResource_binapiControlLookup(struct BinaryApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pParams, ppEntry);
}
static NV_STATUS __nvoc_thunk_GpuResource_binapiMap(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, pParams, pCpuMapping);
}
static NvBool __nvoc_thunk_RmResource_binapiAccessCallback(struct BinaryApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
// Export table for BinaryApi: this class exports no RM control methods
// (numEntries == 0, no entry array).
const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApi =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_GpuResource(GpuResource*);
// Destructor: BinaryApi declares no fields of its own, so teardown is just a
// chain into the GpuResource base destructor.
void __nvoc_dtor_BinaryApi(BinaryApi *pThis) {
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Data-field initializer: BinaryApi has no class-specific data fields to
// initialize; the empty function is emitted for generator symmetry.
void __nvoc_init_dataField_BinaryApi(BinaryApi *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_BinaryApi_fail_GpuResource;
__nvoc_init_dataField_BinaryApi(pThis);
status = __nvoc_binapiConstruct(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_BinaryApi_fail__init;
goto __nvoc_ctor_BinaryApi_exit; // Success
__nvoc_ctor_BinaryApi_fail__init:
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_ctor_BinaryApi_fail_GpuResource:
__nvoc_ctor_BinaryApi_exit:
return status;
}
// Populates the BinaryApi virtual function table.
// binapiControl is the only method BinaryApi overrides itself (it points at
// binapiControl_IMPL, and the GpuResource base's gpuresControl slot is
// redirected through a down-cast thunk so base-typed calls reach the
// override).  Every other slot forwards to an ancestor implementation via
// the up-cast thunks defined above.
static void __nvoc_init_funcTable_BinaryApi_1(BinaryApi *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
pThis->__binapiControl__ = &binapiControl_IMPL;
pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_BinaryApi_gpuresControl;
pThis->__binapiShareCallback__ = &__nvoc_thunk_GpuResource_binapiShareCallback;
pThis->__binapiUnmap__ = &__nvoc_thunk_GpuResource_binapiUnmap;
pThis->__binapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_binapiGetMemInterMapParams;
pThis->__binapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_binapiGetMemoryMappingDescriptor;
pThis->__binapiGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_binapiGetMapAddrSpace;
pThis->__binapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_binapiGetInternalObjectHandle;
pThis->__binapiControlFilter__ = &__nvoc_thunk_RsResource_binapiControlFilter;
pThis->__binapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_binapiAddAdditionalDependants;
pThis->__binapiGetRefCount__ = &__nvoc_thunk_RsResource_binapiGetRefCount;
pThis->__binapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_binapiCheckMemInterUnmap;
pThis->__binapiMapTo__ = &__nvoc_thunk_RsResource_binapiMapTo;
pThis->__binapiControl_Prologue__ = &__nvoc_thunk_RmResource_binapiControl_Prologue;
pThis->__binapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_binapiGetRegBaseOffsetAndSize;
pThis->__binapiCanCopy__ = &__nvoc_thunk_RsResource_binapiCanCopy;
pThis->__binapiInternalControlForward__ = &__nvoc_thunk_GpuResource_binapiInternalControlForward;
pThis->__binapiPreDestruct__ = &__nvoc_thunk_RsResource_binapiPreDestruct;
pThis->__binapiUnmapFrom__ = &__nvoc_thunk_RsResource_binapiUnmapFrom;
pThis->__binapiControl_Epilogue__ = &__nvoc_thunk_RmResource_binapiControl_Epilogue;
pThis->__binapiControlLookup__ = &__nvoc_thunk_RsResource_binapiControlLookup;
pThis->__binapiMap__ = &__nvoc_thunk_GpuResource_binapiMap;
pThis->__binapiAccessCallback__ = &__nvoc_thunk_RmResource_binapiAccessCallback;
}
// Public entry point for vtable initialization; delegates to the single
// generated chunk (_1).  The generator splits large tables into chunks.
void __nvoc_init_funcTable_BinaryApi(BinaryApi *pThis) {
__nvoc_init_funcTable_BinaryApi_1(pThis);
}
void __nvoc_init_GpuResource(GpuResource*);
// Object initializer: wires the cached __nvoc_pbase_* shortcut pointers to
// each ancestor sub-object within the BinaryApi layout, recursively
// initializes the GpuResource base, then fills in the vtable.
void __nvoc_init_BinaryApi(BinaryApi *pThis) {
pThis->__nvoc_pbase_BinaryApi = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
__nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_init_funcTable_BinaryApi(pThis);
}
// Allocates and constructs a BinaryApi object:
// zeroed non-paged allocation -> RTTI setup -> optional attachment to the
// parent Object tree -> pointer/vtable init -> constructor chain.
// On constructor failure only the memory is freed here, because
// __nvoc_ctor_BinaryApi has already unwound any partially-built bases.
// NOTE(review): the dynamicCast(pParent, Object) result is passed to
// objAddChild without a NULL check — generated code presumably relies on
// every parent being an Object; confirm against the NVOC generator contract.
NV_STATUS __nvoc_objCreate_BinaryApi(BinaryApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status;
Object *pParentObj;
BinaryApi *pThis;
pThis = portMemAllocNonPaged(sizeof(BinaryApi));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(BinaryApi));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_BinaryApi);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_BinaryApi(pThis);
status = __nvoc_ctor_BinaryApi(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_objCreate_BinaryApi_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_BinaryApi_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
// Varargs adapter used by the dynamic object-creation path: unpacks the two
// constructor arguments from the va_list (order must match the class-def
// objCreatefn convention) and forwards to the typed creator above.
NV_STATUS __nvoc_objCreateDynamic_BinaryApi(BinaryApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
NV_STATUS status;
struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
status = __nvoc_objCreate_BinaryApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
return status;
}
// ---- BinaryApiPrivileged section ----
// Link-time uniqueness check for the class id 0x1c0579: if two classes were
// generated with the same id, the duplicate symbol fails the debug build.
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x1c0579 = 1;
#endif
// Class definitions of BinaryApiPrivileged and all of its ancestors,
// referenced by the RTTI entries below.
extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi;
// Forward declarations for the generated lifecycle functions defined later
// in this file.
void __nvoc_init_BinaryApiPrivileged(BinaryApiPrivileged*);
void __nvoc_init_funcTable_BinaryApiPrivileged(BinaryApiPrivileged*);
NV_STATUS __nvoc_ctor_BinaryApiPrivileged(BinaryApiPrivileged*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_BinaryApiPrivileged(BinaryApiPrivileged*);
void __nvoc_dtor_BinaryApiPrivileged(BinaryApiPrivileged*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApiPrivileged;
// RTTI entries for BinaryApiPrivileged: one per relative (itself plus every
// ancestor).  Each entry records the class definition, the destructor to use
// when destroying through that base, and the byte offset of the base
// sub-object inside the BinaryApiPrivileged layout.  These offsets drive the
// pointer arithmetic in the cast thunks.
static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_BinaryApiPrivileged = {
/*pClassDef=*/ &__nvoc_class_def_BinaryApiPrivileged,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_BinaryApiPrivileged,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_RsResource = {
/*pClassDef=*/ &__nvoc_class_def_RsResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
};
static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_RmResourceCommon = {
/*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};
static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_RmResource = {
/*pClassDef=*/ &__nvoc_class_def_RmResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource),
};
static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_GpuResource = {
/*pClassDef=*/ &__nvoc_class_def_GpuResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource),
};
static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_BinaryApi = {
/*pClassDef=*/ &__nvoc_class_def_BinaryApi,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi),
};
// Cast table consumed by dynamicCast: lists all 7 relatives, most-derived
// first.
static const struct NVOC_CASTINFO __nvoc_castinfo_BinaryApiPrivileged = {
/*numRelatives=*/ 7,
/*relatives=*/ {
&__nvoc_rtti_BinaryApiPrivileged_BinaryApiPrivileged,
&__nvoc_rtti_BinaryApiPrivileged_BinaryApi,
&__nvoc_rtti_BinaryApiPrivileged_GpuResource,
&__nvoc_rtti_BinaryApiPrivileged_RmResource,
&__nvoc_rtti_BinaryApiPrivileged_RmResourceCommon,
&__nvoc_rtti_BinaryApiPrivileged_RsResource,
&__nvoc_rtti_BinaryApiPrivileged_Object,
},
};
// Class definition for BinaryApiPrivileged: size/id/name metadata, the
// dynamic creation entry point, the cast table above, and the (empty) export
// table below.
const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged =
{
/*classInfo=*/ {
/*size=*/ sizeof(BinaryApiPrivileged),
/*classId=*/ classId(BinaryApiPrivileged),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "BinaryApiPrivileged",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_BinaryApiPrivileged,
/*pCastInfo=*/ &__nvoc_castinfo_BinaryApiPrivileged,
/*pExportInfo=*/ &__nvoc_export_info_BinaryApiPrivileged
};
//
// NVOC-generated cast thunks for BinaryApiPrivileged.
//
// The first thunk is the override adapter: a call arriving through the
// BinaryApi base vtable is DOWN-cast to the derived object by SUBTRACTING
// the offset of the BinaryApi sub-object, then dispatched to
// binapiprivControl.  All remaining thunks are the usual up-cast adapters:
// they ADD the offset of the ancestor sub-object (RsResource / RmResource /
// GpuResource) and forward to that ancestor's implementation unchanged.
//
static NV_STATUS __nvoc_thunk_BinaryApiPrivileged_binapiControl(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return binapiprivControl((struct BinaryApiPrivileged *)(((unsigned char *)pResource) - __nvoc_rtti_BinaryApiPrivileged_BinaryApi.offset), pCallContext, pParams);
}
static NvBool __nvoc_thunk_GpuResource_binapiprivShareCallback(struct BinaryApiPrivileged *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}
static NV_STATUS __nvoc_thunk_GpuResource_binapiprivUnmap(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pCallContext, pCpuMapping);
}
static NV_STATUS __nvoc_thunk_RmResource_binapiprivGetMemInterMapParams(struct BinaryApiPrivileged *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_binapiprivGetMemoryMappingDescriptor(struct BinaryApiPrivileged *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), ppMemDesc);
}
static NV_STATUS __nvoc_thunk_GpuResource_binapiprivGetMapAddrSpace(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
}
static NvHandle __nvoc_thunk_GpuResource_binapiprivGetInternalObjectHandle(struct BinaryApiPrivileged *pGpuResource) {
return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_binapiprivControlFilter(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pCallContext, pParams);
}
static void __nvoc_thunk_RsResource_binapiprivAddAdditionalDependants(struct RsClient *pClient, struct BinaryApiPrivileged *pResource, RsResourceRef *pReference) {
resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pReference);
}
static NvU32 __nvoc_thunk_RsResource_binapiprivGetRefCount(struct BinaryApiPrivileged *pResource) {
return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RmResource_binapiprivCheckMemInterUnmap(struct BinaryApiPrivileged *pRmResource, NvBool bSubdeviceHandleProvided) {
return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), bSubdeviceHandleProvided);
}
static NV_STATUS __nvoc_thunk_RsResource_binapiprivMapTo(struct BinaryApiPrivileged *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_binapiprivControl_Prologue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_GpuResource_binapiprivGetRegBaseOffsetAndSize(struct BinaryApiPrivileged *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pGpu, pOffset, pSize);
}
static NvBool __nvoc_thunk_RsResource_binapiprivCanCopy(struct BinaryApiPrivileged *pResource) {
return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_GpuResource_binapiprivInternalControlForward(struct BinaryApiPrivileged *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), command, pParams, size);
}
static void __nvoc_thunk_RsResource_binapiprivPreDestruct(struct BinaryApiPrivileged *pResource) {
resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_binapiprivUnmapFrom(struct BinaryApiPrivileged *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pParams);
}
static void __nvoc_thunk_RmResource_binapiprivControl_Epilogue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RsResource_binapiprivControlLookup(struct BinaryApiPrivileged *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pParams, ppEntry);
}
static NV_STATUS __nvoc_thunk_GpuResource_binapiprivMap(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pCallContext, pParams, pCpuMapping);
}
static NvBool __nvoc_thunk_RmResource_binapiprivAccessCallback(struct BinaryApiPrivileged *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
// Export table for BinaryApiPrivileged: no RM-control methods are exported.
const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApiPrivileged =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_BinaryApi(BinaryApi*);
// Destructor: BinaryApiPrivileged adds no fields of its own; destruction is
// just a chain into the BinaryApi base destructor.
void __nvoc_dtor_BinaryApiPrivileged(BinaryApiPrivileged *pThis) {
__nvoc_dtor_BinaryApi(&pThis->__nvoc_base_BinaryApi);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Data-field initializer: no class-specific fields; emitted for symmetry.
void __nvoc_init_dataField_BinaryApiPrivileged(BinaryApiPrivileged *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
// Constructor chain for BinaryApiPrivileged: base ctor -> data-field init ->
// user constructor (__nvoc_binapiprivConstruct).  On user-ctor failure the
// base is destructed before returning, leaving the object fully unwound.
NV_STATUS __nvoc_ctor_BinaryApiPrivileged(BinaryApiPrivileged *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_BinaryApi(&pThis->__nvoc_base_BinaryApi, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_BinaryApiPrivileged_fail_BinaryApi;
__nvoc_init_dataField_BinaryApiPrivileged(pThis);
status = __nvoc_binapiprivConstruct(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_BinaryApiPrivileged_fail__init;
goto __nvoc_ctor_BinaryApiPrivileged_exit; // Success
__nvoc_ctor_BinaryApiPrivileged_fail__init:
__nvoc_dtor_BinaryApi(&pThis->__nvoc_base_BinaryApi);
__nvoc_ctor_BinaryApiPrivileged_fail_BinaryApi:
__nvoc_ctor_BinaryApiPrivileged_exit:
return status;
}
// Populates the BinaryApiPrivileged virtual function table.
// binapiprivControl is the only method this class overrides itself; the
// inherited BinaryApi binapiControl slot is redirected through the down-cast
// thunk so base-typed dispatch reaches the privileged override.  All other
// slots forward to ancestor implementations via the up-cast thunks above.
static void __nvoc_init_funcTable_BinaryApiPrivileged_1(BinaryApiPrivileged *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
pThis->__binapiprivControl__ = &binapiprivControl_IMPL;
pThis->__nvoc_base_BinaryApi.__binapiControl__ = &__nvoc_thunk_BinaryApiPrivileged_binapiControl;
pThis->__binapiprivShareCallback__ = &__nvoc_thunk_GpuResource_binapiprivShareCallback;
pThis->__binapiprivUnmap__ = &__nvoc_thunk_GpuResource_binapiprivUnmap;
pThis->__binapiprivGetMemInterMapParams__ = &__nvoc_thunk_RmResource_binapiprivGetMemInterMapParams;
pThis->__binapiprivGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_binapiprivGetMemoryMappingDescriptor;
pThis->__binapiprivGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_binapiprivGetMapAddrSpace;
pThis->__binapiprivGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_binapiprivGetInternalObjectHandle;
pThis->__binapiprivControlFilter__ = &__nvoc_thunk_RsResource_binapiprivControlFilter;
pThis->__binapiprivAddAdditionalDependants__ = &__nvoc_thunk_RsResource_binapiprivAddAdditionalDependants;
pThis->__binapiprivGetRefCount__ = &__nvoc_thunk_RsResource_binapiprivGetRefCount;
pThis->__binapiprivCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_binapiprivCheckMemInterUnmap;
pThis->__binapiprivMapTo__ = &__nvoc_thunk_RsResource_binapiprivMapTo;
pThis->__binapiprivControl_Prologue__ = &__nvoc_thunk_RmResource_binapiprivControl_Prologue;
pThis->__binapiprivGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_binapiprivGetRegBaseOffsetAndSize;
pThis->__binapiprivCanCopy__ = &__nvoc_thunk_RsResource_binapiprivCanCopy;
pThis->__binapiprivInternalControlForward__ = &__nvoc_thunk_GpuResource_binapiprivInternalControlForward;
pThis->__binapiprivPreDestruct__ = &__nvoc_thunk_RsResource_binapiprivPreDestruct;
pThis->__binapiprivUnmapFrom__ = &__nvoc_thunk_RsResource_binapiprivUnmapFrom;
pThis->__binapiprivControl_Epilogue__ = &__nvoc_thunk_RmResource_binapiprivControl_Epilogue;
pThis->__binapiprivControlLookup__ = &__nvoc_thunk_RsResource_binapiprivControlLookup;
pThis->__binapiprivMap__ = &__nvoc_thunk_GpuResource_binapiprivMap;
pThis->__binapiprivAccessCallback__ = &__nvoc_thunk_RmResource_binapiprivAccessCallback;
}
// Public entry point for vtable initialization; delegates to chunk _1.
void __nvoc_init_funcTable_BinaryApiPrivileged(BinaryApiPrivileged *pThis) {
__nvoc_init_funcTable_BinaryApiPrivileged_1(pThis);
}
void __nvoc_init_BinaryApi(BinaryApi*);
// Object initializer: wires the __nvoc_pbase_* shortcut pointers to each
// ancestor sub-object, recursively initializes the BinaryApi base, then
// fills in the vtable.
void __nvoc_init_BinaryApiPrivileged(BinaryApiPrivileged *pThis) {
pThis->__nvoc_pbase_BinaryApiPrivileged = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource;
pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource;
pThis->__nvoc_pbase_BinaryApi = &pThis->__nvoc_base_BinaryApi;
__nvoc_init_BinaryApi(&pThis->__nvoc_base_BinaryApi);
__nvoc_init_funcTable_BinaryApiPrivileged(pThis);
}
// Allocates and constructs a BinaryApiPrivileged object (same sequence as
// __nvoc_objCreate_BinaryApi): zeroed non-paged allocation -> RTTI setup ->
// optional attachment to the parent Object tree -> init -> constructor
// chain.  On constructor failure only the memory is freed here, because the
// constructor has already unwound any partially-built bases.
// NOTE(review): dynamicCast(pParent, Object) is not NULL-checked before
// objAddChild — generated code presumably relies on every parent being an
// Object; confirm against the NVOC generator contract.
NV_STATUS __nvoc_objCreate_BinaryApiPrivileged(BinaryApiPrivileged **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status;
Object *pParentObj;
BinaryApiPrivileged *pThis;
pThis = portMemAllocNonPaged(sizeof(BinaryApiPrivileged));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(BinaryApiPrivileged));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_BinaryApiPrivileged);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_BinaryApiPrivileged(pThis);
status = __nvoc_ctor_BinaryApiPrivileged(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_objCreate_BinaryApiPrivileged_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_BinaryApiPrivileged_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
// Varargs adapter for dynamic creation: unpacks the two constructor
// arguments from the va_list (order matches the objCreatefn convention) and
// forwards to the typed creator above.
NV_STATUS __nvoc_objCreateDynamic_BinaryApiPrivileged(BinaryApiPrivileged **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
NV_STATUS status;
struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
status = __nvoc_objCreate_BinaryApiPrivileged(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
return status;
}

View File

@@ -0,0 +1,416 @@
#ifndef _G_BINARY_API_NVOC_H_
#define _G_BINARY_API_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_binary_api_nvoc.h"
#ifndef BINARY_API_H
#define BINARY_API_H
#include "core/core.h"
#include "rmapi/resource.h"
#include "gpu/gpu_resource.h"
#include "resserv/rs_resource.h"
#include "rmapi/control.h"
#ifdef NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// BinaryApi: NVOC class derived GpuResource -> RmResource -> RsResource ->
// Object.  The layout is: RTTI pointer, embedded base sub-object, cached
// per-ancestor shortcut pointers (filled by __nvoc_init_BinaryApi), then one
// function pointer per virtual method (the vtable, filled by
// __nvoc_init_funcTable_BinaryApi).
struct BinaryApi {
// Runtime type information for this instance.
const struct NVOC_RTTI *__nvoc_rtti;
// Embedded base-class sub-object (single-inheritance chain).
struct GpuResource __nvoc_base_GpuResource;
// Cached pointers to each ancestor sub-object, avoiding repeated casts.
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct GpuResource *__nvoc_pbase_GpuResource;
struct BinaryApi *__nvoc_pbase_BinaryApi;
// Virtual method slots; binapiControl is overridden by this class, the
// rest are wired to ancestor implementations through generated thunks.
NV_STATUS (*__binapiControl__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NvBool (*__binapiShareCallback__)(struct BinaryApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__binapiUnmap__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RsCpuMapping *);
NV_STATUS (*__binapiGetMemInterMapParams__)(struct BinaryApi *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__binapiGetMemoryMappingDescriptor__)(struct BinaryApi *, struct MEMORY_DESCRIPTOR **);
NV_STATUS (*__binapiGetMapAddrSpace__)(struct BinaryApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
NvHandle (*__binapiGetInternalObjectHandle__)(struct BinaryApi *);
NV_STATUS (*__binapiControlFilter__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
void (*__binapiAddAdditionalDependants__)(struct RsClient *, struct BinaryApi *, RsResourceRef *);
NvU32 (*__binapiGetRefCount__)(struct BinaryApi *);
NV_STATUS (*__binapiCheckMemInterUnmap__)(struct BinaryApi *, NvBool);
NV_STATUS (*__binapiMapTo__)(struct BinaryApi *, RS_RES_MAP_TO_PARAMS *);
NV_STATUS (*__binapiControl_Prologue__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__binapiGetRegBaseOffsetAndSize__)(struct BinaryApi *, struct OBJGPU *, NvU32 *, NvU32 *);
NvBool (*__binapiCanCopy__)(struct BinaryApi *);
NV_STATUS (*__binapiInternalControlForward__)(struct BinaryApi *, NvU32, void *, NvU32);
void (*__binapiPreDestruct__)(struct BinaryApi *);
NV_STATUS (*__binapiUnmapFrom__)(struct BinaryApi *, RS_RES_UNMAP_FROM_PARAMS *);
void (*__binapiControl_Epilogue__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__binapiControlLookup__)(struct BinaryApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NV_STATUS (*__binapiMap__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
NvBool (*__binapiAccessCallback__)(struct BinaryApi *, struct RsClient *, void *, RsAccessRight);
};
// Typedef and class-id for BinaryApi, guarded so other generated headers may
// pre-declare them.
#ifndef __NVOC_CLASS_BinaryApi_TYPEDEF__
#define __NVOC_CLASS_BinaryApi_TYPEDEF__
typedef struct BinaryApi BinaryApi;
#endif /* __NVOC_CLASS_BinaryApi_TYPEDEF__ */
#ifndef __nvoc_class_id_BinaryApi
#define __nvoc_class_id_BinaryApi 0xb7a47c
#endif /* __nvoc_class_id_BinaryApi */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi;
// staticCast: compile-time cast through the cached base pointer;
// dynamicCast: runtime-checked cast via the NVOC cast table (returns NULL
// when the class is disabled or the object is unrelated).
#define __staticCast_BinaryApi(pThis) \
((pThis)->__nvoc_pbase_BinaryApi)
#ifdef __nvoc_binary_api_h_disabled
#define __dynamicCast_BinaryApi(pThis) ((BinaryApi*)NULL)
#else //__nvoc_binary_api_h_disabled
#define __dynamicCast_BinaryApi(pThis) \
((BinaryApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(BinaryApi)))
#endif //__nvoc_binary_api_h_disabled
// Creation entry points (implemented in the generated .c file).
NV_STATUS __nvoc_objCreateDynamic_BinaryApi(BinaryApi**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_BinaryApi(BinaryApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_BinaryApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_BinaryApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
// Virtual-call convenience macros: each method name expands to its
// *_DISPATCH inline function, which indirects through the vtable slot.
#define binapiControl(pResource, pCallContext, pParams) binapiControl_DISPATCH(pResource, pCallContext, pParams)
#define binapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) binapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define binapiUnmap(pGpuResource, pCallContext, pCpuMapping) binapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define binapiGetMemInterMapParams(pRmResource, pParams) binapiGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define binapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) binapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define binapiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) binapiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define binapiGetInternalObjectHandle(pGpuResource) binapiGetInternalObjectHandle_DISPATCH(pGpuResource)
#define binapiControlFilter(pResource, pCallContext, pParams) binapiControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define binapiAddAdditionalDependants(pClient, pResource, pReference) binapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define binapiGetRefCount(pResource) binapiGetRefCount_DISPATCH(pResource)
#define binapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) binapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define binapiMapTo(pResource, pParams) binapiMapTo_DISPATCH(pResource, pParams)
#define binapiControl_Prologue(pResource, pCallContext, pParams) binapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define binapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) binapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
#define binapiCanCopy(pResource) binapiCanCopy_DISPATCH(pResource)
#define binapiInternalControlForward(pGpuResource, command, pParams, size) binapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define binapiPreDestruct(pResource) binapiPreDestruct_DISPATCH(pResource)
#define binapiUnmapFrom(pResource, pParams) binapiUnmapFrom_DISPATCH(pResource, pParams)
#define binapiControl_Epilogue(pResource, pCallContext, pParams) binapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define binapiControlLookup(pResource, pParams, ppEntry) binapiControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define binapiMap(pGpuResource, pCallContext, pParams, pCpuMapping) binapiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define binapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) binapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
// binapiControl_IMPL is the hand-written override, defined in binary_api.c.
NV_STATUS binapiControl_IMPL(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
// Inline dispatch helpers: each reads the matching function-pointer slot
// from the object's vtable and invokes it with the arguments unchanged.
static inline NV_STATUS binapiControl_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__binapiControl__(pResource, pCallContext, pParams);
}
static inline NvBool binapiShareCallback_DISPATCH(struct BinaryApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pGpuResource->__binapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}
static inline NV_STATUS binapiUnmap_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__binapiUnmap__(pGpuResource, pCallContext, pCpuMapping);
}
static inline NV_STATUS binapiGetMemInterMapParams_DISPATCH(struct BinaryApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return pRmResource->__binapiGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS binapiGetMemoryMappingDescriptor_DISPATCH(struct BinaryApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return pRmResource->__binapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline NV_STATUS binapiGetMapAddrSpace_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return pGpuResource->__binapiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}
static inline NvHandle binapiGetInternalObjectHandle_DISPATCH(struct BinaryApi *pGpuResource) {
return pGpuResource->__binapiGetInternalObjectHandle__(pGpuResource);
}
static inline NV_STATUS binapiControlFilter_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__binapiControlFilter__(pResource, pCallContext, pParams);
}
static inline void binapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct BinaryApi *pResource, RsResourceRef *pReference) {
pResource->__binapiAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NvU32 binapiGetRefCount_DISPATCH(struct BinaryApi *pResource) {
return pResource->__binapiGetRefCount__(pResource);
}
static inline NV_STATUS binapiCheckMemInterUnmap_DISPATCH(struct BinaryApi *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__binapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NV_STATUS binapiMapTo_DISPATCH(struct BinaryApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__binapiMapTo__(pResource, pParams);
}
static inline NV_STATUS binapiControl_Prologue_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__binapiControl_Prologue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS binapiGetRegBaseOffsetAndSize_DISPATCH(struct BinaryApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return pGpuResource->__binapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize);
}
static inline NvBool binapiCanCopy_DISPATCH(struct BinaryApi *pResource) {
return pResource->__binapiCanCopy__(pResource);
}
static inline NV_STATUS binapiInternalControlForward_DISPATCH(struct BinaryApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return pGpuResource->__binapiInternalControlForward__(pGpuResource, command, pParams, size);
}
static inline void binapiPreDestruct_DISPATCH(struct BinaryApi *pResource) {
pResource->__binapiPreDestruct__(pResource);
}
static inline NV_STATUS binapiUnmapFrom_DISPATCH(struct BinaryApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pResource->__binapiUnmapFrom__(pResource, pParams);
}
static inline void binapiControl_Epilogue_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__binapiControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS binapiControlLookup_DISPATCH(struct BinaryApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__binapiControlLookup__(pResource, pParams, ppEntry);
}
static inline NV_STATUS binapiMap_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__binapiMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}
static inline NvBool binapiAccessCallback_DISPATCH(struct BinaryApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pResource->__binapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
// Constructor implementation for BinaryApi; invoked through the
// __nvoc_binapiConstruct alias by the generated __nvoc_ctor_* machinery.
NV_STATUS binapiConstruct_IMPL(struct BinaryApi *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_binapiConstruct(arg_pResource, arg_pCallContext, arg_pParams) binapiConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams)
// Re-arm the PRIVATE_FIELD() access gate for the next class in this header:
// translation units that define NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED see
// private members by their real names; everyone else gets the mangled
// NVOC_PRIVATE_FIELD form.
#undef PRIVATE_FIELD
#ifdef NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// BinaryApiPrivileged: NVOC-generated class derived from BinaryApi.
// Layout follows the NVOC object model: RTTI pointer first, then the
// embedded base-class instance, then cached pointers to every ancestor
// (for constant-time casts), then per-object virtual function pointers
// (the "vtable" lives in each instance and is installed by the generated
// init-funcTable routine).
struct BinaryApiPrivileged {
const struct NVOC_RTTI *__nvoc_rtti;
// Embedded base-class subobject.
struct BinaryApi __nvoc_base_BinaryApi;
// Cached ancestor pointers, one per class in the inheritance chain.
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct GpuResource *__nvoc_pbase_GpuResource;
struct BinaryApi *__nvoc_pbase_BinaryApi;
struct BinaryApiPrivileged *__nvoc_pbase_BinaryApiPrivileged;
// Per-instance virtual method slots; invoked via the *_DISPATCH wrappers.
NV_STATUS (*__binapiprivControl__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NvBool (*__binapiprivShareCallback__)(struct BinaryApiPrivileged *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__binapiprivUnmap__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RsCpuMapping *);
NV_STATUS (*__binapiprivGetMemInterMapParams__)(struct BinaryApiPrivileged *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__binapiprivGetMemoryMappingDescriptor__)(struct BinaryApiPrivileged *, struct MEMORY_DESCRIPTOR **);
NV_STATUS (*__binapiprivGetMapAddrSpace__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
NvHandle (*__binapiprivGetInternalObjectHandle__)(struct BinaryApiPrivileged *);
NV_STATUS (*__binapiprivControlFilter__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
void (*__binapiprivAddAdditionalDependants__)(struct RsClient *, struct BinaryApiPrivileged *, RsResourceRef *);
NvU32 (*__binapiprivGetRefCount__)(struct BinaryApiPrivileged *);
NV_STATUS (*__binapiprivCheckMemInterUnmap__)(struct BinaryApiPrivileged *, NvBool);
NV_STATUS (*__binapiprivMapTo__)(struct BinaryApiPrivileged *, RS_RES_MAP_TO_PARAMS *);
NV_STATUS (*__binapiprivControl_Prologue__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__binapiprivGetRegBaseOffsetAndSize__)(struct BinaryApiPrivileged *, struct OBJGPU *, NvU32 *, NvU32 *);
NvBool (*__binapiprivCanCopy__)(struct BinaryApiPrivileged *);
NV_STATUS (*__binapiprivInternalControlForward__)(struct BinaryApiPrivileged *, NvU32, void *, NvU32);
void (*__binapiprivPreDestruct__)(struct BinaryApiPrivileged *);
NV_STATUS (*__binapiprivUnmapFrom__)(struct BinaryApiPrivileged *, RS_RES_UNMAP_FROM_PARAMS *);
void (*__binapiprivControl_Epilogue__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__binapiprivControlLookup__)(struct BinaryApiPrivileged *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NV_STATUS (*__binapiprivMap__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
NvBool (*__binapiprivAccessCallback__)(struct BinaryApiPrivileged *, struct RsClient *, void *, RsAccessRight);
};
// Public typedef, class id, cast helpers, and creation entry points for
// BinaryApiPrivileged (all NVOC-generated boilerplate).
#ifndef __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__
#define __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__
typedef struct BinaryApiPrivileged BinaryApiPrivileged;
#endif /* __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ */
#ifndef __nvoc_class_id_BinaryApiPrivileged
#define __nvoc_class_id_BinaryApiPrivileged 0x1c0579
#endif /* __nvoc_class_id_BinaryApiPrivileged */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged;
// Static cast: O(1) via the cached pbase pointer stored in the object.
#define __staticCast_BinaryApiPrivileged(pThis) \
((pThis)->__nvoc_pbase_BinaryApiPrivileged)
// Dynamic cast: RTTI-based; compiled out to NULL when the class is disabled.
#ifdef __nvoc_binary_api_h_disabled
#define __dynamicCast_BinaryApiPrivileged(pThis) ((BinaryApiPrivileged*)NULL)
#else //__nvoc_binary_api_h_disabled
#define __dynamicCast_BinaryApiPrivileged(pThis) \
((BinaryApiPrivileged*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(BinaryApiPrivileged)))
#endif //__nvoc_binary_api_h_disabled
NV_STATUS __nvoc_objCreateDynamic_BinaryApiPrivileged(BinaryApiPrivileged**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_BinaryApiPrivileged(BinaryApiPrivileged**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_BinaryApiPrivileged(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_BinaryApiPrivileged((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
//
// Convenience aliases (NVOC-generated): route each binapipriv* virtual call
// through its _DISPATCH wrapper, which invokes the per-object function
// pointer stored in the BinaryApiPrivileged instance.
//
#define binapiprivControl(pResource, pCallContext, pParams) binapiprivControl_DISPATCH(pResource, pCallContext, pParams)
#define binapiprivShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) binapiprivShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define binapiprivUnmap(pGpuResource, pCallContext, pCpuMapping) binapiprivUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define binapiprivGetMemInterMapParams(pRmResource, pParams) binapiprivGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define binapiprivGetMemoryMappingDescriptor(pRmResource, ppMemDesc) binapiprivGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define binapiprivGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) binapiprivGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define binapiprivGetInternalObjectHandle(pGpuResource) binapiprivGetInternalObjectHandle_DISPATCH(pGpuResource)
#define binapiprivControlFilter(pResource, pCallContext, pParams) binapiprivControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define binapiprivAddAdditionalDependants(pClient, pResource, pReference) binapiprivAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define binapiprivGetRefCount(pResource) binapiprivGetRefCount_DISPATCH(pResource)
#define binapiprivCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) binapiprivCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define binapiprivMapTo(pResource, pParams) binapiprivMapTo_DISPATCH(pResource, pParams)
#define binapiprivControl_Prologue(pResource, pCallContext, pParams) binapiprivControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define binapiprivGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) binapiprivGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
#define binapiprivCanCopy(pResource) binapiprivCanCopy_DISPATCH(pResource)
#define binapiprivInternalControlForward(pGpuResource, command, pParams, size) binapiprivInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define binapiprivPreDestruct(pResource) binapiprivPreDestruct_DISPATCH(pResource)
#define binapiprivUnmapFrom(pResource, pParams) binapiprivUnmapFrom_DISPATCH(pResource, pParams)
#define binapiprivControl_Epilogue(pResource, pCallContext, pParams) binapiprivControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define binapiprivControlLookup(pResource, pParams, ppEntry) binapiprivControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define binapiprivMap(pGpuResource, pCallContext, pParams, pCpuMapping) binapiprivMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define binapiprivAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) binapiprivAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
//
// NVOC-generated virtual dispatch for BinaryApiPrivileged.  Each _DISPATCH
// wrapper forwards to the per-instance function pointer of the same name,
// emulating C++ virtual calls in C.  binapiprivControl has a concrete
// override (binapiprivControl_IMPL); the rest only forward.
//
NV_STATUS binapiprivControl_IMPL(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
static inline NV_STATUS binapiprivControl_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__binapiprivControl__(pResource, pCallContext, pParams);
}
static inline NvBool binapiprivShareCallback_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pGpuResource->__binapiprivShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}
static inline NV_STATUS binapiprivUnmap_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__binapiprivUnmap__(pGpuResource, pCallContext, pCpuMapping);
}
static inline NV_STATUS binapiprivGetMemInterMapParams_DISPATCH(struct BinaryApiPrivileged *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return pRmResource->__binapiprivGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS binapiprivGetMemoryMappingDescriptor_DISPATCH(struct BinaryApiPrivileged *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return pRmResource->__binapiprivGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline NV_STATUS binapiprivGetMapAddrSpace_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return pGpuResource->__binapiprivGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}
static inline NvHandle binapiprivGetInternalObjectHandle_DISPATCH(struct BinaryApiPrivileged *pGpuResource) {
return pGpuResource->__binapiprivGetInternalObjectHandle__(pGpuResource);
}
static inline NV_STATUS binapiprivControlFilter_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__binapiprivControlFilter__(pResource, pCallContext, pParams);
}
// Note: first parameter is the client, not the object, for this method.
static inline void binapiprivAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct BinaryApiPrivileged *pResource, RsResourceRef *pReference) {
pResource->__binapiprivAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NvU32 binapiprivGetRefCount_DISPATCH(struct BinaryApiPrivileged *pResource) {
return pResource->__binapiprivGetRefCount__(pResource);
}
static inline NV_STATUS binapiprivCheckMemInterUnmap_DISPATCH(struct BinaryApiPrivileged *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__binapiprivCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NV_STATUS binapiprivMapTo_DISPATCH(struct BinaryApiPrivileged *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__binapiprivMapTo__(pResource, pParams);
}
static inline NV_STATUS binapiprivControl_Prologue_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__binapiprivControl_Prologue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS binapiprivGetRegBaseOffsetAndSize_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return pGpuResource->__binapiprivGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize);
}
static inline NvBool binapiprivCanCopy_DISPATCH(struct BinaryApiPrivileged *pResource) {
return pResource->__binapiprivCanCopy__(pResource);
}
static inline NV_STATUS binapiprivInternalControlForward_DISPATCH(struct BinaryApiPrivileged *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return pGpuResource->__binapiprivInternalControlForward__(pGpuResource, command, pParams, size);
}
static inline void binapiprivPreDestruct_DISPATCH(struct BinaryApiPrivileged *pResource) {
pResource->__binapiprivPreDestruct__(pResource);
}
static inline NV_STATUS binapiprivUnmapFrom_DISPATCH(struct BinaryApiPrivileged *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pResource->__binapiprivUnmapFrom__(pResource, pParams);
}
static inline void binapiprivControl_Epilogue_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__binapiprivControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS binapiprivControlLookup_DISPATCH(struct BinaryApiPrivileged *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__binapiprivControlLookup__(pResource, pParams, ppEntry);
}
static inline NV_STATUS binapiprivMap_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__binapiprivMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}
static inline NvBool binapiprivAccessCallback_DISPATCH(struct BinaryApiPrivileged *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pResource->__binapiprivAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
NV_STATUS binapiprivConstruct_IMPL(struct BinaryApiPrivileged *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_binapiprivConstruct(arg_pResource, arg_pCallContext, arg_pParams) binapiprivConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams)
#undef PRIVATE_FIELD
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_BINARY_API_NVOC_H_

View File

@@ -0,0 +1,3 @@
#include "g_chips2halspec_nvoc.h"

View File

@@ -0,0 +1,45 @@
#define NVOC_CHIPS2HALSPEC_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_chips2halspec_nvoc.h"
/*
 * Select the ChipHal variant index for the given chip identity
 * (PMC boot-0 architecture/implementation plus HIDREV).  Only the
 * T234D variant is enabled in this build; an unrecognized identity
 * leaves the variant index untouched.
 */
void __nvoc_init_halspec_ChipHal(ChipHal *pChipHal, NvU32 arch, NvU32 impl, NvU32 hidrev)
{
    const NvBool bIsT234D = (arch == 0x0) &&
                            (impl == 0x0) &&
                            (hidrev == 0x235);

    if (bIsT234D)
    {
        pChipHal->__nvoc_HalVarIdx = 80;
    }
}
/*
 * Select the RmVariantHal variant index from the runtime variant.
 * Only PF_KERNEL_ONLY (value 2) is enabled in this build; any other
 * variant leaves the index untouched.
 */
void __nvoc_init_halspec_RmVariantHal(RmVariantHal *pRmVariantHal, RM_RUNTIME_VARIANT rmVariant)
{
    const NvBool bPfKernelOnly = (rmVariant == RM_RUNTIME_VARIANT_PF_KERNEL_ONLY);

    if (bPfKernelOnly)
    {
        pRmVariantHal->__nvoc_HalVarIdx = 1;
    }
}
/*
 * Select the DispIpHal variant index from the display IP version.
 * Only DISPv0402 is enabled in this build; an unknown version leaves
 * the variant index untouched.
 */
void __nvoc_init_halspec_DispIpHal(DispIpHal *pDispIpHal, NvU32 ipver)
{
    switch (ipver)
    {
        case 0x4020000: // DISPv0402
            pDispIpHal->__nvoc_HalVarIdx = 12;
            break;
        default:
            break;
    }
}
/*
 * Select the DpuIpHal variant index from the DPU IP version.
 * Only DPUv0000 is enabled in this build; an unknown version leaves
 * the variant index untouched.
 */
void __nvoc_init_halspec_DpuIpHal(DpuIpHal *pDpuIpHal, NvU32 ipver)
{
    if (ipver != 0x0)
    {
        return; // not DPUv0000
    }

    // DPUv0000
    pDpuIpHal->__nvoc_HalVarIdx = 5;
}

View File

@@ -0,0 +1,118 @@
#ifndef _G_CHIPS2HALSPEC_NVOC_H_
#define _G_CHIPS2HALSPEC_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "g_chips2halspec_nvoc.h"
#ifndef _CHIPS_2_HALSPEC_H_
#define _CHIPS_2_HALSPEC_H_
#include "nvtypes.h"
#include "rmconfig.h"
// Several WARs that are only visible to the NVOC compiler.
#define GPUHAL_ARCH(x) NV_PMC_BOOT_0_ARCHITECTURE_##x
#define GPUHAL_IMPL(x) NV_PMC_BOOT_0_IMPLEMENTATION_##x
// Create alias 'group' to provide a concise syntax.
#define group variant_group
// Use in a hal block to indicate that the function isn't wired to any enabled chips.
#define __disabled__ false
// Chip halspec: holds the selected HAL variant index for the running chip
// (set by __nvoc_init_halspec_ChipHal from arch/impl/hidrev).
struct ChipHal {
unsigned short __nvoc_HalVarIdx;
};
typedef struct ChipHal ChipHal;
void __nvoc_init_halspec_ChipHal(ChipHal*, NvU32, NvU32, NvU32);
/*
* RM Runtime Variant Halspec
*
* One group of Hal Variants that presents two perspectives:
*
* Operating Environment Perspective: VF / PF / UCODE
* VF | PF | UCODE = true
* VF & PF & UCODE = false
*
* VF : RM is running in VGPU Guest environment. Equals to IS_VIRTUAL(pGpu)
* PF : RM is running in Host/Baremetal in standard PCIE environment
* UCODE : RM is running on microcontroller
*
* Functionality-Based Perspective: KERNEL_ONLY / PHYSICAL_ONLY / MONOLITHIC
* KERNEL_ONLY | PHYSICAL_ONLY | MONOLITHIC = true
* KERNEL_ONLY & PHYSICAL_ONLY & MONOLITHIC = false
*
* KERNEL_ONLY : RM does not own HW. The physical part is offloaded to Ucode.
* PHYSICAL_ONLY : RM owns HW but does not expose services to RM Clients
* MONOLITHIC : RM owns both the interface to the client and the underlying HW.
*
 * Note: GSP Client "IS_GSP_CLIENT(pGpu)" maps to "PF_KERNEL_ONLY"
* DCE Client maps to "PF_KERNEL_ONLY & T234D"
*
*
* HAL Variants
* +--------+ +----------------+
* | VF | <-----| VF |--+
* +--------+ +----------------+ | +---------------+
* |--> | KERNEL_ONLY |
* +----------------+ | +---------------+
* +--| PF_KERNEL_ONLY |--+
* +--------+ | +----------------+
* | PF | <--|
* +--------+ | +----------------+ +---------------+
* +--| PF_MONOLITHIC |-----> | MONOLITHIC |
* +----------------+ +---------------+
*
* +--------+ +----------------+ +---------------+
* | UCODE | <-----| UCODE |-----> | PHYSICAL_ONLY |
* +--------+ +----------------+ +---------------+
*
* */
// Runtime variant of the Resource Manager; see the diagram above for how
// the operating-environment and functionality perspectives relate.
typedef enum _RM_RUNTIME_VARIANT {
RM_RUNTIME_VARIANT_VF = 1,
RM_RUNTIME_VARIANT_PF_KERNEL_ONLY = 2,
RM_RUNTIME_VARIANT_PF_MONOLITHIC = 3,
RM_RUNTIME_VARIANT_UCODE = 4,
} RM_RUNTIME_VARIANT;
// Runtime-variant halspec: holds the selected HAL variant index
// (set by __nvoc_init_halspec_RmVariantHal).
struct RmVariantHal {
unsigned short __nvoc_HalVarIdx;
};
typedef struct RmVariantHal RmVariantHal;
void __nvoc_init_halspec_RmVariantHal(RmVariantHal*, RM_RUNTIME_VARIANT);
/* DISP IP versions */
// Display-IP halspec: variant index selected from the DISP IP version
// (set by __nvoc_init_halspec_DispIpHal).
struct DispIpHal {
unsigned short __nvoc_HalVarIdx;
};
typedef struct DispIpHal DispIpHal;
void __nvoc_init_halspec_DispIpHal(DispIpHal*, NvU32);
/* The 'delete' rules for DispIpHal and ChipHal */
// delete DISPv0402 & ~T234D;
// delete ~DISPv0402 & T234D;
/* DPU IP versions */
// DPU-IP halspec: variant index selected from the DPU IP version
// (set by __nvoc_init_halspec_DpuIpHal).
struct DpuIpHal {
unsigned short __nvoc_HalVarIdx;
};
typedef struct DpuIpHal DpuIpHal;
void __nvoc_init_halspec_DpuIpHal(DpuIpHal*, NvU32);
/* The 'delete' rules for DpuIpHal and ChipHal */
// Undo the 'group' syntax alias established above.
#undef group
#endif /* _CHIPS_2_HALSPEC_H_ */
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_CHIPS2HALSPEC_NVOC_H_

View File

@@ -0,0 +1,385 @@
#define NVOC_CLIENT_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_client_nvoc.h"
// Compile-time class-id uniqueness check: two classes generated with the
// same id would produce a duplicate-symbol link error in DEBUG builds.
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x21d236 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_UserInfo;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared;
void __nvoc_init_UserInfo(UserInfo*);
void __nvoc_init_funcTable_UserInfo(UserInfo*);
NV_STATUS __nvoc_ctor_UserInfo(UserInfo*);
void __nvoc_init_dataField_UserInfo(UserInfo*);
void __nvoc_dtor_UserInfo(UserInfo*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_UserInfo;
// RTTI records: one per class in UserInfo's inheritance chain, each with
// the destructor to use and the byte offset of that base within UserInfo.
static const struct NVOC_RTTI __nvoc_rtti_UserInfo_UserInfo = {
/*pClassDef=*/ &__nvoc_class_def_UserInfo,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_UserInfo,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_UserInfo_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(UserInfo, __nvoc_base_RsShared.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_UserInfo_RsShared = {
/*pClassDef=*/ &__nvoc_class_def_RsShared,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(UserInfo, __nvoc_base_RsShared),
};
// Cast info: the set of classes UserInfo can be dynamically cast to/from.
static const struct NVOC_CASTINFO __nvoc_castinfo_UserInfo = {
/*numRelatives=*/ 3,
/*relatives=*/ {
&__nvoc_rtti_UserInfo_UserInfo,
&__nvoc_rtti_UserInfo_RsShared,
&__nvoc_rtti_UserInfo_Object,
},
};
// Class definition consumed by the NVOC runtime (size, id, creation hook).
const struct NVOC_CLASS_DEF __nvoc_class_def_UserInfo =
{
/*classInfo=*/ {
/*size=*/ sizeof(UserInfo),
/*classId=*/ classId(UserInfo),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "UserInfo",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_UserInfo,
/*pCastInfo=*/ &__nvoc_castinfo_UserInfo,
/*pExportInfo=*/ &__nvoc_export_info_UserInfo
};
// UserInfo exports no RM control methods.
const struct NVOC_EXPORT_INFO __nvoc_export_info_UserInfo =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_RsShared(RsShared*);
// Destructor: run the class's own destruct hook, then destroy the base
// (reverse order of construction).
void __nvoc_dtor_UserInfo(UserInfo *pThis) {
__nvoc_userinfoDestruct(pThis);
__nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Data-field initializer: UserInfo has no generated data fields to set.
void __nvoc_init_dataField_UserInfo(UserInfo *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_RsShared(RsShared* );
// Constructor: build the base class, init data fields, then run the
// class's construct hook.  On failure the goto chain unwinds whatever
// was already constructed, in reverse order.
NV_STATUS __nvoc_ctor_UserInfo(UserInfo *pThis) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared);
if (status != NV_OK) goto __nvoc_ctor_UserInfo_fail_RsShared;
__nvoc_init_dataField_UserInfo(pThis);
status = __nvoc_userinfoConstruct(pThis);
if (status != NV_OK) goto __nvoc_ctor_UserInfo_fail__init;
goto __nvoc_ctor_UserInfo_exit; // Success
__nvoc_ctor_UserInfo_fail__init:
__nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared);
__nvoc_ctor_UserInfo_fail_RsShared:
__nvoc_ctor_UserInfo_exit:
return status;
}
// Function-table initializer: UserInfo overrides no virtual methods,
// so there is nothing to install here.
static void __nvoc_init_funcTable_UserInfo_1(UserInfo *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
void __nvoc_init_funcTable_UserInfo(UserInfo *pThis) {
__nvoc_init_funcTable_UserInfo_1(pThis);
}
void __nvoc_init_RsShared(RsShared*);
// Object initializer: wire up the cached ancestor pointers, initialize
// the base class, then install the function table.
void __nvoc_init_UserInfo(UserInfo *pThis) {
pThis->__nvoc_pbase_UserInfo = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object;
pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared;
__nvoc_init_RsShared(&pThis->__nvoc_base_RsShared);
__nvoc_init_funcTable_UserInfo(pThis);
}
// Heap-allocate, zero, and fully construct a UserInfo, optionally
// attaching it as a child of pParent in the object hierarchy.
// Returns NV_ERR_NO_MEMORY on allocation failure, or the constructor's
// status; the allocation is freed on any constructor failure.
NV_STATUS __nvoc_objCreate_UserInfo(UserInfo **ppThis, Dynamic *pParent, NvU32 createFlags) {
NV_STATUS status;
Object *pParentObj;
UserInfo *pThis;
pThis = portMemAllocNonPaged(sizeof(UserInfo));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(UserInfo));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_UserInfo);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
// NOTE(review): the dynamicCast result is not NULL-checked before
// objAddChild — generated code assumes pParent is castable to Object.
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_UserInfo(pThis);
status = __nvoc_ctor_UserInfo(pThis);
if (status != NV_OK) goto __nvoc_objCreate_UserInfo_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_UserInfo_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
// va_list creation entry point: UserInfo's constructor takes no extra
// arguments, so the args parameter is unused.
NV_STATUS __nvoc_objCreateDynamic_UserInfo(UserInfo **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
NV_STATUS status;
status = __nvoc_objCreate_UserInfo(ppThis, pParent, createFlags);
return status;
}
// Compile-time class-id uniqueness check for RmClient (DEBUG builds only).
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xb23d83 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClient;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClient;
void __nvoc_init_RmClient(RmClient*);
void __nvoc_init_funcTable_RmClient(RmClient*);
NV_STATUS __nvoc_ctor_RmClient(RmClient*, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_RmClient(RmClient*);
void __nvoc_dtor_RmClient(RmClient*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmClient;
// RTTI records: one per class in RmClient's inheritance chain, each with
// the destructor to use and the byte offset of that base within RmClient.
static const struct NVOC_RTTI __nvoc_rtti_RmClient_RmClient = {
/*pClassDef=*/ &__nvoc_class_def_RmClient,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmClient,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_RmClient_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(RmClient, __nvoc_base_RsClient.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_RmClient_RsClient = {
/*pClassDef=*/ &__nvoc_class_def_RsClient,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(RmClient, __nvoc_base_RsClient),
};
// Cast info: the set of classes RmClient can be dynamically cast to/from.
static const struct NVOC_CASTINFO __nvoc_castinfo_RmClient = {
/*numRelatives=*/ 3,
/*relatives=*/ {
&__nvoc_rtti_RmClient_RmClient,
&__nvoc_rtti_RmClient_RsClient,
&__nvoc_rtti_RmClient_Object,
},
};
// Class definition consumed by the NVOC runtime (size, id, creation hook).
const struct NVOC_CLASS_DEF __nvoc_class_def_RmClient =
{
/*classInfo=*/ {
/*size=*/ sizeof(RmClient),
/*classId=*/ classId(RmClient),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "RmClient",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RmClient,
/*pCastInfo=*/ &__nvoc_castinfo_RmClient,
/*pExportInfo=*/ &__nvoc_export_info_RmClient
};
//
// Thunks: adjust the object pointer between base-class (RsClient) and
// derived-class (RmClient) views using the RTTI byte offset, then forward.
// Down-casts (base -> derived) subtract the offset; up-casts add it.
//
static NV_STATUS __nvoc_thunk_RmClient_clientValidate(struct RsClient *pClient, const API_SECURITY_INFO *pSecInfo) {
return rmclientValidate((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pSecInfo);
}
static NV_STATUS __nvoc_thunk_RmClient_clientFreeResource(struct RsClient *pClient, struct RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) {
return rmclientFreeResource((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pServer, pParams);
}
static NV_STATUS __nvoc_thunk_RmClient_clientInterMap(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams) {
return rmclientInterMap((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pMapperRef, pMappableRef, pParams);
}
static void __nvoc_thunk_RmClient_clientInterUnmap(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams) {
rmclientInterUnmap((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pMapperRef, pParams);
}
static NV_STATUS __nvoc_thunk_RmClient_clientPostProcessPendingFreeList(struct RsClient *pClient, struct RsResourceRef **ppFirstLowPriRef) {
return rmclientPostProcessPendingFreeList((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), ppFirstLowPriRef);
}
// The reverse direction: RmClient-named entry points that forward to the
// inherited RsClient implementations (up-cast: add the offset).
static NV_STATUS __nvoc_thunk_RsClient_rmclientDestructResourceRef(struct RmClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef) {
return clientDestructResourceRef((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), pServer, pResourceRef);
}
static NV_STATUS __nvoc_thunk_RsClient_rmclientValidateNewResourceHandle(struct RmClient *pClient, NvHandle hResource, NvBool bRestrict) {
return clientValidateNewResourceHandle((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), hResource, bRestrict);
}
static NV_STATUS __nvoc_thunk_RsClient_rmclientShareResource(struct RmClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) {
return clientShareResource((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), pResourceRef, pSharePolicy, pCallContext);
}
static NV_STATUS __nvoc_thunk_RsClient_rmclientUnmapMemory(struct RmClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo) {
return clientUnmapMemory((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), pResourceRef, pLockInfo, ppCpuMapping, pSecInfo);
}
const struct NVOC_EXPORT_INFO __nvoc_export_info_RmClient =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_RsClient(RsClient*);
void __nvoc_dtor_RmClient(RmClient *pThis) {
__nvoc_rmclientDestruct(pThis);
__nvoc_dtor_RsClient(&pThis->__nvoc_base_RsClient);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Data-field initializer: RmClient declares no NVOC-initialized data fields,
// so this generated hook is an intentional no-op.
void __nvoc_init_dataField_RmClient(RmClient *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_RsClient(RsClient* , struct PORT_MEM_ALLOCATOR *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
/*
 * Constructor for RmClient.
 *
 * Order: construct the RsClient base, initialize generated data fields,
 * then run the hand-written rmclientConstruct_IMPL. If the derived
 * constructor fails, the already-built base is destructed before returning,
 * so the caller never sees a partially constructed object.
 *
 * Returns NV_OK on success, otherwise the first failing status.
 */
NV_STATUS __nvoc_ctor_RmClient(RmClient *pThis, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = __nvoc_ctor_RsClient(&pThis->__nvoc_base_RsClient, arg_pAllocator, arg_pParams);
    if (status != NV_OK)
        return status; // base failed: nothing constructed yet, nothing to unwind

    __nvoc_init_dataField_RmClient(pThis);

    status = __nvoc_rmclientConstruct(pThis, arg_pAllocator, arg_pParams);
    if (status != NV_OK)
    {
        // Derived construction failed: unwind the base-class constructor.
        __nvoc_dtor_RsClient(&pThis->__nvoc_base_RsClient);
        return status;
    }

    return NV_OK;
}
// Populates the RmClient virtual function table:
//   1. RmClient's own virtuals point at the rmclient*_IMPL implementations;
//   2. the embedded RsClient base's vtable entries are redirected to
//      RmClient thunks (down-cast, then dispatch to the override);
//   3. RmClient-side slots for virtuals it does NOT override are filled with
//      up-cast thunks into the generic client* base implementations.
static void __nvoc_init_funcTable_RmClient_1(RmClient *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
// (1) Virtuals overridden by RmClient.
pThis->__rmclientValidate__ = &rmclientValidate_IMPL;
pThis->__rmclientFreeResource__ = &rmclientFreeResource_IMPL;
pThis->__rmclientInterMap__ = &rmclientInterMap_IMPL;
pThis->__rmclientInterUnmap__ = &rmclientInterUnmap_IMPL;
pThis->__rmclientPostProcessPendingFreeList__ = &rmclientPostProcessPendingFreeList_IMPL;
// (2) Base-class entries routed to the RmClient overrides via thunks.
pThis->__nvoc_base_RsClient.__clientValidate__ = &__nvoc_thunk_RmClient_clientValidate;
pThis->__nvoc_base_RsClient.__clientFreeResource__ = &__nvoc_thunk_RmClient_clientFreeResource;
pThis->__nvoc_base_RsClient.__clientInterMap__ = &__nvoc_thunk_RmClient_clientInterMap;
pThis->__nvoc_base_RsClient.__clientInterUnmap__ = &__nvoc_thunk_RmClient_clientInterUnmap;
pThis->__nvoc_base_RsClient.__clientPostProcessPendingFreeList__ = &__nvoc_thunk_RmClient_clientPostProcessPendingFreeList;
// (3) Inherited virtuals dispatched up into the RsClient base implementations.
pThis->__rmclientDestructResourceRef__ = &__nvoc_thunk_RsClient_rmclientDestructResourceRef;
pThis->__rmclientValidateNewResourceHandle__ = &__nvoc_thunk_RsClient_rmclientValidateNewResourceHandle;
pThis->__rmclientShareResource__ = &__nvoc_thunk_RsClient_rmclientShareResource;
pThis->__rmclientUnmapMemory__ = &__nvoc_thunk_RsClient_rmclientUnmapMemory;
}
// Installs the full RmClient vtable. The generator splits large tables into
// numbered helper sections; RmClient only needs section 1.
void __nvoc_init_funcTable_RmClient(RmClient *pThis) {
__nvoc_init_funcTable_RmClient_1(pThis);
}
void __nvoc_init_RsClient(RsClient*);
// Initializer: wires up the cached base-class shortcut pointers, recursively
// initializes the embedded RsClient base, then installs the vtable (which
// overwrites the base's entries with RmClient thunks — order matters).
void __nvoc_init_RmClient(RmClient *pThis) {
pThis->__nvoc_pbase_RmClient = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsClient.__nvoc_base_Object;
pThis->__nvoc_pbase_RsClient = &pThis->__nvoc_base_RsClient;
__nvoc_init_RsClient(&pThis->__nvoc_base_RsClient);
__nvoc_init_funcTable_RmClient(pThis);
}
// Factory for RmClient: allocates non-paged zeroed memory, stamps the NVOC
// RTTI, optionally links the new object into pParent's child list, then runs
// init + constructor. On success stores the object in *ppThis and returns
// NV_OK; on failure frees the allocation and leaves *ppThis unmodified.
NV_STATUS __nvoc_objCreate_RmClient(RmClient **ppThis, Dynamic *pParent, NvU32 createFlags, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status;
Object *pParentObj;
RmClient *pThis;
pThis = portMemAllocNonPaged(sizeof(RmClient));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(RmClient));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RmClient);
// Parent the object unless the parent was passed only to supply a HAL spec.
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
// NOTE(review): pParentObj is not NULL-checked after dynamicCast before
// being handed to objAddChild — presumably pParent is guaranteed to be an
// Object here; confirm against the NVOC runtime.
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_RsClient.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_RsClient.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_RmClient(pThis);
status = __nvoc_ctor_RmClient(pThis, arg_pAllocator, arg_pParams);
if (status != NV_OK) goto __nvoc_objCreate_RmClient_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_RmClient_cleanup:
// do not call destructors here since the constructor already called them
// NOTE(review): if the object was parented above, this path frees it without
// an explicit objRemoveChild here — presumably handled by the constructor's
// failure path; confirm.
portMemFree(pThis);
return status;
}
// Varargs creation entry point used by the generic NVOC object factory:
// unpacks the constructor arguments and delegates to the typed creator.
NV_STATUS __nvoc_objCreateDynamic_RmClient(RmClient **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    // Arguments must be consumed in the same order the exported
    // constructor signature declares them.
    struct PORT_MEM_ALLOCATOR *pAllocator = va_arg(args, struct PORT_MEM_ALLOCATOR *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    return __nvoc_objCreate_RmClient(ppThis, pParent, createFlags, pAllocator, pAllocParams);
}

View File

@@ -0,0 +1,323 @@
#ifndef _G_CLIENT_NVOC_H_
#define _G_CLIENT_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_client_nvoc.h"
#ifndef _CLIENT_H_
#define _CLIENT_H_
#include "ctrl/ctrl0000/ctrl0000proc.h" // NV_PROC_NAME_MAX_LENGTH
#include "containers/btree.h"
#include "resserv/resserv.h"
#include "nvoc/prelude.h"
#include "resserv/rs_client.h"
#include "rmapi/resource.h"
#include "rmapi/event.h"
#include "nvsecurityinfo.h"
// event information definitions
typedef struct _def_client_system_event_info CLI_SYSTEM_EVENT_INFO, *PCLI_SYSTEM_EVENT_INFO;
/**
 * This ref-counted object is shared by all clients that were registered under
 * the same user and is used to identify clients from the same user.
 */
// PRIVATE_FIELD: passes field names through when private access is allowed,
// otherwise mangles them via NVOC to block outside access.
#ifdef NVOC_CLIENT_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// NVOC-generated class layout: UserInfo derives from RsShared.
struct UserInfo {
const struct NVOC_RTTI *__nvoc_rtti;
struct RsShared __nvoc_base_RsShared;
// Cached base-class pointers, filled in at object init time.
struct Object *__nvoc_pbase_Object;
struct RsShared *__nvoc_pbase_RsShared;
struct UserInfo *__nvoc_pbase_UserInfo;
// UID token identifying the owning user (see class comment above).
PUID_TOKEN pUidToken;
};
#ifndef __NVOC_CLASS_UserInfo_TYPEDEF__
#define __NVOC_CLASS_UserInfo_TYPEDEF__
typedef struct UserInfo UserInfo;
#endif /* __NVOC_CLASS_UserInfo_TYPEDEF__ */
// NVOC class id for UserInfo.
#ifndef __nvoc_class_id_UserInfo
#define __nvoc_class_id_UserInfo 0x21d236
#endif /* __nvoc_class_id_UserInfo */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_UserInfo;
// Cast helpers: static cast reads the cached base pointer; dynamic cast goes
// through the NVOC RTTI machinery (stubbed to NULL when the header is disabled).
#define __staticCast_UserInfo(pThis) \
((pThis)->__nvoc_pbase_UserInfo)
#ifdef __nvoc_client_h_disabled
#define __dynamicCast_UserInfo(pThis) ((UserInfo*)NULL)
#else //__nvoc_client_h_disabled
#define __dynamicCast_UserInfo(pThis) \
((UserInfo*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(UserInfo)))
#endif //__nvoc_client_h_disabled
// Object creation entry points and constructor/destructor forwarding macros.
NV_STATUS __nvoc_objCreateDynamic_UserInfo(UserInfo**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_UserInfo(UserInfo**, Dynamic*, NvU32);
#define __objCreate_UserInfo(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_UserInfo((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
NV_STATUS userinfoConstruct_IMPL(struct UserInfo *arg_pUserInfo);
#define __nvoc_userinfoConstruct(arg_pUserInfo) userinfoConstruct_IMPL(arg_pUserInfo)
void userinfoDestruct_IMPL(struct UserInfo *pUserInfo);
#define __nvoc_userinfoDestruct(pUserInfo) userinfoDestruct_IMPL(pUserInfo)
#undef PRIVATE_FIELD
// Flags for RmClient
// Bit values stored in RmClient::Flags.
#define RMAPI_CLIENT_FLAG_RM_INTERNAL_CLIENT 0x00000001
#define RMAPI_CLIENT_FLAG_DELETE_PENDING 0x00000002
// Values for client debugger state
// Stored in RmClient::ClientDebuggerState.
#define RMAPI_CLIENT_DEBUGGER_STATE_NOT_SET 0x00000000
#define RMAPI_CLIENT_DEBUGGER_STATE_COMPUTE_ACTIVE 0x00000001
#define RMAPI_CLIENT_DEBUGGER_STATE_DEBUG_ACTIVE 0x00000002
// PRIVATE_FIELD: re-established for the RmClient class body below
// (the UserInfo section above #undef'd it).
#ifdef NVOC_CLIENT_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// NVOC-generated class layout: RmClient derives from RsClient and carries
// RM-specific per-client state (cached privilege, process identity, event
// info, security token).
struct RmClient {
const struct NVOC_RTTI *__nvoc_rtti;
struct RsClient __nvoc_base_RsClient;
// Cached base-class pointers, filled in by __nvoc_init_RmClient.
struct Object *__nvoc_pbase_Object;
struct RsClient *__nvoc_pbase_RsClient;
struct RmClient *__nvoc_pbase_RmClient;
// Virtual function table — installed by __nvoc_init_funcTable_RmClient.
NV_STATUS (*__rmclientValidate__)(struct RmClient *, const API_SECURITY_INFO *);
NV_STATUS (*__rmclientFreeResource__)(struct RmClient *, struct RsServer *, struct RS_RES_FREE_PARAMS_INTERNAL *);
NV_STATUS (*__rmclientInterMap__)(struct RmClient *, struct RsResourceRef *, struct RsResourceRef *, struct RS_INTER_MAP_PARAMS *);
void (*__rmclientInterUnmap__)(struct RmClient *, struct RsResourceRef *, struct RS_INTER_UNMAP_PARAMS *);
NV_STATUS (*__rmclientPostProcessPendingFreeList__)(struct RmClient *, struct RsResourceRef **);
NV_STATUS (*__rmclientDestructResourceRef__)(struct RmClient *, RsServer *, struct RsResourceRef *);
NV_STATUS (*__rmclientValidateNewResourceHandle__)(struct RmClient *, NvHandle, NvBool);
NV_STATUS (*__rmclientShareResource__)(struct RmClient *, struct RsResourceRef *, RS_SHARE_POLICY *, struct CALL_CONTEXT *);
NV_STATUS (*__rmclientUnmapMemory__)(struct RmClient *, struct RsResourceRef *, struct RS_LOCK_INFO *, struct RsCpuMapping **, API_SECURITY_INFO *);
// Data fields.
RS_PRIV_LEVEL cachedPrivilege;
NvBool bIsRootNonPriv;
NvU32 ProcID;
NvU32 SubProcessID;
char SubProcessName[100];
NvBool bIsSubProcessDisabled;
NvU32 Flags;               // bitmask of RMAPI_CLIENT_FLAG_* values above
NvU32 ClientDebuggerState; // one of RMAPI_CLIENT_DEBUGGER_STATE_* above
void *pOSInfo;             // opaque OS-specific handle — semantics defined elsewhere
char name[100];
CLI_SYSTEM_EVENT_INFO CliSysEventInfo;
PSECURITY_TOKEN pSecurityToken;
struct UserInfo *pUserInfo; // shared per-user object (see UserInfo above)
NvBool bIsClientVirtualMode;
PNODE pCliSyncGpuBoostTree;
};
#ifndef __NVOC_CLASS_RmClient_TYPEDEF__
#define __NVOC_CLASS_RmClient_TYPEDEF__
typedef struct RmClient RmClient;
#endif /* __NVOC_CLASS_RmClient_TYPEDEF__ */
// NVOC class id for RmClient.
#ifndef __nvoc_class_id_RmClient
#define __nvoc_class_id_RmClient 0xb23d83
#endif /* __nvoc_class_id_RmClient */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClient;
// Cast helpers (static via cached pointer, dynamic via NVOC RTTI; the
// dynamic cast degrades to NULL when the header is disabled).
#define __staticCast_RmClient(pThis) \
((pThis)->__nvoc_pbase_RmClient)
#ifdef __nvoc_client_h_disabled
#define __dynamicCast_RmClient(pThis) ((RmClient*)NULL)
#else //__nvoc_client_h_disabled
#define __dynamicCast_RmClient(pThis) \
((RmClient*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmClient)))
#endif //__nvoc_client_h_disabled
// Object creation entry points.
NV_STATUS __nvoc_objCreateDynamic_RmClient(RmClient**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_RmClient(RmClient**, Dynamic*, NvU32, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_RmClient(ppNewObj, pParent, createFlags, arg_pAllocator, arg_pParams) \
__nvoc_objCreate_RmClient((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pAllocator, arg_pParams)
// Virtual-call convenience macros: each forwards to the *_DISPATCH inline,
// which invokes the vtable slot.
#define rmclientValidate(pClient, pSecInfo) rmclientValidate_DISPATCH(pClient, pSecInfo)
#define rmclientFreeResource(pClient, pServer, pParams) rmclientFreeResource_DISPATCH(pClient, pServer, pParams)
#define rmclientInterMap(pClient, pMapperRef, pMappableRef, pParams) rmclientInterMap_DISPATCH(pClient, pMapperRef, pMappableRef, pParams)
#define rmclientInterUnmap(pClient, pMapperRef, pParams) rmclientInterUnmap_DISPATCH(pClient, pMapperRef, pParams)
#define rmclientPostProcessPendingFreeList(pClient, ppFirstLowPriRef) rmclientPostProcessPendingFreeList_DISPATCH(pClient, ppFirstLowPriRef)
#define rmclientDestructResourceRef(pClient, pServer, pResourceRef) rmclientDestructResourceRef_DISPATCH(pClient, pServer, pResourceRef)
#define rmclientValidateNewResourceHandle(pClient, hResource, bRestrict) rmclientValidateNewResourceHandle_DISPATCH(pClient, hResource, bRestrict)
#define rmclientShareResource(pClient, pResourceRef, pSharePolicy, pCallContext) rmclientShareResource_DISPATCH(pClient, pResourceRef, pSharePolicy, pCallContext)
#define rmclientUnmapMemory(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo) rmclientUnmapMemory_DISPATCH(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo)
// Dispatch layer: each *_DISPATCH inline invokes the corresponding vtable
// slot on pClient; *_IMPL prototypes declare the concrete implementations
// the vtable is wired to at init time.
NV_STATUS rmclientValidate_IMPL(struct RmClient *pClient, const API_SECURITY_INFO *pSecInfo);
static inline NV_STATUS rmclientValidate_DISPATCH(struct RmClient *pClient, const API_SECURITY_INFO *pSecInfo) {
return pClient->__rmclientValidate__(pClient, pSecInfo);
}
NV_STATUS rmclientFreeResource_IMPL(struct RmClient *pClient, struct RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams);
static inline NV_STATUS rmclientFreeResource_DISPATCH(struct RmClient *pClient, struct RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) {
return pClient->__rmclientFreeResource__(pClient, pServer, pParams);
}
NV_STATUS rmclientInterMap_IMPL(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams);
static inline NV_STATUS rmclientInterMap_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams) {
return pClient->__rmclientInterMap__(pClient, pMapperRef, pMappableRef, pParams);
}
void rmclientInterUnmap_IMPL(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams);
static inline void rmclientInterUnmap_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams) {
pClient->__rmclientInterUnmap__(pClient, pMapperRef, pParams);
}
NV_STATUS rmclientPostProcessPendingFreeList_IMPL(struct RmClient *pClient, struct RsResourceRef **ppFirstLowPriRef);
static inline NV_STATUS rmclientPostProcessPendingFreeList_DISPATCH(struct RmClient *pClient, struct RsResourceRef **ppFirstLowPriRef) {
return pClient->__rmclientPostProcessPendingFreeList__(pClient, ppFirstLowPriRef);
}
// The remaining dispatchers have no rmclient*_IMPL: their vtable slots point
// at thunks into the RsClient base implementations (see the generated .c).
static inline NV_STATUS rmclientDestructResourceRef_DISPATCH(struct RmClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef) {
return pClient->__rmclientDestructResourceRef__(pClient, pServer, pResourceRef);
}
static inline NV_STATUS rmclientValidateNewResourceHandle_DISPATCH(struct RmClient *pClient, NvHandle hResource, NvBool bRestrict) {
return pClient->__rmclientValidateNewResourceHandle__(pClient, hResource, bRestrict);
}
static inline NV_STATUS rmclientShareResource_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) {
return pClient->__rmclientShareResource__(pClient, pResourceRef, pSharePolicy, pCallContext);
}
static inline NV_STATUS rmclientUnmapMemory_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo) {
return pClient->__rmclientUnmapMemory__(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo);
}
// Constructor/destructor implementations and their NVOC forwarding macros.
NV_STATUS rmclientConstruct_IMPL(struct RmClient *arg_pClient, struct PORT_MEM_ALLOCATOR *arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_rmclientConstruct(arg_pClient, arg_pAllocator, arg_pParams) rmclientConstruct_IMPL(arg_pClient, arg_pAllocator, arg_pParams)
void rmclientDestruct_IMPL(struct RmClient *pClient);
#define __nvoc_rmclientDestruct(pClient) rmclientDestruct_IMPL(pClient)
// Non-virtual helpers. When the header is disabled (__nvoc_client_h_disabled)
// each call site gets an asserting stub returning a zeroed/failure value;
// otherwise the macro forwards straight to the *_IMPL function.
RS_PRIV_LEVEL rmclientGetCachedPrivilege_IMPL(struct RmClient *pClient);
#ifdef __nvoc_client_h_disabled
static inline RS_PRIV_LEVEL rmclientGetCachedPrivilege(struct RmClient *pClient) {
NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!");
RS_PRIV_LEVEL ret;
portMemSet(&ret, 0, sizeof(RS_PRIV_LEVEL));
return ret;
}
#else //__nvoc_client_h_disabled
#define rmclientGetCachedPrivilege(pClient) rmclientGetCachedPrivilege_IMPL(pClient)
#endif //__nvoc_client_h_disabled
NvBool rmclientIsAdmin_IMPL(struct RmClient *pClient, RS_PRIV_LEVEL privLevel);
#ifdef __nvoc_client_h_disabled
static inline NvBool rmclientIsAdmin(struct RmClient *pClient, RS_PRIV_LEVEL privLevel) {
NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!");
return NV_FALSE;
}
#else //__nvoc_client_h_disabled
#define rmclientIsAdmin(pClient, privLevel) rmclientIsAdmin_IMPL(pClient, privLevel)
#endif //__nvoc_client_h_disabled
void rmclientSetClientFlags_IMPL(struct RmClient *pClient, NvU32 clientFlags);
#ifdef __nvoc_client_h_disabled
static inline void rmclientSetClientFlags(struct RmClient *pClient, NvU32 clientFlags) {
NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!");
}
#else //__nvoc_client_h_disabled
#define rmclientSetClientFlags(pClient, clientFlags) rmclientSetClientFlags_IMPL(pClient, clientFlags)
#endif //__nvoc_client_h_disabled
void *rmclientGetSecurityToken_IMPL(struct RmClient *pClient);
#ifdef __nvoc_client_h_disabled
static inline void *rmclientGetSecurityToken(struct RmClient *pClient) {
NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!");
return NULL;
}
#else //__nvoc_client_h_disabled
#define rmclientGetSecurityToken(pClient) rmclientGetSecurityToken_IMPL(pClient)
#endif //__nvoc_client_h_disabled
NvBool rmclientIsCapableOrAdmin_IMPL(struct RmClient *pClient, NvU32 capability, RS_PRIV_LEVEL privLevel);
#ifdef __nvoc_client_h_disabled
static inline NvBool rmclientIsCapableOrAdmin(struct RmClient *pClient, NvU32 capability, RS_PRIV_LEVEL privLevel) {
NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!");
return NV_FALSE;
}
#else //__nvoc_client_h_disabled
#define rmclientIsCapableOrAdmin(pClient, capability, privLevel) rmclientIsCapableOrAdmin_IMPL(pClient, capability, privLevel)
#endif //__nvoc_client_h_disabled
NvBool rmclientIsCapable_IMPL(struct RmClient *pClient, NvU32 capability);
#ifdef __nvoc_client_h_disabled
static inline NvBool rmclientIsCapable(struct RmClient *pClient, NvU32 capability) {
NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!");
return NV_FALSE;
}
#else //__nvoc_client_h_disabled
#define rmclientIsCapable(pClient, capability) rmclientIsCapable_IMPL(pClient, capability)
#endif //__nvoc_client_h_disabled
#undef PRIVATE_FIELD
// Global client/user bookkeeping lists.
// NOTE(review): the list name says it is protected by the GPUs lock; the
// locking discipline for g_userInfoList is not visible here — confirm at
// the definitions.
MAKE_LIST(RmClientList, RmClient*);
extern RmClientList g_clientListBehindGpusLock;
MAKE_LIST(UserInfoList, UserInfo*);
extern UserInfoList g_userInfoList;
//
// Convenience rmclientXxxByHandle util macros. Ideally, code operates on
// pClient directly instead of hClient but providing these for compatibility
// to hClient-heavy code.
//
RS_PRIV_LEVEL rmclientGetCachedPrivilegeByHandle(NvHandle hClient);
NvBool rmclientIsAdminByHandle(NvHandle hClient, RS_PRIV_LEVEL privLevel);
NvBool rmclientSetClientFlagsByHandle(NvHandle hClient, NvU32 clientFlags);
void rmclientPromoteDebuggerStateByHandle(NvHandle hClient, NvU32 newMinimumState);
void *rmclientGetSecurityTokenByHandle(NvHandle hClient);
NV_STATUS rmclientUserClientSecurityCheckByHandle(NvHandle hClient, const API_SECURITY_INFO *pSecInfo);
NvBool rmclientIsCapableOrAdminByHandle(NvHandle hClient, NvU32 capability, RS_PRIV_LEVEL privLevel);
NvBool rmclientIsCapableByHandle(NvHandle hClient, NvU32 capability);
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_CLIENT_NVOC_H_

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,635 @@
#ifndef _G_CLIENT_RESOURCE_NVOC_H_
#define _G_CLIENT_RESOURCE_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_client_resource_nvoc.h"
#ifndef _CLIENT_RESOURCE_H_
#define _CLIENT_RESOURCE_H_
#include "resserv/resserv.h"
#include "nvoc/prelude.h"
#include "resserv/rs_client.h"
#include "rmapi/resource.h"
#include "rmapi/event.h"
#include "rmapi/control.h"
#include "ctrl/ctrl0000/ctrl0000gpu.h"
#include "ctrl/ctrl0000/ctrl0000gpuacct.h"
#include "ctrl/ctrl0000/ctrl0000gsync.h"
#include "ctrl/ctrl0000/ctrl0000diag.h"
#include "ctrl/ctrl0000/ctrl0000event.h"
#include "ctrl/ctrl0000/ctrl0000nvd.h"
#include "ctrl/ctrl0000/ctrl0000proc.h"
#include "ctrl/ctrl0000/ctrl0000syncgpuboost.h"
#include "ctrl/ctrl0000/ctrl0000gspc.h"
#include "ctrl/ctrl0000/ctrl0000vgpu.h"
#include "ctrl/ctrl0000/ctrl0000client.h"
/* include appropriate os-specific command header */
#if defined(NV_UNIX) || defined(NV_QNX)
#include "ctrl/ctrl0000/ctrl0000unix.h"
#endif
// PRIVATE_FIELD: passes field names through when private access is allowed,
// otherwise mangles them via NVOC to block outside access.
#ifdef NVOC_CLIENT_RESOURCE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// NVOC-generated class layout: RmClientResource derives from RsClientResource,
// RmResourceCommon, and Notifier, and carries the vtable for the NV01_ROOT
// (client-level) RM control commands.
struct RmClientResource {
const struct NVOC_RTTI *__nvoc_rtti;
struct RsClientResource __nvoc_base_RsClientResource;
struct RmResourceCommon __nvoc_base_RmResourceCommon;
struct Notifier __nvoc_base_Notifier;
// Cached base-class pointers, filled in at object init time.
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RsClientResource *__nvoc_pbase_RsClientResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct INotifier *__nvoc_pbase_INotifier;
struct Notifier *__nvoc_pbase_Notifier;
struct RmClientResource *__nvoc_pbase_RmClientResource;
// Access/sharing callbacks.
NvBool (*__cliresAccessCallback__)(struct RmClientResource *, struct RsClient *, void *, RsAccessRight);
NvBool (*__cliresShareCallback__)(struct RmClientResource *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
// NV0000_CTRL_CMD_SYSTEM_* control handlers.
NV_STATUS (*__cliresCtrlCmdSystemGetCpuInfo__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemGetFeatures__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemGetBuildVersionV2__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemSetMemorySize__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemGetClassList__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemNotifyEvent__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemDebugCtrlRmMsg__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemGetPrivilegedStatus__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemGetFabricStatus__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemGetRmInstanceId__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemGetClientDatabaseInfo__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *);
// NV0000_CTRL_CMD_CLIENT_* control handlers.
NV_STATUS (*__cliresCtrlCmdClientGetAddrSpaceType__)(struct RmClientResource *, NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *);
NV_STATUS (*__cliresCtrlCmdClientGetHandleInfo__)(struct RmClientResource *, NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *);
NV_STATUS (*__cliresCtrlCmdClientGetAccessRights__)(struct RmClientResource *, NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *);
NV_STATUS (*__cliresCtrlCmdClientSetInheritedSharePolicy__)(struct RmClientResource *, NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *);
NV_STATUS (*__cliresCtrlCmdClientShareObject__)(struct RmClientResource *, NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *);
NV_STATUS (*__cliresCtrlCmdClientGetChildHandle__)(struct RmClientResource *, NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *);
// NV0000_CTRL_CMD_GPU_* control handlers.
NV_STATUS (*__cliresCtrlCmdGpuGetAttachedIds__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuGetIdInfo__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuGetIdInfoV2__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuGetInitStatus__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuGetDeviceIds__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuGetProbedIds__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuAttachIds__)(struct RmClientResource *, NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuDetachIds__)(struct RmClientResource *, NV0000_CTRL_GPU_DETACH_IDS_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuGetSvmSize__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuGetPciInfo__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuGetUuidInfo__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuGetUuidFromGpuId__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuModifyGpuDrainState__)(struct RmClientResource *, NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuQueryGpuDrainState__)(struct RmClientResource *, NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuGetMemOpEnable__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGpuDisableNvlinkInit__)(struct RmClientResource *, NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *);
NV_STATUS (*__cliresCtrlCmdLegacyConfig__)(struct RmClientResource *, NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *);
// Gsync and event control handlers.
NV_STATUS (*__cliresCtrlCmdGsyncGetAttachedIds__)(struct RmClientResource *, NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *);
NV_STATUS (*__cliresCtrlCmdGsyncGetIdInfo__)(struct RmClientResource *, NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *);
NV_STATUS (*__cliresCtrlCmdEventSetNotification__)(struct RmClientResource *, NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *);
NV_STATUS (*__cliresCtrlCmdEventGetSystemEventStatus__)(struct RmClientResource *, NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS *);
// Unix-specific object export/import control handlers.
NV_STATUS (*__cliresCtrlCmdOsUnixExportObjectToFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *);
NV_STATUS (*__cliresCtrlCmdOsUnixImportObjectFromFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *);
NV_STATUS (*__cliresCtrlCmdOsUnixGetExportObjectInfo__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *);
NV_STATUS (*__cliresCtrlCmdOsUnixCreateExportObjectFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *);
NV_STATUS (*__cliresCtrlCmdOsUnixExportObjectsToFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *);
NV_STATUS (*__cliresCtrlCmdOsUnixImportObjectsFromFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *);
NV_STATUS (*__cliresCtrlCmdOsUnixFlushUserCache__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *);
// Sub-process and fabric-management control handlers.
NV_STATUS (*__cliresCtrlCmdSetSubProcessID__)(struct RmClientResource *, NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *);
NV_STATUS (*__cliresCtrlCmdDisableSubProcessUserdIsolation__)(struct RmClientResource *, NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemSyncExternalFabricMgmt__)(struct RmClientResource *, NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *);
// Inherited resource/notifier virtuals (RsResource / Notifier interfaces).
NV_STATUS (*__cliresControl__)(struct RmClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__cliresUnmap__)(struct RmClientResource *, struct CALL_CONTEXT *, RsCpuMapping *);
NV_STATUS (*__cliresMapTo__)(struct RmClientResource *, RS_RES_MAP_TO_PARAMS *);
void (*__cliresSetNotificationShare__)(struct RmClientResource *, struct NotifShare *);
NV_STATUS (*__cliresControlFilter__)(struct RmClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
void (*__cliresAddAdditionalDependants__)(struct RsClient *, struct RmClientResource *, RsResourceRef *);
NvU32 (*__cliresGetRefCount__)(struct RmClientResource *);
NV_STATUS (*__cliresUnregisterEvent__)(struct RmClientResource *, NvHandle, NvHandle, NvHandle, NvHandle);
NvBool (*__cliresCanCopy__)(struct RmClientResource *);
NV_STATUS (*__cliresControl_Prologue__)(struct RmClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
void (*__cliresPreDestruct__)(struct RmClientResource *);
NV_STATUS (*__cliresUnmapFrom__)(struct RmClientResource *, RS_RES_UNMAP_FROM_PARAMS *);
PEVENTNOTIFICATION *(*__cliresGetNotificationListPtr__)(struct RmClientResource *);
void (*__cliresControl_Epilogue__)(struct RmClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
struct NotifShare *(*__cliresGetNotificationShare__)(struct RmClientResource *);
NV_STATUS (*__cliresControlLookup__)(struct RmClientResource *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NV_STATUS (*__cliresMap__)(struct RmClientResource *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *);
NV_STATUS (*__cliresGetOrAllocNotifShare__)(struct RmClientResource *, NvHandle, NvHandle, struct NotifShare **);
};
#ifndef __NVOC_CLASS_RmClientResource_TYPEDEF__
#define __NVOC_CLASS_RmClientResource_TYPEDEF__
typedef struct RmClientResource RmClientResource;
#endif /* __NVOC_CLASS_RmClientResource_TYPEDEF__ */
// NVOC class id for RmClientResource.
#ifndef __nvoc_class_id_RmClientResource
#define __nvoc_class_id_RmClientResource 0x37a701
#endif /* __nvoc_class_id_RmClientResource */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClientResource;
// Cast helpers (static via cached pointer, dynamic via NVOC RTTI; the
// dynamic cast degrades to NULL when the header is disabled).
#define __staticCast_RmClientResource(pThis) \
((pThis)->__nvoc_pbase_RmClientResource)
#ifdef __nvoc_client_resource_h_disabled
#define __dynamicCast_RmClientResource(pThis) ((RmClientResource*)NULL)
#else //__nvoc_client_resource_h_disabled
#define __dynamicCast_RmClientResource(pThis) \
((RmClientResource*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmClientResource)))
#endif //__nvoc_client_resource_h_disabled
// Object creation entry points.
NV_STATUS __nvoc_objCreateDynamic_RmClientResource(RmClientResource**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_RmClientResource(RmClientResource**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_RmClientResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_RmClientResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
// Virtual-call convenience macros: each forwards to the *_DISPATCH inline,
// which invokes the corresponding vtable slot.
#define cliresAccessCallback(pRmCliRes, pInvokingClient, pAllocParams, accessRight) cliresAccessCallback_DISPATCH(pRmCliRes, pInvokingClient, pAllocParams, accessRight)
#define cliresShareCallback(pRmCliRes, pInvokingClient, pParentRef, pSharePolicy) cliresShareCallback_DISPATCH(pRmCliRes, pInvokingClient, pParentRef, pSharePolicy)
#define cliresCtrlCmdSystemGetCpuInfo(pRmCliRes, pCpuInfoParams) cliresCtrlCmdSystemGetCpuInfo_DISPATCH(pRmCliRes, pCpuInfoParams)
#define cliresCtrlCmdSystemGetFeatures(pRmCliRes, pParams) cliresCtrlCmdSystemGetFeatures_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdSystemGetBuildVersionV2(pRmCliRes, pParams) cliresCtrlCmdSystemGetBuildVersionV2_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdSystemSetMemorySize(pRmCliRes, pParams) cliresCtrlCmdSystemSetMemorySize_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdSystemGetClassList(pRmCliRes, pParams) cliresCtrlCmdSystemGetClassList_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdSystemNotifyEvent(pRmCliRes, pParams) cliresCtrlCmdSystemNotifyEvent_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdSystemDebugCtrlRmMsg(pRmCliRes, pParams) cliresCtrlCmdSystemDebugCtrlRmMsg_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdSystemGetPrivilegedStatus(pRmCliRes, pParams) cliresCtrlCmdSystemGetPrivilegedStatus_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdSystemGetFabricStatus(pRmCliRes, pParams) cliresCtrlCmdSystemGetFabricStatus_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdSystemGetRmInstanceId(pRmCliRes, pRmInstanceIdParams) cliresCtrlCmdSystemGetRmInstanceId_DISPATCH(pRmCliRes, pRmInstanceIdParams)
#define cliresCtrlCmdSystemGetClientDatabaseInfo(pRmCliRes, pParams) cliresCtrlCmdSystemGetClientDatabaseInfo_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdClientGetAddrSpaceType(pRmCliRes, pParams) cliresCtrlCmdClientGetAddrSpaceType_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdClientGetHandleInfo(pRmCliRes, pParams) cliresCtrlCmdClientGetHandleInfo_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdClientGetAccessRights(pRmCliRes, pParams) cliresCtrlCmdClientGetAccessRights_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdClientSetInheritedSharePolicy(pRmCliRes, pParams) cliresCtrlCmdClientSetInheritedSharePolicy_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdClientShareObject(pRmCliRes, pParams) cliresCtrlCmdClientShareObject_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdClientGetChildHandle(pRmCliRes, pParams) cliresCtrlCmdClientGetChildHandle_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdGpuGetAttachedIds(pRmCliRes, pGpuAttachedIds) cliresCtrlCmdGpuGetAttachedIds_DISPATCH(pRmCliRes, pGpuAttachedIds)
#define cliresCtrlCmdGpuGetIdInfo(pRmCliRes, pGpuIdInfoParams) cliresCtrlCmdGpuGetIdInfo_DISPATCH(pRmCliRes, pGpuIdInfoParams)
#define cliresCtrlCmdGpuGetIdInfoV2(pRmCliRes, pGpuIdInfoParams) cliresCtrlCmdGpuGetIdInfoV2_DISPATCH(pRmCliRes, pGpuIdInfoParams)
#define cliresCtrlCmdGpuGetInitStatus(pRmCliRes, pGpuInitStatusParams) cliresCtrlCmdGpuGetInitStatus_DISPATCH(pRmCliRes, pGpuInitStatusParams)
#define cliresCtrlCmdGpuGetDeviceIds(pRmCliRes, pDeviceIdsParams) cliresCtrlCmdGpuGetDeviceIds_DISPATCH(pRmCliRes, pDeviceIdsParams)
#define cliresCtrlCmdGpuGetProbedIds(pRmCliRes, pGpuProbedIds) cliresCtrlCmdGpuGetProbedIds_DISPATCH(pRmCliRes, pGpuProbedIds)
#define cliresCtrlCmdGpuAttachIds(pRmCliRes, pGpuAttachIds) cliresCtrlCmdGpuAttachIds_DISPATCH(pRmCliRes, pGpuAttachIds)
#define cliresCtrlCmdGpuDetachIds(pRmCliRes, pGpuDetachIds) cliresCtrlCmdGpuDetachIds_DISPATCH(pRmCliRes, pGpuDetachIds)
#define cliresCtrlCmdGpuGetSvmSize(pRmCliRes, pSvmSizeGetParams) cliresCtrlCmdGpuGetSvmSize_DISPATCH(pRmCliRes, pSvmSizeGetParams)
#define cliresCtrlCmdGpuGetPciInfo(pRmCliRes, pPciInfoParams) cliresCtrlCmdGpuGetPciInfo_DISPATCH(pRmCliRes, pPciInfoParams)
#define cliresCtrlCmdGpuGetUuidInfo(pRmCliRes, pParams) cliresCtrlCmdGpuGetUuidInfo_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdGpuGetUuidFromGpuId(pRmCliRes, pParams) cliresCtrlCmdGpuGetUuidFromGpuId_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdGpuModifyGpuDrainState(pRmCliRes, pParams) cliresCtrlCmdGpuModifyGpuDrainState_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdGpuQueryGpuDrainState(pRmCliRes, pParams) cliresCtrlCmdGpuQueryGpuDrainState_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdGpuGetMemOpEnable(pRmCliRes, pMemOpEnableParams) cliresCtrlCmdGpuGetMemOpEnable_DISPATCH(pRmCliRes, pMemOpEnableParams)
#define cliresCtrlCmdGpuDisableNvlinkInit(pRmCliRes, pParams) cliresCtrlCmdGpuDisableNvlinkInit_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdLegacyConfig(pRmCliRes, pParams) cliresCtrlCmdLegacyConfig_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdGsyncGetAttachedIds(pRmCliRes, pGsyncAttachedIds) cliresCtrlCmdGsyncGetAttachedIds_DISPATCH(pRmCliRes, pGsyncAttachedIds)
#define cliresCtrlCmdGsyncGetIdInfo(pRmCliRes, pGsyncIdInfoParams) cliresCtrlCmdGsyncGetIdInfo_DISPATCH(pRmCliRes, pGsyncIdInfoParams)
#define cliresCtrlCmdEventSetNotification(pRmCliRes, pEventSetNotificationParams) cliresCtrlCmdEventSetNotification_DISPATCH(pRmCliRes, pEventSetNotificationParams)
#define cliresCtrlCmdEventGetSystemEventStatus(pRmCliRes, pSystemEventStatusParams) cliresCtrlCmdEventGetSystemEventStatus_DISPATCH(pRmCliRes, pSystemEventStatusParams)
#define cliresCtrlCmdOsUnixExportObjectToFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixExportObjectToFd_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdOsUnixImportObjectFromFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixImportObjectFromFd_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdOsUnixGetExportObjectInfo(pRmCliRes, pParams) cliresCtrlCmdOsUnixGetExportObjectInfo_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdOsUnixCreateExportObjectFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixCreateExportObjectFd_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdOsUnixExportObjectsToFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixExportObjectsToFd_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdOsUnixImportObjectsFromFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixImportObjectsFromFd_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdOsUnixFlushUserCache(pRmCliRes, pAddressSpaceParams) cliresCtrlCmdOsUnixFlushUserCache_DISPATCH(pRmCliRes, pAddressSpaceParams)
#define cliresCtrlCmdSetSubProcessID(pRmCliRes, pParams) cliresCtrlCmdSetSubProcessID_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdDisableSubProcessUserdIsolation(pRmCliRes, pParams) cliresCtrlCmdDisableSubProcessUserdIsolation_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdSystemSyncExternalFabricMgmt(pRmCliRes, pExtFabricMgmtParams) cliresCtrlCmdSystemSyncExternalFabricMgmt_DISPATCH(pRmCliRes, pExtFabricMgmtParams)
#define cliresControl(pResource, pCallContext, pParams) cliresControl_DISPATCH(pResource, pCallContext, pParams)
#define cliresUnmap(pResource, pCallContext, pCpuMapping) cliresUnmap_DISPATCH(pResource, pCallContext, pCpuMapping)
#define cliresMapTo(pResource, pParams) cliresMapTo_DISPATCH(pResource, pParams)
#define cliresSetNotificationShare(pNotifier, pNotifShare) cliresSetNotificationShare_DISPATCH(pNotifier, pNotifShare)
#define cliresControlFilter(pResource, pCallContext, pParams) cliresControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define cliresAddAdditionalDependants(pClient, pResource, pReference) cliresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define cliresGetRefCount(pResource) cliresGetRefCount_DISPATCH(pResource)
#define cliresUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) cliresUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
#define cliresCanCopy(pResource) cliresCanCopy_DISPATCH(pResource)
#define cliresControl_Prologue(pResource, pCallContext, pParams) cliresControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define cliresPreDestruct(pResource) cliresPreDestruct_DISPATCH(pResource)
#define cliresUnmapFrom(pResource, pParams) cliresUnmapFrom_DISPATCH(pResource, pParams)
#define cliresGetNotificationListPtr(pNotifier) cliresGetNotificationListPtr_DISPATCH(pNotifier)
#define cliresControl_Epilogue(pResource, pCallContext, pParams) cliresControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define cliresGetNotificationShare(pNotifier) cliresGetNotificationShare_DISPATCH(pNotifier)
#define cliresControlLookup(pResource, pParams, ppEntry) cliresControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define cliresMap(pResource, pCallContext, pParams, pCpuMapping) cliresMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping)
#define cliresGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) cliresGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
NvBool cliresAccessCallback_IMPL(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight);
static inline NvBool cliresAccessCallback_DISPATCH(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pRmCliRes->__cliresAccessCallback__(pRmCliRes, pInvokingClient, pAllocParams, accessRight);
}
NvBool cliresShareCallback_IMPL(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy);
static inline NvBool cliresShareCallback_DISPATCH(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pRmCliRes->__cliresShareCallback__(pRmCliRes, pInvokingClient, pParentRef, pSharePolicy);
}
NV_STATUS cliresCtrlCmdSystemGetCpuInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *pCpuInfoParams);
static inline NV_STATUS cliresCtrlCmdSystemGetCpuInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *pCpuInfoParams) {
return pRmCliRes->__cliresCtrlCmdSystemGetCpuInfo__(pRmCliRes, pCpuInfoParams);
}
NV_STATUS cliresCtrlCmdSystemGetFeatures_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdSystemGetFeatures_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdSystemGetFeatures__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdSystemGetBuildVersionV2_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdSystemGetBuildVersionV2_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdSystemGetBuildVersionV2__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdSystemSetMemorySize_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdSystemSetMemorySize_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdSystemSetMemorySize__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdSystemGetClassList_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdSystemGetClassList_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdSystemGetClassList__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdSystemNotifyEvent_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdSystemNotifyEvent_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdSystemNotifyEvent__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdSystemDebugCtrlRmMsg_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdSystemDebugCtrlRmMsg_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdSystemDebugCtrlRmMsg__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdSystemGetPrivilegedStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdSystemGetPrivilegedStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdSystemGetPrivilegedStatus__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdSystemGetFabricStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdSystemGetFabricStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdSystemGetFabricStatus__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdSystemGetRmInstanceId_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *pRmInstanceIdParams);
static inline NV_STATUS cliresCtrlCmdSystemGetRmInstanceId_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *pRmInstanceIdParams) {
return pRmCliRes->__cliresCtrlCmdSystemGetRmInstanceId__(pRmCliRes, pRmInstanceIdParams);
}
NV_STATUS cliresCtrlCmdSystemGetClientDatabaseInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdSystemGetClientDatabaseInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdSystemGetClientDatabaseInfo__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdClientGetAddrSpaceType_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdClientGetAddrSpaceType_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdClientGetAddrSpaceType__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdClientGetHandleInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdClientGetHandleInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdClientGetHandleInfo__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdClientGetAccessRights_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdClientGetAccessRights_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdClientGetAccessRights__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdClientSetInheritedSharePolicy_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdClientSetInheritedSharePolicy_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdClientSetInheritedSharePolicy__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdClientShareObject_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdClientShareObject_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdClientShareObject__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdClientGetChildHandle_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdClientGetChildHandle_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdClientGetChildHandle__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdGpuGetAttachedIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuAttachedIds);
static inline NV_STATUS cliresCtrlCmdGpuGetAttachedIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuAttachedIds) {
return pRmCliRes->__cliresCtrlCmdGpuGetAttachedIds__(pRmCliRes, pGpuAttachedIds);
}
NV_STATUS cliresCtrlCmdGpuGetIdInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *pGpuIdInfoParams);
static inline NV_STATUS cliresCtrlCmdGpuGetIdInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *pGpuIdInfoParams) {
return pRmCliRes->__cliresCtrlCmdGpuGetIdInfo__(pRmCliRes, pGpuIdInfoParams);
}
NV_STATUS cliresCtrlCmdGpuGetIdInfoV2_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *pGpuIdInfoParams);
static inline NV_STATUS cliresCtrlCmdGpuGetIdInfoV2_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *pGpuIdInfoParams) {
return pRmCliRes->__cliresCtrlCmdGpuGetIdInfoV2__(pRmCliRes, pGpuIdInfoParams);
}
NV_STATUS cliresCtrlCmdGpuGetInitStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *pGpuInitStatusParams);
static inline NV_STATUS cliresCtrlCmdGpuGetInitStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *pGpuInitStatusParams) {
return pRmCliRes->__cliresCtrlCmdGpuGetInitStatus__(pRmCliRes, pGpuInitStatusParams);
}
NV_STATUS cliresCtrlCmdGpuGetDeviceIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *pDeviceIdsParams);
static inline NV_STATUS cliresCtrlCmdGpuGetDeviceIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *pDeviceIdsParams) {
return pRmCliRes->__cliresCtrlCmdGpuGetDeviceIds__(pRmCliRes, pDeviceIdsParams);
}
NV_STATUS cliresCtrlCmdGpuGetProbedIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds);
static inline NV_STATUS cliresCtrlCmdGpuGetProbedIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds) {
return pRmCliRes->__cliresCtrlCmdGpuGetProbedIds__(pRmCliRes, pGpuProbedIds);
}
NV_STATUS cliresCtrlCmdGpuAttachIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *pGpuAttachIds);
static inline NV_STATUS cliresCtrlCmdGpuAttachIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *pGpuAttachIds) {
return pRmCliRes->__cliresCtrlCmdGpuAttachIds__(pRmCliRes, pGpuAttachIds);
}
NV_STATUS cliresCtrlCmdGpuDetachIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DETACH_IDS_PARAMS *pGpuDetachIds);
static inline NV_STATUS cliresCtrlCmdGpuDetachIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DETACH_IDS_PARAMS *pGpuDetachIds) {
return pRmCliRes->__cliresCtrlCmdGpuDetachIds__(pRmCliRes, pGpuDetachIds);
}
NV_STATUS cliresCtrlCmdGpuGetSvmSize_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS *pSvmSizeGetParams);
static inline NV_STATUS cliresCtrlCmdGpuGetSvmSize_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS *pSvmSizeGetParams) {
return pRmCliRes->__cliresCtrlCmdGpuGetSvmSize__(pRmCliRes, pSvmSizeGetParams);
}
NV_STATUS cliresCtrlCmdGpuGetPciInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *pPciInfoParams);
static inline NV_STATUS cliresCtrlCmdGpuGetPciInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *pPciInfoParams) {
return pRmCliRes->__cliresCtrlCmdGpuGetPciInfo__(pRmCliRes, pPciInfoParams);
}
NV_STATUS cliresCtrlCmdGpuGetUuidInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdGpuGetUuidInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdGpuGetUuidInfo__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdGpuGetUuidFromGpuId_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdGpuGetUuidFromGpuId_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdGpuGetUuidFromGpuId__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdGpuModifyGpuDrainState_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdGpuModifyGpuDrainState_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdGpuModifyGpuDrainState__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdGpuQueryGpuDrainState_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdGpuQueryGpuDrainState_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdGpuQueryGpuDrainState__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdGpuGetMemOpEnable_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *pMemOpEnableParams);
static inline NV_STATUS cliresCtrlCmdGpuGetMemOpEnable_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *pMemOpEnableParams) {
return pRmCliRes->__cliresCtrlCmdGpuGetMemOpEnable__(pRmCliRes, pMemOpEnableParams);
}
NV_STATUS cliresCtrlCmdGpuDisableNvlinkInit_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdGpuDisableNvlinkInit_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdGpuDisableNvlinkInit__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdLegacyConfig_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdLegacyConfig_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdLegacyConfig__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdGsyncGetAttachedIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *pGsyncAttachedIds);
static inline NV_STATUS cliresCtrlCmdGsyncGetAttachedIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *pGsyncAttachedIds) {
return pRmCliRes->__cliresCtrlCmdGsyncGetAttachedIds__(pRmCliRes, pGsyncAttachedIds);
}
NV_STATUS cliresCtrlCmdGsyncGetIdInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *pGsyncIdInfoParams);
static inline NV_STATUS cliresCtrlCmdGsyncGetIdInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *pGsyncIdInfoParams) {
return pRmCliRes->__cliresCtrlCmdGsyncGetIdInfo__(pRmCliRes, pGsyncIdInfoParams);
}
NV_STATUS cliresCtrlCmdEventSetNotification_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pEventSetNotificationParams);
static inline NV_STATUS cliresCtrlCmdEventSetNotification_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pEventSetNotificationParams) {
return pRmCliRes->__cliresCtrlCmdEventSetNotification__(pRmCliRes, pEventSetNotificationParams);
}
NV_STATUS cliresCtrlCmdEventGetSystemEventStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS *pSystemEventStatusParams);
static inline NV_STATUS cliresCtrlCmdEventGetSystemEventStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS *pSystemEventStatusParams) {
return pRmCliRes->__cliresCtrlCmdEventGetSystemEventStatus__(pRmCliRes, pSystemEventStatusParams);
}
NV_STATUS cliresCtrlCmdOsUnixExportObjectToFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdOsUnixExportObjectToFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdOsUnixExportObjectToFd__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdOsUnixImportObjectFromFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdOsUnixImportObjectFromFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdOsUnixImportObjectFromFd__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdOsUnixGetExportObjectInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdOsUnixGetExportObjectInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdOsUnixGetExportObjectInfo__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdOsUnixCreateExportObjectFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdOsUnixCreateExportObjectFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdOsUnixCreateExportObjectFd__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdOsUnixExportObjectsToFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdOsUnixExportObjectsToFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdOsUnixExportObjectsToFd__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdOsUnixImportObjectsFromFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdOsUnixImportObjectsFromFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdOsUnixImportObjectsFromFd__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdOsUnixFlushUserCache_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *pAddressSpaceParams);
static inline NV_STATUS cliresCtrlCmdOsUnixFlushUserCache_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *pAddressSpaceParams) {
return pRmCliRes->__cliresCtrlCmdOsUnixFlushUserCache__(pRmCliRes, pAddressSpaceParams);
}
NV_STATUS cliresCtrlCmdSetSubProcessID_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdSetSubProcessID_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdSetSubProcessID__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdDisableSubProcessUserdIsolation_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdDisableSubProcessUserdIsolation_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *pParams) {
return pRmCliRes->__cliresCtrlCmdDisableSubProcessUserdIsolation__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdSystemSyncExternalFabricMgmt_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *pExtFabricMgmtParams);
static inline NV_STATUS cliresCtrlCmdSystemSyncExternalFabricMgmt_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *pExtFabricMgmtParams) {
return pRmCliRes->__cliresCtrlCmdSystemSyncExternalFabricMgmt__(pRmCliRes, pExtFabricMgmtParams);
}
static inline NV_STATUS cliresControl_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__cliresControl__(pResource, pCallContext, pParams);
}
static inline NV_STATUS cliresUnmap_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
return pResource->__cliresUnmap__(pResource, pCallContext, pCpuMapping);
}
static inline NV_STATUS cliresMapTo_DISPATCH(struct RmClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__cliresMapTo__(pResource, pParams);
}
static inline void cliresSetNotificationShare_DISPATCH(struct RmClientResource *pNotifier, struct NotifShare *pNotifShare) {
pNotifier->__cliresSetNotificationShare__(pNotifier, pNotifShare);
}
static inline NV_STATUS cliresControlFilter_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__cliresControlFilter__(pResource, pCallContext, pParams);
}
static inline void cliresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RmClientResource *pResource, RsResourceRef *pReference) {
pResource->__cliresAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NvU32 cliresGetRefCount_DISPATCH(struct RmClientResource *pResource) {
return pResource->__cliresGetRefCount__(pResource);
}
static inline NV_STATUS cliresUnregisterEvent_DISPATCH(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
return pNotifier->__cliresUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
}
static inline NvBool cliresCanCopy_DISPATCH(struct RmClientResource *pResource) {
return pResource->__cliresCanCopy__(pResource);
}
static inline NV_STATUS cliresControl_Prologue_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__cliresControl_Prologue__(pResource, pCallContext, pParams);
}
static inline void cliresPreDestruct_DISPATCH(struct RmClientResource *pResource) {
pResource->__cliresPreDestruct__(pResource);
}
static inline NV_STATUS cliresUnmapFrom_DISPATCH(struct RmClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pResource->__cliresUnmapFrom__(pResource, pParams);
}
static inline PEVENTNOTIFICATION *cliresGetNotificationListPtr_DISPATCH(struct RmClientResource *pNotifier) {
return pNotifier->__cliresGetNotificationListPtr__(pNotifier);
}
static inline void cliresControl_Epilogue_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__cliresControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline struct NotifShare *cliresGetNotificationShare_DISPATCH(struct RmClientResource *pNotifier) {
return pNotifier->__cliresGetNotificationShare__(pNotifier);
}
static inline NV_STATUS cliresControlLookup_DISPATCH(struct RmClientResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__cliresControlLookup__(pResource, pParams, ppEntry);
}
static inline NV_STATUS cliresMap_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
return pResource->__cliresMap__(pResource, pCallContext, pParams, pCpuMapping);
}
static inline NV_STATUS cliresGetOrAllocNotifShare_DISPATCH(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
return pNotifier->__cliresGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
}
NV_STATUS cliresConstruct_IMPL(struct RmClientResource *arg_pRmCliRes, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_cliresConstruct(arg_pRmCliRes, arg_pCallContext, arg_pParams) cliresConstruct_IMPL(arg_pRmCliRes, arg_pCallContext, arg_pParams)
void cliresDestruct_IMPL(struct RmClientResource *pRmCliRes);
#define __nvoc_cliresDestruct(pRmCliRes) cliresDestruct_IMPL(pRmCliRes)
#undef PRIVATE_FIELD
NV_STATUS CliGetSystemP2pCaps(NvU32 *gpuIds,
NvU32 gpuCount,
NvU32 *p2pCaps,
NvU32 *p2pOptimalReadCEs,
NvU32 *p2pOptimalWriteCEs,
NvU8 *p2pCapsStatus,
NvU32 *pBusPeerIds);
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_CLIENT_RESOURCE_NVOC_H_

/* ===== Begin generated file: g_context_dma_nvoc.c (NVOC-generated, 427 lines in original listing) ===== */
#define NVOC_CONTEXT_DMA_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_context_dma_nvoc.h"
#ifdef DEBUG
// Non-static definition named after this class's NVOC id (0x88441b). If two
// classes ever claim the same id, DEBUG builds fail at link time with a
// duplicate-symbol error instead of misbehaving at run time.
char __nvoc_class_id_uniqueness_check_0x88441b = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_ContextDma;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier;
void __nvoc_init_ContextDma(ContextDma*);
void __nvoc_init_funcTable_ContextDma(ContextDma*);
NV_STATUS __nvoc_ctor_ContextDma(ContextDma*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_ContextDma(ContextDma*);
void __nvoc_dtor_ContextDma(ContextDma*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_ContextDma;
// --- Run-time type info (NVOC-generated) ---
// One NVOC_RTTI record per (ContextDma, ancestor) pair. `offset` is the byte
// offset of that ancestor's sub-object inside ContextDma; the cast thunks
// below add/subtract it to convert between base and derived pointers.
// The self-record owns the real destructor; base records destruct via
// __nvoc_destructFromBase.
static const struct NVOC_RTTI __nvoc_rtti_ContextDma_ContextDma = {
/*pClassDef=*/ &__nvoc_class_def_ContextDma,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_ContextDma,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_ContextDma_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_ContextDma_RsResource = {
/*pClassDef=*/ &__nvoc_class_def_RsResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource),
};
static const struct NVOC_RTTI __nvoc_rtti_ContextDma_RmResourceCommon = {
/*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};
static const struct NVOC_RTTI __nvoc_rtti_ContextDma_RmResource = {
/*pClassDef=*/ &__nvoc_class_def_RmResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource),
};
static const struct NVOC_RTTI __nvoc_rtti_ContextDma_INotifier = {
/*pClassDef=*/ &__nvoc_class_def_INotifier,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_Notifier.__nvoc_base_INotifier),
};
static const struct NVOC_RTTI __nvoc_rtti_ContextDma_Notifier = {
/*pClassDef=*/ &__nvoc_class_def_Notifier,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_Notifier),
};
// Cast table consulted by dynamicCast: the class itself plus its six
// transitive bases. numRelatives must match the entry count below.
static const struct NVOC_CASTINFO __nvoc_castinfo_ContextDma = {
/*numRelatives=*/ 7,
/*relatives=*/ {
&__nvoc_rtti_ContextDma_ContextDma,
&__nvoc_rtti_ContextDma_Notifier,
&__nvoc_rtti_ContextDma_INotifier,
&__nvoc_rtti_ContextDma_RmResource,
&__nvoc_rtti_ContextDma_RmResourceCommon,
&__nvoc_rtti_ContextDma_RsResource,
&__nvoc_rtti_ContextDma_Object,
},
};
// Class descriptor for ContextDma: instance size, class id, dynamic factory
// entry point, cast table, and exported control-method table. The name
// string is compiled in only when NV_PRINTF_STRINGS_ALLOWED.
const struct NVOC_CLASS_DEF __nvoc_class_def_ContextDma =
{
/*classInfo=*/ {
/*size=*/ sizeof(ContextDma),
/*classId=*/ classId(ContextDma),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "ContextDma",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_ContextDma,
/*pCastInfo=*/ &__nvoc_castinfo_ContextDma,
/*pExportInfo=*/ &__nvoc_export_info_ContextDma
};
// --- Inheritance thunks (NVOC-generated) ---
// Down-cast thunks: ContextDma OVERRIDES these base RsResource slots, so a
// base pointer is converted back to the derived ContextDma by SUBTRACTING
// the sub-object offset recorded in the RTTI, then the ctxdma* override runs.
static NV_STATUS __nvoc_thunk_ContextDma_resMapTo(struct RsResource *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams) {
return ctxdmaMapTo((struct ContextDma *)(((unsigned char *)pContextDma) - __nvoc_rtti_ContextDma_RsResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_ContextDma_resUnmapFrom(struct RsResource *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams) {
return ctxdmaUnmapFrom((struct ContextDma *)(((unsigned char *)pContextDma) - __nvoc_rtti_ContextDma_RsResource.offset), pParams);
}
// Up-cast thunks: ContextDma INHERITS these slots, so the derived pointer is
// converted to the proper base sub-object by ADDING its offset before
// calling the base (rmres* / res* / notify*) implementation.
static NvBool __nvoc_thunk_RmResource_ctxdmaShareCallback(struct ContextDma *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}
static NV_STATUS __nvoc_thunk_RmResource_ctxdmaCheckMemInterUnmap(struct ContextDma *pRmResource, NvBool bSubdeviceHandleProvided) {
return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ContextDma_RmResource.offset), bSubdeviceHandleProvided);
}
static NvBool __nvoc_thunk_RmResource_ctxdmaAccessCallback(struct ContextDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
static NV_STATUS __nvoc_thunk_RmResource_ctxdmaGetMemInterMapParams(struct ContextDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ContextDma_RmResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_ctxdmaGetMemoryMappingDescriptor(struct ContextDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ContextDma_RmResource.offset), ppMemDesc);
}
static void __nvoc_thunk_Notifier_ctxdmaSetNotificationShare(struct ContextDma *pNotifier, struct NotifShare *pNotifShare) {
notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset), pNotifShare);
}
static NV_STATUS __nvoc_thunk_RsResource_ctxdmaControl(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RsResource_ctxdmaControlFilter(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pParams);
}
static NvU32 __nvoc_thunk_RsResource_ctxdmaGetRefCount(struct ContextDma *pResource) {
return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_Notifier_ctxdmaUnregisterEvent(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent);
}
static NV_STATUS __nvoc_thunk_RsResource_ctxdmaUnmap(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pCpuMapping);
}
static NvBool __nvoc_thunk_RsResource_ctxdmaCanCopy(struct ContextDma *pResource) {
return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RmResource_ctxdmaControl_Prologue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pCallContext, pParams);
}
static void __nvoc_thunk_RsResource_ctxdmaAddAdditionalDependants(struct RsClient *pClient, struct ContextDma *pResource, RsResourceRef *pReference) {
resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pReference);
}
static void __nvoc_thunk_RsResource_ctxdmaPreDestruct(struct ContextDma *pResource) {
resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset));
}
static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_ctxdmaGetNotificationListPtr(struct ContextDma *pNotifier) {
return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset));
}
static void __nvoc_thunk_RmResource_ctxdmaControl_Epilogue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pCallContext, pParams);
}
static struct NotifShare *__nvoc_thunk_Notifier_ctxdmaGetNotificationShare(struct ContextDma *pNotifier) {
return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_ctxdmaControlLookup(struct ContextDma *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pParams, ppEntry);
}
static NV_STATUS __nvoc_thunk_RsResource_ctxdmaMap(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pParams, pCpuMapping);
}
static NV_STATUS __nvoc_thunk_Notifier_ctxdmaGetOrAllocNotifShare(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare);
}
#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG)
#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0)
#endif
// RM control methods exported by ContextDma. Each entry binds a 32-bit
// control command id (methodId 0x2010x -- presumably the NV0002_CTRL_CMD_*
// values from ctrl/ctrl0002.h; confirm there) to its _IMPL handler and the
// size of its parameter struct. A build may compile an entry out via
// NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(flags), leaving pFunc NULL.
static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_ContextDma[] =
{
{ /* [0] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdUpdateContextdma_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
/*flags=*/ 0x0u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20101u,
/*paramSize=*/ sizeof(NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "ctxdmaCtrlCmdUpdateContextdma"
#endif
},
{ /* [1] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdBindContextdma_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*flags=*/ 0x10u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20102u,
/*paramSize=*/ sizeof(NV0002_CTRL_BIND_CONTEXTDMA_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "ctxdmaCtrlCmdBindContextdma"
#endif
},
{ /* [2] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdUnbindContextdma_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*flags=*/ 0x10u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20103u,
/*paramSize=*/ sizeof(NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "ctxdmaCtrlCmdUnbindContextdma"
#endif
},
};
// Export descriptor referenced by the class def; numEntries must equal the
// length of __nvoc_exported_method_def_ContextDma above.
const struct NVOC_EXPORT_INFO __nvoc_export_info_ContextDma =
{
/*numEntries=*/ 3,
/*pExportEntries=*/ __nvoc_exported_method_def_ContextDma
};
void __nvoc_dtor_RmResource(RmResource*);
void __nvoc_dtor_Notifier(Notifier*);
// Destructor: run the ContextDma body destructor first, then the base-class
// destructors. NOTE(review): bases are destroyed RmResource-then-Notifier
// here, while the constructor's failure path unwinds Notifier-then-RmResource;
// this mirrors what the NVOC generator emits -- confirm the order is benign.
void __nvoc_dtor_ContextDma(ContextDma *pThis) {
__nvoc_ctxdmaDestruct(pThis);
__nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Per-class data-field initializer. ContextDma declares no fields that need
// non-zero defaults, so the parameter is referenced only to silence
// unused-variable warnings.
void __nvoc_init_dataField_ContextDma(ContextDma *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *);
// Constructor: bases first (RmResource, then Notifier), then default data
// fields, then the ContextDma body (__nvoc_ctxdmaConstruct). On failure the
// goto chain falls through the labels below the failing step, destroying
// only the parts that were successfully constructed, in reverse order.
NV_STATUS __nvoc_ctor_ContextDma(ContextDma *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail_RmResource;
status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext);
if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail_Notifier;
__nvoc_init_dataField_ContextDma(pThis);
status = __nvoc_ctxdmaConstruct(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail__init;
goto __nvoc_ctor_ContextDma_exit; // Success
__nvoc_ctor_ContextDma_fail__init:
__nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
__nvoc_ctor_ContextDma_fail_Notifier:
__nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_ctor_ContextDma_fail_RmResource:
__nvoc_ctor_ContextDma_exit:
return status;
}
// Populate the per-instance virtual-function table.
static void __nvoc_init_funcTable_ContextDma_1(ContextDma *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
// Slots introduced (or implemented) by ContextDma point at _IMPL directly.
pThis->__ctxdmaValidate__ = &ctxdmaValidate_IMPL;
pThis->__ctxdmaGetKernelVA__ = &ctxdmaGetKernelVA_IMPL;
pThis->__ctxdmaMapTo__ = &ctxdmaMapTo_IMPL;
pThis->__ctxdmaUnmapFrom__ = &ctxdmaUnmapFrom_IMPL;
// Exported control methods honor the same disable flags as the export
// table; a disabled slot stays NULL (instance memory is zeroed at create).
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
pThis->__ctxdmaCtrlCmdUpdateContextdma__ = &ctxdmaCtrlCmdUpdateContextdma_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__ctxdmaCtrlCmdBindContextdma__ = &ctxdmaCtrlCmdBindContextdma_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__ctxdmaCtrlCmdUnbindContextdma__ = &ctxdmaCtrlCmdUnbindContextdma_IMPL;
#endif
// ContextDma overrides two RsResource slots: patch the BASE vtable to go
// through the down-cast thunks so base-typed callers reach the override.
pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resMapTo__ = &__nvoc_thunk_ContextDma_resMapTo;
pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resUnmapFrom__ = &__nvoc_thunk_ContextDma_resUnmapFrom;
// Inherited slots route through up-cast thunks to the base implementations.
pThis->__ctxdmaShareCallback__ = &__nvoc_thunk_RmResource_ctxdmaShareCallback;
pThis->__ctxdmaCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_ctxdmaCheckMemInterUnmap;
pThis->__ctxdmaAccessCallback__ = &__nvoc_thunk_RmResource_ctxdmaAccessCallback;
pThis->__ctxdmaGetMemInterMapParams__ = &__nvoc_thunk_RmResource_ctxdmaGetMemInterMapParams;
pThis->__ctxdmaGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_ctxdmaGetMemoryMappingDescriptor;
pThis->__ctxdmaSetNotificationShare__ = &__nvoc_thunk_Notifier_ctxdmaSetNotificationShare;
pThis->__ctxdmaControl__ = &__nvoc_thunk_RsResource_ctxdmaControl;
pThis->__ctxdmaControlFilter__ = &__nvoc_thunk_RsResource_ctxdmaControlFilter;
pThis->__ctxdmaGetRefCount__ = &__nvoc_thunk_RsResource_ctxdmaGetRefCount;
pThis->__ctxdmaUnregisterEvent__ = &__nvoc_thunk_Notifier_ctxdmaUnregisterEvent;
pThis->__ctxdmaUnmap__ = &__nvoc_thunk_RsResource_ctxdmaUnmap;
pThis->__ctxdmaCanCopy__ = &__nvoc_thunk_RsResource_ctxdmaCanCopy;
pThis->__ctxdmaControl_Prologue__ = &__nvoc_thunk_RmResource_ctxdmaControl_Prologue;
pThis->__ctxdmaAddAdditionalDependants__ = &__nvoc_thunk_RsResource_ctxdmaAddAdditionalDependants;
pThis->__ctxdmaPreDestruct__ = &__nvoc_thunk_RsResource_ctxdmaPreDestruct;
pThis->__ctxdmaGetNotificationListPtr__ = &__nvoc_thunk_Notifier_ctxdmaGetNotificationListPtr;
pThis->__ctxdmaControl_Epilogue__ = &__nvoc_thunk_RmResource_ctxdmaControl_Epilogue;
pThis->__ctxdmaGetNotificationShare__ = &__nvoc_thunk_Notifier_ctxdmaGetNotificationShare;
pThis->__ctxdmaControlLookup__ = &__nvoc_thunk_RsResource_ctxdmaControlLookup;
pThis->__ctxdmaMap__ = &__nvoc_thunk_RsResource_ctxdmaMap;
pThis->__ctxdmaGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_ctxdmaGetOrAllocNotifShare;
}
// Vtable-init entry point. The generator emits the table in numbered chunk
// functions; this class needs only chunk _1.
void __nvoc_init_funcTable_ContextDma(ContextDma *pThis) {
__nvoc_init_funcTable_ContextDma_1(pThis);
}
void __nvoc_init_RmResource(RmResource*);
void __nvoc_init_Notifier(Notifier*);
// Initialize object identity: wire the __nvoc_pbase_* shortcut pointers for
// every ancestor sub-object, recursively initialize the two direct bases,
// then fill in this class's vtable.
void __nvoc_init_ContextDma(ContextDma *pThis) {
pThis->__nvoc_pbase_ContextDma = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;
pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;
pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier;
pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier;
__nvoc_init_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_init_Notifier(&pThis->__nvoc_base_Notifier);
__nvoc_init_funcTable_ContextDma(pThis);
}
// Allocate, zero, RTTI-tag, optionally parent, initialize, and construct a
// ContextDma instance. *ppThis is written only on success.
// NOTE(review): dynamicCast(pParent, Object) is not NULL-checked before
// objAddChild -- presumably every parent reaching here is an Object; confirm.
NV_STATUS __nvoc_objCreate_ContextDma(ContextDma **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status;
Object *pParentObj;
ContextDma *pThis;
pThis = portMemAllocNonPaged(sizeof(ContextDma));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
// Zeroing matters: vtable slots skipped by disable flags stay NULL.
portMemSet(pThis, 0, sizeof(ContextDma));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_ContextDma);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_ContextDma(pThis);
status = __nvoc_ctor_ContextDma(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_objCreate_ContextDma_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_ContextDma_cleanup:
// do not call destructors here since the constructor already called them
// (the ctor's goto chain unwound its bases before returning failure)
portMemFree(pThis);
return status;
}
// Variadic factory used by the NVOC runtime: unpack the constructor
// arguments from the va_list and forward to the typed creator.
NV_STATUS __nvoc_objCreateDynamic_ContextDma(ContextDma **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    // va_arg evaluation order is significant: CALL_CONTEXT first, then the
    // allocation params, matching the order callers pushed them.
    struct CALL_CONTEXT *arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
    // Hand the creator's status straight back to the runtime.
    return __nvoc_objCreate_ContextDma(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
}

/* ===== Begin generated file: g_context_dma_nvoc.h (NVOC-generated, 356 lines in original listing) ===== */
#ifndef _G_CONTEXT_DMA_NVOC_H_
#define _G_CONTEXT_DMA_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_context_dma_nvoc.h"
#ifndef CONTEXT_DMA_H
#define CONTEXT_DMA_H
#include "core/core.h"
#include "gpu/mem_mgr/mem_desc.h"
#include "rmapi/resource.h"
#include "rmapi/event.h"
#include "ctrl/ctrl0002.h"
#include "rmapi/control.h" // for macro RMCTRL_EXPORT etc.
#include "nvlimits.h"
struct Device;
#ifndef __NVOC_CLASS_Device_TYPEDEF__
#define __NVOC_CLASS_Device_TYPEDEF__
typedef struct Device Device;
#endif /* __NVOC_CLASS_Device_TYPEDEF__ */
#ifndef __nvoc_class_id_Device
#define __nvoc_class_id_Device 0xe0ac20
#endif /* __nvoc_class_id_Device */
struct Memory;
#ifndef __NVOC_CLASS_Memory_TYPEDEF__
#define __NVOC_CLASS_Memory_TYPEDEF__
typedef struct Memory Memory;
#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */
#ifndef __nvoc_class_id_Memory
#define __nvoc_class_id_Memory 0x4789f2
#endif /* __nvoc_class_id_Memory */
/*!
* RM internal class representing NV01_CONTEXT_DMA
*/
#ifdef NVOC_CONTEXT_DMA_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct ContextDma {
// Run-time type info pointer, set by __nvoc_initRtti at creation.
const struct NVOC_RTTI *__nvoc_rtti;
// Base-class sub-objects; RmResource itself embeds RsResource -> Object
// and RmResourceCommon, giving the full ancestor chain below.
struct RmResource __nvoc_base_RmResource;
struct Notifier __nvoc_base_Notifier;
// Cached pointers to each ancestor sub-object, wired by __nvoc_init_ContextDma.
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct INotifier *__nvoc_pbase_INotifier;
struct Notifier *__nvoc_pbase_Notifier;
struct ContextDma *__nvoc_pbase_ContextDma;
// Per-instance vtable; the *_DISPATCH inlines below call through these slots.
NV_STATUS (*__ctxdmaValidate__)(struct ContextDma *, NvU64, NvU64);
NV_STATUS (*__ctxdmaGetKernelVA__)(struct ContextDma *, NvU64, NvU64, void **, NvU32);
NV_STATUS (*__ctxdmaMapTo__)(struct ContextDma *, struct RS_RES_MAP_TO_PARAMS *);
NV_STATUS (*__ctxdmaUnmapFrom__)(struct ContextDma *, struct RS_RES_UNMAP_FROM_PARAMS *);
NV_STATUS (*__ctxdmaCtrlCmdUpdateContextdma__)(struct ContextDma *, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *);
NV_STATUS (*__ctxdmaCtrlCmdBindContextdma__)(struct ContextDma *, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *);
NV_STATUS (*__ctxdmaCtrlCmdUnbindContextdma__)(struct ContextDma *, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *);
NvBool (*__ctxdmaShareCallback__)(struct ContextDma *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__ctxdmaCheckMemInterUnmap__)(struct ContextDma *, NvBool);
NvBool (*__ctxdmaAccessCallback__)(struct ContextDma *, struct RsClient *, void *, RsAccessRight);
NV_STATUS (*__ctxdmaGetMemInterMapParams__)(struct ContextDma *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__ctxdmaGetMemoryMappingDescriptor__)(struct ContextDma *, struct MEMORY_DESCRIPTOR **);
void (*__ctxdmaSetNotificationShare__)(struct ContextDma *, struct NotifShare *);
NV_STATUS (*__ctxdmaControl__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__ctxdmaControlFilter__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NvU32 (*__ctxdmaGetRefCount__)(struct ContextDma *);
NV_STATUS (*__ctxdmaUnregisterEvent__)(struct ContextDma *, NvHandle, NvHandle, NvHandle, NvHandle);
NV_STATUS (*__ctxdmaUnmap__)(struct ContextDma *, struct CALL_CONTEXT *, RsCpuMapping *);
NvBool (*__ctxdmaCanCopy__)(struct ContextDma *);
NV_STATUS (*__ctxdmaControl_Prologue__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
void (*__ctxdmaAddAdditionalDependants__)(struct RsClient *, struct ContextDma *, RsResourceRef *);
void (*__ctxdmaPreDestruct__)(struct ContextDma *);
PEVENTNOTIFICATION *(*__ctxdmaGetNotificationListPtr__)(struct ContextDma *);
void (*__ctxdmaControl_Epilogue__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
struct NotifShare *(*__ctxdmaGetNotificationShare__)(struct ContextDma *);
NV_STATUS (*__ctxdmaControlLookup__)(struct ContextDma *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NV_STATUS (*__ctxdmaMap__)(struct ContextDma *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *);
NV_STATUS (*__ctxdmaGetOrAllocNotifShare__)(struct ContextDma *, NvHandle, NvHandle, struct NotifShare **);
// Instance data. NOTE(review): semantics are not visible in this header;
// the [8]-element arrays are presumably indexed per GPU/subdevice (see
// nvlimits.h, included above) -- confirm which limit the 8 corresponds to.
NvU32 Class;
NvU32 Flags;
NvBool bReadOnly;
NvU32 CacheSnoop;
NvU32 Type;
NvU64 Limit;
NV_ADDRESS_SPACE AddressSpace;
NvBool bUnicast;
void *KernelVAddr[8];
void *KernelPriv;
NvU64 FbAperture[8];
NvU64 FbApertureLen[8];
struct Memory *pMemory;
struct MEMORY_DESCRIPTOR *pMemDesc;
NvU32 Instance[8];
NvU32 InstRefCount[8];
struct OBJGPU *pGpu;
struct Device *pDevice;
};
#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__
#define __NVOC_CLASS_ContextDma_TYPEDEF__
typedef struct ContextDma ContextDma;
#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */
#ifndef __nvoc_class_id_ContextDma
#define __nvoc_class_id_ContextDma 0x88441b
#endif /* __nvoc_class_id_ContextDma */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_ContextDma;
#define __staticCast_ContextDma(pThis) \
((pThis)->__nvoc_pbase_ContextDma)
#ifdef __nvoc_context_dma_h_disabled
#define __dynamicCast_ContextDma(pThis) ((ContextDma*)NULL)
#else //__nvoc_context_dma_h_disabled
#define __dynamicCast_ContextDma(pThis) \
((ContextDma*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(ContextDma)))
#endif //__nvoc_context_dma_h_disabled
NV_STATUS __nvoc_objCreateDynamic_ContextDma(ContextDma**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_ContextDma(ContextDma**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_ContextDma(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_ContextDma((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
#define ctxdmaValidate(pContextDma, start, len) ctxdmaValidate_DISPATCH(pContextDma, start, len)
#define ctxdmaGetKernelVA(pContextDma, start, len, arg0, VA_idx) ctxdmaGetKernelVA_DISPATCH(pContextDma, start, len, arg0, VA_idx)
#define ctxdmaMapTo(pContextDma, pParams) ctxdmaMapTo_DISPATCH(pContextDma, pParams)
#define ctxdmaUnmapFrom(pContextDma, pParams) ctxdmaUnmapFrom_DISPATCH(pContextDma, pParams)
#define ctxdmaCtrlCmdUpdateContextdma(pContextDma, pUpdateCtxtDmaParams) ctxdmaCtrlCmdUpdateContextdma_DISPATCH(pContextDma, pUpdateCtxtDmaParams)
#define ctxdmaCtrlCmdBindContextdma(pContextDma, pBindCtxtDmaParams) ctxdmaCtrlCmdBindContextdma_DISPATCH(pContextDma, pBindCtxtDmaParams)
#define ctxdmaCtrlCmdUnbindContextdma(pContextDma, pUnbindCtxtDmaParams) ctxdmaCtrlCmdUnbindContextdma_DISPATCH(pContextDma, pUnbindCtxtDmaParams)
#define ctxdmaShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) ctxdmaShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy)
#define ctxdmaCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) ctxdmaCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define ctxdmaAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) ctxdmaAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
#define ctxdmaGetMemInterMapParams(pRmResource, pParams) ctxdmaGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define ctxdmaGetMemoryMappingDescriptor(pRmResource, ppMemDesc) ctxdmaGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define ctxdmaSetNotificationShare(pNotifier, pNotifShare) ctxdmaSetNotificationShare_DISPATCH(pNotifier, pNotifShare)
#define ctxdmaControl(pResource, pCallContext, pParams) ctxdmaControl_DISPATCH(pResource, pCallContext, pParams)
#define ctxdmaControlFilter(pResource, pCallContext, pParams) ctxdmaControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define ctxdmaGetRefCount(pResource) ctxdmaGetRefCount_DISPATCH(pResource)
#define ctxdmaUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) ctxdmaUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
#define ctxdmaUnmap(pResource, pCallContext, pCpuMapping) ctxdmaUnmap_DISPATCH(pResource, pCallContext, pCpuMapping)
#define ctxdmaCanCopy(pResource) ctxdmaCanCopy_DISPATCH(pResource)
#define ctxdmaControl_Prologue(pResource, pCallContext, pParams) ctxdmaControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define ctxdmaAddAdditionalDependants(pClient, pResource, pReference) ctxdmaAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define ctxdmaPreDestruct(pResource) ctxdmaPreDestruct_DISPATCH(pResource)
#define ctxdmaGetNotificationListPtr(pNotifier) ctxdmaGetNotificationListPtr_DISPATCH(pNotifier)
#define ctxdmaControl_Epilogue(pResource, pCallContext, pParams) ctxdmaControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define ctxdmaGetNotificationShare(pNotifier) ctxdmaGetNotificationShare_DISPATCH(pNotifier)
#define ctxdmaControlLookup(pResource, pParams, ppEntry) ctxdmaControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define ctxdmaMap(pResource, pCallContext, pParams, pCpuMapping) ctxdmaMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping)
#define ctxdmaGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) ctxdmaGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
// --- ContextDma virtual-call dispatch shims (NVOC-generated) ---
// Each *_DISPATCH inline forwards through the instance's vtable slot so
// overrides are honored; an _IMPL prototype precedes the dispatcher only for
// slots this class implements itself (the others are inherited via thunks).
NV_STATUS ctxdmaValidate_IMPL(struct ContextDma *pContextDma, NvU64 start, NvU64 len);
static inline NV_STATUS ctxdmaValidate_DISPATCH(struct ContextDma *pContextDma, NvU64 start, NvU64 len) {
return pContextDma->__ctxdmaValidate__(pContextDma, start, len);
}
NV_STATUS ctxdmaGetKernelVA_IMPL(struct ContextDma *pContextDma, NvU64 start, NvU64 len, void **arg0, NvU32 VA_idx);
static inline NV_STATUS ctxdmaGetKernelVA_DISPATCH(struct ContextDma *pContextDma, NvU64 start, NvU64 len, void **arg0, NvU32 VA_idx) {
return pContextDma->__ctxdmaGetKernelVA__(pContextDma, start, len, arg0, VA_idx);
}
NV_STATUS ctxdmaMapTo_IMPL(struct ContextDma *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams);
static inline NV_STATUS ctxdmaMapTo_DISPATCH(struct ContextDma *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams) {
return pContextDma->__ctxdmaMapTo__(pContextDma, pParams);
}
NV_STATUS ctxdmaUnmapFrom_IMPL(struct ContextDma *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams);
static inline NV_STATUS ctxdmaUnmapFrom_DISPATCH(struct ContextDma *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pContextDma->__ctxdmaUnmapFrom__(pContextDma, pParams);
}
NV_STATUS ctxdmaCtrlCmdUpdateContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *pUpdateCtxtDmaParams);
static inline NV_STATUS ctxdmaCtrlCmdUpdateContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *pUpdateCtxtDmaParams) {
return pContextDma->__ctxdmaCtrlCmdUpdateContextdma__(pContextDma, pUpdateCtxtDmaParams);
}
NV_STATUS ctxdmaCtrlCmdBindContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *pBindCtxtDmaParams);
static inline NV_STATUS ctxdmaCtrlCmdBindContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *pBindCtxtDmaParams) {
return pContextDma->__ctxdmaCtrlCmdBindContextdma__(pContextDma, pBindCtxtDmaParams);
}
NV_STATUS ctxdmaCtrlCmdUnbindContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *pUnbindCtxtDmaParams);
static inline NV_STATUS ctxdmaCtrlCmdUnbindContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *pUnbindCtxtDmaParams) {
return pContextDma->__ctxdmaCtrlCmdUnbindContextdma__(pContextDma, pUnbindCtxtDmaParams);
}
static inline NvBool ctxdmaShareCallback_DISPATCH(struct ContextDma *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pResource->__ctxdmaShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy);
}
static inline NV_STATUS ctxdmaCheckMemInterUnmap_DISPATCH(struct ContextDma *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__ctxdmaCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NvBool ctxdmaAccessCallback_DISPATCH(struct ContextDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pResource->__ctxdmaAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
static inline NV_STATUS ctxdmaGetMemInterMapParams_DISPATCH(struct ContextDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return pRmResource->__ctxdmaGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS ctxdmaGetMemoryMappingDescriptor_DISPATCH(struct ContextDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return pRmResource->__ctxdmaGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline void ctxdmaSetNotificationShare_DISPATCH(struct ContextDma *pNotifier, struct NotifShare *pNotifShare) {
pNotifier->__ctxdmaSetNotificationShare__(pNotifier, pNotifShare);
}
static inline NV_STATUS ctxdmaControl_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__ctxdmaControl__(pResource, pCallContext, pParams);
}
static inline NV_STATUS ctxdmaControlFilter_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__ctxdmaControlFilter__(pResource, pCallContext, pParams);
}
static inline NvU32 ctxdmaGetRefCount_DISPATCH(struct ContextDma *pResource) {
return pResource->__ctxdmaGetRefCount__(pResource);
}
static inline NV_STATUS ctxdmaUnregisterEvent_DISPATCH(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
return pNotifier->__ctxdmaUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
}
static inline NV_STATUS ctxdmaUnmap_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
return pResource->__ctxdmaUnmap__(pResource, pCallContext, pCpuMapping);
}
static inline NvBool ctxdmaCanCopy_DISPATCH(struct ContextDma *pResource) {
return pResource->__ctxdmaCanCopy__(pResource);
}
static inline NV_STATUS ctxdmaControl_Prologue_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__ctxdmaControl_Prologue__(pResource, pCallContext, pParams);
}
static inline void ctxdmaAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct ContextDma *pResource, RsResourceRef *pReference) {
pResource->__ctxdmaAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline void ctxdmaPreDestruct_DISPATCH(struct ContextDma *pResource) {
pResource->__ctxdmaPreDestruct__(pResource);
}
static inline PEVENTNOTIFICATION *ctxdmaGetNotificationListPtr_DISPATCH(struct ContextDma *pNotifier) {
return pNotifier->__ctxdmaGetNotificationListPtr__(pNotifier);
}
static inline void ctxdmaControl_Epilogue_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__ctxdmaControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline struct NotifShare *ctxdmaGetNotificationShare_DISPATCH(struct ContextDma *pNotifier) {
return pNotifier->__ctxdmaGetNotificationShare__(pNotifier);
}
static inline NV_STATUS ctxdmaControlLookup_DISPATCH(struct ContextDma *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__ctxdmaControlLookup__(pResource, pParams, ppEntry);
}
static inline NV_STATUS ctxdmaMap_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
return pResource->__ctxdmaMap__(pResource, pCallContext, pParams, pCpuMapping);
}
static inline NV_STATUS ctxdmaGetOrAllocNotifShare_DISPATCH(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
return pNotifier->__ctxdmaGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
}
// Constructor/destructor entry points; the __nvoc_* macros are the names the
// NVOC object framework invokes, mapped straight to the _IMPL functions.
NV_STATUS ctxdmaConstruct_IMPL(struct ContextDma *arg_pCtxdma, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_ctxdmaConstruct(arg_pCtxdma, arg_pCallContext, arg_pParams) ctxdmaConstruct_IMPL(arg_pCtxdma, arg_pCallContext, arg_pParams)
void ctxdmaDestruct_IMPL(struct ContextDma *pCtxdma);
#define __nvoc_ctxdmaDestruct(pCtxdma) ctxdmaDestruct_IMPL(pCtxdma)
// Non-virtual helper: when the header is compiled "disabled", the wrapper
// asserts at runtime and returns NV_FALSE instead of calling the _IMPL.
NvBool ctxdmaIsBound_IMPL(struct ContextDma *pContextDma);
#ifdef __nvoc_context_dma_h_disabled
static inline NvBool ctxdmaIsBound(struct ContextDma *pContextDma) {
NV_ASSERT_FAILED_PRECOMP("ContextDma was disabled!");
return NV_FALSE;
}
#else //__nvoc_context_dma_h_disabled
#define ctxdmaIsBound(pContextDma) ctxdmaIsBound_IMPL(pContextDma)
#endif //__nvoc_context_dma_h_disabled
// Look up a ContextDma by client handle; always maps to the _IMPL.
NV_STATUS ctxdmaGetByHandle_IMPL(struct RsClient *pClient, NvHandle hContextDma, struct ContextDma **arg0);
#define ctxdmaGetByHandle(pClient, hContextDma, arg0) ctxdmaGetByHandle_IMPL(pClient, hContextDma, arg0)
#undef PRIVATE_FIELD
// ****************************************************************************
// Deprecated Definitions
// ****************************************************************************
#if RM_STRICT_CONFIG_EMIT_DEPRECATED_CONTEXT_DMA_DEFINITIONS == 1
/**
* @warning This function is deprecated! Please use ctxdmaGetByHandle.
*/
NV_STATUS CliGetContextDma(NvHandle hClient, NvHandle hContextDma, struct ContextDma **);
#endif
#endif /* CONTEXT_DMA_H */
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_CONTEXT_DMA_NVOC_H_

/* View File — @@ -0,0 +1,286 @@ : scraped diff separator; next section is the
 * generated source g_dce_client_nvoc.c (fenced as a comment so the
 * concatenated text remains valid C). */
#define NVOC_DCE_CLIENT_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_dce_client_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x61649c = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJDCECLIENTRM;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;
void __nvoc_init_OBJDCECLIENTRM(OBJDCECLIENTRM*);
void __nvoc_init_funcTable_OBJDCECLIENTRM(OBJDCECLIENTRM*);
NV_STATUS __nvoc_ctor_OBJDCECLIENTRM(OBJDCECLIENTRM*);
void __nvoc_init_dataField_OBJDCECLIENTRM(OBJDCECLIENTRM*);
void __nvoc_dtor_OBJDCECLIENTRM(OBJDCECLIENTRM*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJDCECLIENTRM;
// Per-ancestor RTTI records for OBJDCECLIENTRM.  Each entry stores the class
// definition, the destructor to invoke when destroying through that base
// view, and the byte offset of the base subobject within OBJDCECLIENTRM
// (0 for the most-derived class itself).
static const struct NVOC_RTTI __nvoc_rtti_OBJDCECLIENTRM_OBJDCECLIENTRM = {
/*pClassDef=*/ &__nvoc_class_def_OBJDCECLIENTRM,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJDCECLIENTRM,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_OBJDCECLIENTRM_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE = {
/*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE),
};
// Cast table used by dynamicCast: the class plus its two ancestors
// (OBJENGSTATE, and Object via OBJENGSTATE).
static const struct NVOC_CASTINFO __nvoc_castinfo_OBJDCECLIENTRM = {
/*numRelatives=*/ 3,
/*relatives=*/ {
&__nvoc_rtti_OBJDCECLIENTRM_OBJDCECLIENTRM,
&__nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE,
&__nvoc_rtti_OBJDCECLIENTRM_Object,
},
};
// Class definition: size, class id, optional printable name, the dynamic
// creation function, and pointers to the cast/export tables above.
const struct NVOC_CLASS_DEF __nvoc_class_def_OBJDCECLIENTRM =
{
/*classInfo=*/ {
/*size=*/ sizeof(OBJDCECLIENTRM),
/*classId=*/ classId(OBJDCECLIENTRM),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "OBJDCECLIENTRM",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJDCECLIENTRM,
/*pCastInfo=*/ &__nvoc_castinfo_OBJDCECLIENTRM,
/*pExportInfo=*/ &__nvoc_export_info_OBJDCECLIENTRM
};
// ---------------------------------------------------------------------------
// Pointer-adjusting thunks.  Two directions:
//   * __nvoc_thunk_OBJDCECLIENTRM_engstate*: called through the OBJENGSTATE
//     base vtable; SUBTRACT the base-subobject offset to recover the
//     OBJDCECLIENTRM pointer, then call the derived dceclient* override.
//   * __nvoc_thunk_OBJENGSTATE_dceclient*: called through the derived-name
//     slot; ADD the offset to get the OBJENGSTATE view, then call the
//     inherited base-class default (engstate*).
// The offset arithmetic must match the RTTI records above exactly.
// ---------------------------------------------------------------------------
static NV_STATUS __nvoc_thunk_OBJDCECLIENTRM_engstateConstructEngine(struct OBJGPU *arg0, struct OBJENGSTATE *arg1, ENGDESCRIPTOR arg2) {
return dceclientConstructEngine(arg0, (struct OBJDCECLIENTRM *)(((unsigned char *)arg1) - __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg2);
}
static void __nvoc_thunk_OBJDCECLIENTRM_engstateStateDestroy(struct OBJGPU *arg0, struct OBJENGSTATE *arg1) {
dceclientStateDestroy(arg0, (struct OBJDCECLIENTRM *)(((unsigned char *)arg1) - __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_OBJDCECLIENTRM_engstateStateLoad(struct OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) {
return dceclientStateLoad(arg0, (struct OBJDCECLIENTRM *)(((unsigned char *)arg1) - __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg2);
}
static NV_STATUS __nvoc_thunk_OBJDCECLIENTRM_engstateStateUnload(struct OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) {
return dceclientStateUnload(arg0, (struct OBJDCECLIENTRM *)(((unsigned char *)arg1) - __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg2);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientReconcileTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStateInitLocked(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePreLoad(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePostUnload(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePreUnload(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStateInitUnlocked(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset));
}
static void __nvoc_thunk_OBJENGSTATE_dceclientInitMissing(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePreInitLocked(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePreInitUnlocked(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientGetTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientCompareTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunables1, void *pTunables2) {
return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunables1, pTunables2);
}
static void __nvoc_thunk_OBJENGSTATE_dceclientFreeTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePostLoad(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientAllocTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void **ppTunableState) {
return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), ppTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientSetTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunableState);
}
static NvBool __nvoc_thunk_OBJENGSTATE_dceclientIsPresent(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset));
}
// OBJDCECLIENTRM exports no RM control methods.
const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJDCECLIENTRM =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*);
// Destructor: this class owns no resources of its own; just destroy the
// OBJENGSTATE base subobject.
void __nvoc_dtor_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) {
__nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Data-field initializer: no fields need non-zero initialization (the object
// is zeroed at allocation time in __nvoc_objCreate_OBJDCECLIENTRM).
void __nvoc_init_dataField_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );
// Constructor: build the base class first, then initialize this class's
// data fields.  On base-ctor failure, nothing constructed here needs undoing.
NV_STATUS __nvoc_ctor_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
if (status != NV_OK) goto __nvoc_ctor_OBJDCECLIENTRM_fail_OBJENGSTATE;
__nvoc_init_dataField_OBJDCECLIENTRM(pThis);
goto __nvoc_ctor_OBJDCECLIENTRM_exit; // Success
__nvoc_ctor_OBJDCECLIENTRM_fail_OBJENGSTATE:
__nvoc_ctor_OBJDCECLIENTRM_exit:
return status;
}
// Populate the virtual function table.  Four slots are overridden with
// dceclient*_IMPL (and the matching base-vtable slots redirected through the
// downcasting thunks); every other slot falls through to the OBJENGSTATE
// default via the upcasting thunks defined above.
static void __nvoc_init_funcTable_OBJDCECLIENTRM_1(OBJDCECLIENTRM *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
pThis->__dceclientConstructEngine__ = &dceclientConstructEngine_IMPL;
pThis->__dceclientStateDestroy__ = &dceclientStateDestroy_IMPL;
pThis->__dceclientStateLoad__ = &dceclientStateLoad_IMPL;
pThis->__dceclientStateUnload__ = &dceclientStateUnload_IMPL;
pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_OBJDCECLIENTRM_engstateConstructEngine;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_OBJDCECLIENTRM_engstateStateDestroy;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_OBJDCECLIENTRM_engstateStateLoad;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_OBJDCECLIENTRM_engstateStateUnload;
pThis->__dceclientReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientReconcileTunableState;
pThis->__dceclientStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_dceclientStateInitLocked;
pThis->__dceclientStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePreLoad;
pThis->__dceclientStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePostUnload;
pThis->__dceclientStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePreUnload;
pThis->__dceclientStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_dceclientStateInitUnlocked;
pThis->__dceclientInitMissing__ = &__nvoc_thunk_OBJENGSTATE_dceclientInitMissing;
pThis->__dceclientStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePreInitLocked;
pThis->__dceclientStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePreInitUnlocked;
pThis->__dceclientGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientGetTunableState;
pThis->__dceclientCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientCompareTunableState;
pThis->__dceclientFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientFreeTunableState;
pThis->__dceclientStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePostLoad;
pThis->__dceclientAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientAllocTunableState;
pThis->__dceclientSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientSetTunableState;
pThis->__dceclientIsPresent__ = &__nvoc_thunk_OBJENGSTATE_dceclientIsPresent;
}
void __nvoc_init_funcTable_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) {
__nvoc_init_funcTable_OBJDCECLIENTRM_1(pThis);
}
void __nvoc_init_OBJENGSTATE(OBJENGSTATE*);
// Object initializer: wire the cached base-class pointers, initialize the
// base subobject, then fill in the function table.
void __nvoc_init_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) {
pThis->__nvoc_pbase_OBJDCECLIENTRM = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object;
pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE;
__nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
__nvoc_init_funcTable_OBJDCECLIENTRM(pThis);
}
// Allocate and construct an OBJDCECLIENTRM.
// Sequence: allocate zeroed non-paged memory, install RTTI, optionally attach
// to the parent object tree, run init + ctor.  On ctor failure the object is
// freed without running destructors (the ctor unwinds its own partial work).
// Returns NV_ERR_NO_MEMORY on allocation failure, otherwise the ctor status.
NV_STATUS __nvoc_objCreate_OBJDCECLIENTRM(OBJDCECLIENTRM **ppThis, Dynamic *pParent, NvU32 createFlags) {
NV_STATUS status;
Object *pParentObj;
OBJDCECLIENTRM *pThis;
pThis = portMemAllocNonPaged(sizeof(OBJDCECLIENTRM));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(OBJDCECLIENTRM));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJDCECLIENTRM);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
// NOTE(review): dynamicCast result is used without a NULL check; this
// relies on every non-HALSPEC parent being castable to Object — confirm
// against the NVOC generator's contract before changing.
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_OBJDCECLIENTRM(pThis);
status = __nvoc_ctor_OBJDCECLIENTRM(pThis);
if (status != NV_OK) goto __nvoc_objCreate_OBJDCECLIENTRM_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_OBJDCECLIENTRM_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
/*
 * Dynamic-creation entry point registered in the class definition.
 * OBJDCECLIENTRM takes no variadic construction arguments, so the va_list
 * is ignored and creation is delegated straight to
 * __nvoc_objCreate_OBJDCECLIENTRM().
 */
NV_STATUS __nvoc_objCreateDynamic_OBJDCECLIENTRM(OBJDCECLIENTRM **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    return __nvoc_objCreate_OBJDCECLIENTRM(ppThis, pParent, createFlags);
}

/* View File — @@ -0,0 +1,377 @@ : scraped diff separator; next section is the
 * generated header g_dce_client_nvoc.h (fenced as a comment so the
 * concatenated text remains valid C). */
#ifndef _G_DCE_CLIENT_NVOC_H_
#define _G_DCE_CLIENT_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_dce_client_nvoc.h"
#ifndef _DCE_CLIENT_H_
#define _DCE_CLIENT_H_
/*!
* @file dce_client.h
* @brief Provides definitions for all DceClient data structures and interfaces.
*/
#include "gpu/eng_state.h"
#include "core/core.h"
#include "objrpc.h"
#include "os/dce_rm_client_ipc.h"
#include "class/cl0000.h"
#include "class/cl0080.h"
#include "class/cl2080.h"
#include "class/cl0073.h"
#include "class/cl0005.h"
#include "class/clc372sw.h"
#include "ctrl/ctrl0073/ctrl0073dp.h"
// ---------------------------------------------------------------------------
// Bookkeeping records for RM objects the DCE client allocates on behalf of
// the OS.  Each record caches the handle triple (client/parent/object), the
// class, the allocation parameters used, and a 'valid' flag indicating the
// record is in use.
// ---------------------------------------------------------------------------
// NV01_ROOT (client) allocation record.
typedef struct
{
NvHandle hClient;
NvHandle hParent;
NvHandle hObject;
NvU32 hClass;
NV0000_ALLOC_PARAMETERS rootAllocParams;
NvBool valid;
} ROOT;
// NV01_DEVICE allocation record.
typedef struct
{
NvHandle hClient;
NvHandle hParent;
NvHandle hObject;
NvU32 hClass;
NV0080_ALLOC_PARAMETERS deviceAllocParams;
NvBool valid;
} DEVICE;
// NV20_SUBDEVICE allocation record.
typedef struct
{
NvHandle hClient;
NvHandle hParent;
NvHandle hObject;
NvU32 hClass;
NV2080_ALLOC_PARAMETERS subdeviceAllocParams;
NvBool valid;
} SUBDEVICE;
// Display-common object allocation record (NVOS21-style alloc params).
typedef struct
{
NvHandle hClient;
NvHandle hParent;
NvHandle hObject;
NvU32 hClass;
NVOS21_PARAMETERS displayCommonAllocParams;
NvBool valid;
} DISPLAY_COMMON;
// Display software-object allocation record (NVOS21-style alloc params).
typedef struct
{
NvHandle hClient;
NvHandle hParent;
NvHandle hObject;
NvU32 hClass;
NVOS21_PARAMETERS displaySWAllocParams;
NvBool valid;
} DISPLAY_SW;
// Display software-event allocation record (NV01_EVENT alloc params).
typedef struct
{
NvHandle hClient;
NvHandle hParent;
NvHandle hObject;
NvU32 hClass;
NV0005_ALLOC_PARAMETERS displaySWEventAllocParams;
NvBool valid;
} DISPLAY_SW_EVENT;
// Hot-plug-detect event-notification control record (handle pair + the
// NV2080 set-notification parameters used).
typedef struct
{
NvHandle hClient;
NvHandle hObject;
NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS setEventParams;
NvBool valid;
} DISPLAY_HPD_CTRL;
// DisplayPort manual-mode control record (handle pair + the NV0073
// set-manual-displayport parameters used).
typedef struct
{
NvHandle hClient;
NvHandle hObject;
NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS setManualParams;
NvBool valid;
} DISPLAY_DP_SET_MANUAL;
/*!
* Max no of RM clients
*/
#define MAX_RM_CLIENTS 5
/*!
* Temporary alias of DceClient to OBJDCECLIENTRM
*/
#define DceClient OBJDCECLIENTRM
/*!
* Defines the structure used to contain all generic information related to
* the DceClient.
*/
#ifdef NVOC_DCE_CLIENT_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// NVOC class layout for OBJDCECLIENTRM: RTTI pointer first, then the
// OBJENGSTATE base subobject, cached base-class pointers, the per-object
// virtual function table, and finally the class's own data members
// (RPC object pointer and per-client IPC ids).
struct OBJDCECLIENTRM {
const struct NVOC_RTTI *__nvoc_rtti;
struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
struct Object *__nvoc_pbase_Object;
struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
struct OBJDCECLIENTRM *__nvoc_pbase_OBJDCECLIENTRM;
NV_STATUS (*__dceclientConstructEngine__)(struct OBJGPU *, struct OBJDCECLIENTRM *, ENGDESCRIPTOR);
void (*__dceclientStateDestroy__)(struct OBJGPU *, struct OBJDCECLIENTRM *);
NV_STATUS (*__dceclientStateLoad__)(struct OBJGPU *, struct OBJDCECLIENTRM *, NvU32);
NV_STATUS (*__dceclientStateUnload__)(struct OBJGPU *, struct OBJDCECLIENTRM *, NvU32);
NV_STATUS (*__dceclientReconcileTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *);
NV_STATUS (*__dceclientStateInitLocked__)(POBJGPU, struct OBJDCECLIENTRM *);
NV_STATUS (*__dceclientStatePreLoad__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32);
NV_STATUS (*__dceclientStatePostUnload__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32);
NV_STATUS (*__dceclientStatePreUnload__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32);
NV_STATUS (*__dceclientStateInitUnlocked__)(POBJGPU, struct OBJDCECLIENTRM *);
void (*__dceclientInitMissing__)(POBJGPU, struct OBJDCECLIENTRM *);
NV_STATUS (*__dceclientStatePreInitLocked__)(POBJGPU, struct OBJDCECLIENTRM *);
NV_STATUS (*__dceclientStatePreInitUnlocked__)(POBJGPU, struct OBJDCECLIENTRM *);
NV_STATUS (*__dceclientGetTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *);
NV_STATUS (*__dceclientCompareTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *, void *);
void (*__dceclientFreeTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *);
NV_STATUS (*__dceclientStatePostLoad__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32);
NV_STATUS (*__dceclientAllocTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void **);
NV_STATUS (*__dceclientSetTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *);
NvBool (*__dceclientIsPresent__)(POBJGPU, struct OBJDCECLIENTRM *);
// RPC infrastructure used to talk to DCE firmware.
struct OBJRPC *pRpc;
// IPC client identifiers; array size 2 — meaning of the two slots is not
// visible here (presumably separate IPC channels; confirm in dce_client.c).
NvU32 clientId[2];
};
#ifndef __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__
#define __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__
typedef struct OBJDCECLIENTRM OBJDCECLIENTRM;
#endif /* __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJDCECLIENTRM
#define __nvoc_class_id_OBJDCECLIENTRM 0x61649c
#endif /* __nvoc_class_id_OBJDCECLIENTRM */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJDCECLIENTRM;
#define __staticCast_OBJDCECLIENTRM(pThis) \
((pThis)->__nvoc_pbase_OBJDCECLIENTRM)
#ifdef __nvoc_dce_client_h_disabled
#define __dynamicCast_OBJDCECLIENTRM(pThis) ((OBJDCECLIENTRM*)NULL)
#else //__nvoc_dce_client_h_disabled
#define __dynamicCast_OBJDCECLIENTRM(pThis) \
((OBJDCECLIENTRM*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJDCECLIENTRM)))
#endif //__nvoc_dce_client_h_disabled
#define PDB_PROP_DCECLIENT_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
#define PDB_PROP_DCECLIENT_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
NV_STATUS __nvoc_objCreateDynamic_OBJDCECLIENTRM(OBJDCECLIENTRM**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_OBJDCECLIENTRM(OBJDCECLIENTRM**, Dynamic*, NvU32);
#define __objCreate_OBJDCECLIENTRM(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_OBJDCECLIENTRM((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
#define dceclientConstructEngine(arg0, arg1, arg2) dceclientConstructEngine_DISPATCH(arg0, arg1, arg2)
#define dceclientStateDestroy(arg0, arg1) dceclientStateDestroy_DISPATCH(arg0, arg1)
#define dceclientStateLoad(arg0, arg1, arg2) dceclientStateLoad_DISPATCH(arg0, arg1, arg2)
#define dceclientStateUnload(arg0, arg1, arg2) dceclientStateUnload_DISPATCH(arg0, arg1, arg2)
#define dceclientReconcileTunableState(pGpu, pEngstate, pTunableState) dceclientReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define dceclientStateInitLocked(pGpu, pEngstate) dceclientStateInitLocked_DISPATCH(pGpu, pEngstate)
#define dceclientStatePreLoad(pGpu, pEngstate, arg0) dceclientStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define dceclientStatePostUnload(pGpu, pEngstate, arg0) dceclientStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define dceclientStatePreUnload(pGpu, pEngstate, arg0) dceclientStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
#define dceclientStateInitUnlocked(pGpu, pEngstate) dceclientStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define dceclientInitMissing(pGpu, pEngstate) dceclientInitMissing_DISPATCH(pGpu, pEngstate)
#define dceclientStatePreInitLocked(pGpu, pEngstate) dceclientStatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define dceclientStatePreInitUnlocked(pGpu, pEngstate) dceclientStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define dceclientGetTunableState(pGpu, pEngstate, pTunableState) dceclientGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define dceclientCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) dceclientCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2)
#define dceclientFreeTunableState(pGpu, pEngstate, pTunableState) dceclientFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define dceclientStatePostLoad(pGpu, pEngstate, arg0) dceclientStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
#define dceclientAllocTunableState(pGpu, pEngstate, ppTunableState) dceclientAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState)
#define dceclientSetTunableState(pGpu, pEngstate, pTunableState) dceclientSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define dceclientIsPresent(pGpu, pEngstate) dceclientIsPresent_DISPATCH(pGpu, pEngstate)
// ---------------------------------------------------------------------------
// NVOC-generated dispatch shims for OBJDCECLIENTRM.  Each *_DISPATCH helper
// forwards through the object's function-pointer table entry of the same
// name; the first four have dceclient-specific _IMPLs, the rest are wired to
// OBJENGSTATE defaults by __nvoc_init_funcTable_OBJDCECLIENTRM.
// ---------------------------------------------------------------------------
NV_STATUS dceclientConstructEngine_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, ENGDESCRIPTOR arg2);
static inline NV_STATUS dceclientConstructEngine_DISPATCH(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, ENGDESCRIPTOR arg2) {
return arg1->__dceclientConstructEngine__(arg0, arg1, arg2);
}
void dceclientStateDestroy_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1);
static inline void dceclientStateDestroy_DISPATCH(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1) {
arg1->__dceclientStateDestroy__(arg0, arg1);
}
NV_STATUS dceclientStateLoad_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvU32 arg2);
static inline NV_STATUS dceclientStateLoad_DISPATCH(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvU32 arg2) {
return arg1->__dceclientStateLoad__(arg0, arg1, arg2);
}
NV_STATUS dceclientStateUnload_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvU32 arg2);
static inline NV_STATUS dceclientStateUnload_DISPATCH(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvU32 arg2) {
return arg1->__dceclientStateUnload__(arg0, arg1, arg2);
}
static inline NV_STATUS dceclientReconcileTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
return pEngstate->__dceclientReconcileTunableState__(pGpu, pEngstate, pTunableState);
}
static inline NV_STATUS dceclientStateInitLocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
return pEngstate->__dceclientStateInitLocked__(pGpu, pEngstate);
}
static inline NV_STATUS dceclientStatePreLoad_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
return pEngstate->__dceclientStatePreLoad__(pGpu, pEngstate, arg0);
}
static inline NV_STATUS dceclientStatePostUnload_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
return pEngstate->__dceclientStatePostUnload__(pGpu, pEngstate, arg0);
}
static inline NV_STATUS dceclientStatePreUnload_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
return pEngstate->__dceclientStatePreUnload__(pGpu, pEngstate, arg0);
}
static inline NV_STATUS dceclientStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
return pEngstate->__dceclientStateInitUnlocked__(pGpu, pEngstate);
}
static inline void dceclientInitMissing_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
pEngstate->__dceclientInitMissing__(pGpu, pEngstate);
}
static inline NV_STATUS dceclientStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
return pEngstate->__dceclientStatePreInitLocked__(pGpu, pEngstate);
}
static inline NV_STATUS dceclientStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
return pEngstate->__dceclientStatePreInitUnlocked__(pGpu, pEngstate);
}
static inline NV_STATUS dceclientGetTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
return pEngstate->__dceclientGetTunableState__(pGpu, pEngstate, pTunableState);
}
static inline NV_STATUS dceclientCompareTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunables1, void *pTunables2) {
return pEngstate->__dceclientCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2);
}
static inline void dceclientFreeTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
pEngstate->__dceclientFreeTunableState__(pGpu, pEngstate, pTunableState);
}
static inline NV_STATUS dceclientStatePostLoad_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
return pEngstate->__dceclientStatePostLoad__(pGpu, pEngstate, arg0);
}
static inline NV_STATUS dceclientAllocTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void **ppTunableState) {
return pEngstate->__dceclientAllocTunableState__(pGpu, pEngstate, ppTunableState);
}
static inline NV_STATUS dceclientSetTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
return pEngstate->__dceclientSetTunableState__(pGpu, pEngstate, pTunableState);
}
static inline NvBool dceclientIsPresent_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
return pEngstate->__dceclientIsPresent__(pGpu, pEngstate);
}
// ---------------------------------------------------------------------------
// Non-virtual RPC-infrastructure helpers.  When the header is compiled
// "disabled" (__nvoc_dce_client_h_disabled), each wrapper asserts at runtime
// and returns NV_ERR_NOT_SUPPORTED (or nothing, for void); otherwise the
// name maps straight to the _IMPL.
// ---------------------------------------------------------------------------
// Set up the RPC channel(s) to DCE firmware for this GPU.
NV_STATUS dceclientInitRpcInfra_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1);
#ifdef __nvoc_dce_client_h_disabled
static inline NV_STATUS dceclientInitRpcInfra(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1) {
NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_dce_client_h_disabled
#define dceclientInitRpcInfra(arg0, arg1) dceclientInitRpcInfra_IMPL(arg0, arg1)
#endif //__nvoc_dce_client_h_disabled
// Tear down the RPC infrastructure set up by dceclientInitRpcInfra.
void dceclientDeinitRpcInfra_IMPL(struct OBJDCECLIENTRM *arg0);
#ifdef __nvoc_dce_client_h_disabled
static inline void dceclientDeinitRpcInfra(struct OBJDCECLIENTRM *arg0) {
NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!");
}
#else //__nvoc_dce_client_h_disabled
#define dceclientDeinitRpcInfra(arg0) dceclientDeinitRpcInfra_IMPL(arg0)
#endif //__nvoc_dce_client_h_disabled
// Init/deinit handshake with DCE RM; the NvBool selects init vs deinit
// (presumably — confirm against dceclientDceRmInit_IMPL).
NV_STATUS dceclientDceRmInit_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvBool arg2);
#ifdef __nvoc_dce_client_h_disabled
static inline NV_STATUS dceclientDceRmInit(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvBool arg2) {
NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_dce_client_h_disabled
#define dceclientDceRmInit(arg0, arg1, arg2) dceclientDceRmInit_IMPL(arg0, arg1, arg2)
#endif //__nvoc_dce_client_h_disabled
// Send a raw RPC message of the given length over the client's RPC channel.
NV_STATUS dceclientSendRpc_IMPL(struct OBJDCECLIENTRM *arg0, void *arg1, NvU32 arg2);
#ifdef __nvoc_dce_client_h_disabled
static inline NV_STATUS dceclientSendRpc(struct OBJDCECLIENTRM *arg0, void *arg1, NvU32 arg2) {
NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_dce_client_h_disabled
#define dceclientSendRpc(arg0, arg1, arg2) dceclientSendRpc_IMPL(arg0, arg1, arg2)
#endif //__nvoc_dce_client_h_disabled
// End of class body: stop exposing/mangling private members.
#undef PRIVATE_FIELD
// DCE-backed implementations of the RM_API RPC entry points. These mirror
// the standard RmApi Control/Alloc/DupObject/Free operations, forwarding
// each request over the DCE RPC channel.
NV_STATUS rpcRmApiControl_dce(RM_API *pRmApi,
NvHandle hClient, NvHandle hObject,
NvU32 cmd, void *pParamStructPtr,
NvU32 paramsSize);
NV_STATUS rpcRmApiAlloc_dce(RM_API *pRmApi,
NvHandle hClient, NvHandle hParent,
NvHandle hObject, NvU32 hClass,
void *pAllocParams);
NV_STATUS rpcRmApiDupObject_dce(RM_API *pRmApi, NvHandle hClient,
NvHandle hParent, NvHandle *phObject, NvHandle hClientSrc,
NvHandle hObjectSrc, NvU32 flags);
NV_STATUS rpcRmApiFree_dce(RM_API *pRmApi, NvHandle hClient, NvHandle hObject);
// bInit selects whether the RPC performs DCE RM initialization or teardown.
NV_STATUS rpcDceRmInit_dce(RM_API *pRmApi, NvBool bInit);
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_DCE_CLIENT_NVOC_H_

/* ==== Extraction artifact: a scraped diff-view header ("View File" /
 * "@@ -0,0 +1,550 @@") stood here. It marked the start of the next
 * concatenated generated file: g_device_nvoc.c (NVOC metadata for the
 * Device class). ==== */
#define NVOC_DEVICE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_device_nvoc.h"
#ifdef DEBUG
// Link-time uniqueness guard: a second class generated with the same NVOC
// class id (0xe0ac20) would define the same symbol and fail to link.
char __nvoc_class_id_uniqueness_check_0xe0ac20 = 1;
#endif
// Class definitions for Device and every ancestor in its hierarchy.
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Device;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
// Forward declarations of the generated lifecycle helpers defined below.
void __nvoc_init_Device(Device*);
void __nvoc_init_funcTable_Device(Device*);
NV_STATUS __nvoc_ctor_Device(Device*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_Device(Device*);
void __nvoc_dtor_Device(Device*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Device;
// ---------------------------------------------------------------------------
// Per-ancestor RTTI records. Each record stores the byte offset of that
// ancestor's subobject inside Device; the thunks below add/subtract these
// offsets to convert pointers between Device and its bases. The most-derived
// record (Device itself) carries the real destructor; base records use
// __nvoc_destructFromBase to re-enter destruction from a base pointer.
// ---------------------------------------------------------------------------
static const struct NVOC_RTTI __nvoc_rtti_Device_Device = {
/*pClassDef=*/ &__nvoc_class_def_Device,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Device,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_Device_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_Device_RsResource = {
/*pClassDef=*/ &__nvoc_class_def_RsResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
};
static const struct NVOC_RTTI __nvoc_rtti_Device_RmResourceCommon = {
/*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};
static const struct NVOC_RTTI __nvoc_rtti_Device_RmResource = {
/*pClassDef=*/ &__nvoc_class_def_RmResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource),
};
static const struct NVOC_RTTI __nvoc_rtti_Device_GpuResource = {
/*pClassDef=*/ &__nvoc_class_def_GpuResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource),
};
// Cast table consumed by __nvoc_dynamicCast: lists every type a Device
// pointer can be converted to (itself plus its 5 ancestors), most-derived
// first. numRelatives must match the entry count.
static const struct NVOC_CASTINFO __nvoc_castinfo_Device = {
/*numRelatives=*/ 6,
/*relatives=*/ {
&__nvoc_rtti_Device_Device,
&__nvoc_rtti_Device_GpuResource,
&__nvoc_rtti_Device_RmResource,
&__nvoc_rtti_Device_RmResourceCommon,
&__nvoc_rtti_Device_RsResource,
&__nvoc_rtti_Device_Object,
},
};
// Master class descriptor for Device: size/id/name metadata plus the dynamic
// factory, the cast table above, and the RM control export table below.
const struct NVOC_CLASS_DEF __nvoc_class_def_Device =
{
/*classInfo=*/ {
/*size=*/ sizeof(Device),
/*classId=*/ classId(Device),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
// Human-readable class name is compiled out when prints are disallowed.
/*name=*/ "Device",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Device,
/*pCastInfo=*/ &__nvoc_castinfo_Device,
/*pExportInfo=*/ &__nvoc_export_info_Device
};
// ---------------------------------------------------------------------------
// Thunks. NVOC implements cross-class virtual dispatch with the offsets
// recorded in the RTTI tables above:
//   - __nvoc_thunk_Device_gpures* convert base -> derived (subtract the
//     offset) so a call made through a GpuResource vtable slot lands in the
//     Device override.
//   - __nvoc_thunk_<Base>_device* convert derived -> base (add the offset)
//     so a Device method slot falls through to the inherited base-class
//     implementation.
// The pointer arithmetic is only valid because each offset comes from
// NV_OFFSETOF on the real Device layout.
// ---------------------------------------------------------------------------
static NV_STATUS __nvoc_thunk_Device_gpuresControl(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return deviceControl((struct Device *)(((unsigned char *)pResource) - __nvoc_rtti_Device_GpuResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_Device_gpuresInternalControlForward(struct GpuResource *pDevice, NvU32 command, void *pParams, NvU32 size) {
return deviceInternalControlForward((struct Device *)(((unsigned char *)pDevice) - __nvoc_rtti_Device_GpuResource.offset), command, pParams, size);
}
// Everything below forwards a Device call up to the ancestor that actually
// implements it (GpuResource, RmResource, or RsResource).
static NvBool __nvoc_thunk_GpuResource_deviceShareCallback(struct Device *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}
static NV_STATUS __nvoc_thunk_GpuResource_deviceUnmap(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pCallContext, pCpuMapping);
}
static NV_STATUS __nvoc_thunk_RmResource_deviceGetMemInterMapParams(struct Device *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Device_RmResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_deviceGetMemoryMappingDescriptor(struct Device *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Device_RmResource.offset), ppMemDesc);
}
static NV_STATUS __nvoc_thunk_GpuResource_deviceGetMapAddrSpace(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
}
static NvHandle __nvoc_thunk_GpuResource_deviceGetInternalObjectHandle(struct Device *pGpuResource) {
return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_deviceControlFilter(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pCallContext, pParams);
}
static void __nvoc_thunk_RsResource_deviceAddAdditionalDependants(struct RsClient *pClient, struct Device *pResource, RsResourceRef *pReference) {
resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pReference);
}
static NvU32 __nvoc_thunk_RsResource_deviceGetRefCount(struct Device *pResource) {
return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RmResource_deviceCheckMemInterUnmap(struct Device *pRmResource, NvBool bSubdeviceHandleProvided) {
return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Device_RmResource.offset), bSubdeviceHandleProvided);
}
static NV_STATUS __nvoc_thunk_RsResource_deviceMapTo(struct Device *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_deviceControl_Prologue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_GpuResource_deviceGetRegBaseOffsetAndSize(struct Device *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pGpu, pOffset, pSize);
}
static NvBool __nvoc_thunk_RsResource_deviceCanCopy(struct Device *pResource) {
return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset));
}
static void __nvoc_thunk_RsResource_devicePreDestruct(struct Device *pResource) {
resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_deviceUnmapFrom(struct Device *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pParams);
}
static void __nvoc_thunk_RmResource_deviceControl_Epilogue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RsResource_deviceControlLookup(struct Device *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pParams, ppEntry);
}
static NV_STATUS __nvoc_thunk_GpuResource_deviceMap(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pCallContext, pParams, pCpuMapping);
}
static NvBool __nvoc_thunk_RmResource_deviceAccessCallback(struct Device *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
// Fallback so the table below compiles when the build provides no
// method-disabling hook; (0) means "no exported method is disabled".
#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG)
#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0)
#endif
// RM control export table for class Device: maps each NV0080 control command
// (methodId) to its handler, parameter-struct size, and access-control data.
// NOTE(review): the /*flags=*/ and /*accessRight=*/ values are generated
// RMCTRL bitmasks whose meaning is defined elsewhere in the tree -- not
// inferable from this file; do not hand-edit them.
static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[] =
{
{ /* [0] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetClasslist_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
/*flags=*/ 0x813u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x800201u,
/*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdGpuGetClasslist"
#endif
},
{ /* [1] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetNumSubdevices_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
/*flags=*/ 0x811u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x800280u,
/*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdGpuGetNumSubdevices"
#endif
},
{ /* [2] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u)
/*flags=*/ 0x5u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x800287u,
/*paramSize=*/ sizeof(NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdGpuModifyGpuSwStatePersistence"
#endif
},
{ /* [3] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*flags=*/ 0x11u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x800288u,
/*paramSize=*/ sizeof(NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdGpuQueryGpuSwStatePersistence"
#endif
},
{ /* [4] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetVirtualizationMode_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*flags=*/ 0x810u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x800289u,
/*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdGpuGetVirtualizationMode"
#endif
},
{ /* [5] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetClasslistV2_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
/*flags=*/ 0x813u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x800292u,
/*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdGpuGetClasslistV2"
#endif
},
{ /* [6] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
/*flags=*/ 0x13u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x800293u,
/*paramSize=*/ sizeof(NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdGpuGetFindSubDeviceHandle"
#endif
},
{ /* [7] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetBrandCaps_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u)
/*flags=*/ 0x211u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x800294u,
/*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdGpuGetBrandCaps"
#endif
},
{ /* [8] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*flags=*/ 0x204u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x800296u,
/*paramSize=*/ sizeof(NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdGpuSetVgpuVfBar1Size"
#endif
},
{ /* [9] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdOsUnixVTSwitch_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*flags=*/ 0x1u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x801e01u,
/*paramSize=*/ sizeof(NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdOsUnixVTSwitch"
#endif
},
{ /* [10] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdOsUnixVTGetFBInfo_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*flags=*/ 0x1u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x801e02u,
/*paramSize=*/ sizeof(NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdOsUnixVTGetFBInfo"
#endif
},
};
// Export descriptor consumed by the resource server; numEntries must equal
// the number of entries in the table above (currently 11, indices [0]-[10]).
const struct NVOC_EXPORT_INFO __nvoc_export_info_Device =
{
/*numEntries=*/ 11,
/*pExportEntries=*/ __nvoc_exported_method_def_Device
};
void __nvoc_dtor_GpuResource(GpuResource*);
// Destructor: run Device's own cleanup first, then destroy the GpuResource
// base subobject -- the reverse of construction order in __nvoc_ctor_Device.
void __nvoc_dtor_Device(Device *pThis) {
__nvoc_deviceDestruct(pThis);
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Initializes Device's generator-seeded data fields. This class declares
// none, so the body only silences the unused-parameter warning.
void __nvoc_init_dataField_Device(Device *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
// Constructor chain: construct the GpuResource base, seed generated data
// fields, then run the hand-written deviceConstruct. Uses the idiomatic
// goto-cleanup ladder so each failure label unwinds exactly what succeeded.
NV_STATUS __nvoc_ctor_Device(Device *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_Device_fail_GpuResource;
__nvoc_init_dataField_Device(pThis);
status = __nvoc_deviceConstruct(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_Device_fail__init;
goto __nvoc_ctor_Device_exit; // Success
__nvoc_ctor_Device_fail__init:
// deviceConstruct failed: tear the already-constructed base back down.
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_ctor_Device_fail_GpuResource:
__nvoc_ctor_Device_exit:
return status;
}
// Fills the Device virtual function table. Three sections:
//   1. Device's own virtuals (Control / InternalControlForward),
//   2. exported control handlers, each guarded by the same disable flag as
//      its entry in __nvoc_exported_method_def_Device above,
//   3. inherited slots wired to the thunks defined earlier (including two
//      base-class slots overridden to dispatch back into Device).
static void __nvoc_init_funcTable_Device_1(Device *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
pThis->__deviceControl__ = &deviceControl_IMPL;
pThis->__deviceInternalControlForward__ = &deviceInternalControlForward_IMPL;
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
pThis->__deviceCtrlCmdGpuGetClasslist__ = &deviceCtrlCmdGpuGetClasslist_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
pThis->__deviceCtrlCmdGpuGetClasslistV2__ = &deviceCtrlCmdGpuGetClasslistV2_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
pThis->__deviceCtrlCmdGpuGetNumSubdevices__ = &deviceCtrlCmdGpuGetNumSubdevices_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u)
pThis->__deviceCtrlCmdGpuModifyGpuSwStatePersistence__ = &deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
pThis->__deviceCtrlCmdGpuQueryGpuSwStatePersistence__ = &deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
pThis->__deviceCtrlCmdGpuGetVirtualizationMode__ = &deviceCtrlCmdGpuGetVirtualizationMode_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
pThis->__deviceCtrlCmdGpuSetVgpuVfBar1Size__ = &deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u)
pThis->__deviceCtrlCmdGpuGetBrandCaps__ = &deviceCtrlCmdGpuGetBrandCaps_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
pThis->__deviceCtrlCmdGpuGetFindSubDeviceHandle__ = &deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
pThis->__deviceCtrlCmdOsUnixVTSwitch__ = &deviceCtrlCmdOsUnixVTSwitch_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
pThis->__deviceCtrlCmdOsUnixVTGetFBInfo__ = &deviceCtrlCmdOsUnixVTGetFBInfo_IMPL;
#endif
// Override the GpuResource base slots so base-pointer calls reach Device.
pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_Device_gpuresControl;
pThis->__nvoc_base_GpuResource.__gpuresInternalControlForward__ = &__nvoc_thunk_Device_gpuresInternalControlForward;
// Inherited slots: dispatch up to the ancestor implementations via thunks.
pThis->__deviceShareCallback__ = &__nvoc_thunk_GpuResource_deviceShareCallback;
pThis->__deviceUnmap__ = &__nvoc_thunk_GpuResource_deviceUnmap;
pThis->__deviceGetMemInterMapParams__ = &__nvoc_thunk_RmResource_deviceGetMemInterMapParams;
pThis->__deviceGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_deviceGetMemoryMappingDescriptor;
pThis->__deviceGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_deviceGetMapAddrSpace;
pThis->__deviceGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_deviceGetInternalObjectHandle;
pThis->__deviceControlFilter__ = &__nvoc_thunk_RsResource_deviceControlFilter;
pThis->__deviceAddAdditionalDependants__ = &__nvoc_thunk_RsResource_deviceAddAdditionalDependants;
pThis->__deviceGetRefCount__ = &__nvoc_thunk_RsResource_deviceGetRefCount;
pThis->__deviceCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_deviceCheckMemInterUnmap;
pThis->__deviceMapTo__ = &__nvoc_thunk_RsResource_deviceMapTo;
pThis->__deviceControl_Prologue__ = &__nvoc_thunk_RmResource_deviceControl_Prologue;
pThis->__deviceGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_deviceGetRegBaseOffsetAndSize;
pThis->__deviceCanCopy__ = &__nvoc_thunk_RsResource_deviceCanCopy;
pThis->__devicePreDestruct__ = &__nvoc_thunk_RsResource_devicePreDestruct;
pThis->__deviceUnmapFrom__ = &__nvoc_thunk_RsResource_deviceUnmapFrom;
pThis->__deviceControl_Epilogue__ = &__nvoc_thunk_RmResource_deviceControl_Epilogue;
pThis->__deviceControlLookup__ = &__nvoc_thunk_RsResource_deviceControlLookup;
pThis->__deviceMap__ = &__nvoc_thunk_GpuResource_deviceMap;
pThis->__deviceAccessCallback__ = &__nvoc_thunk_RmResource_deviceAccessCallback;
}
// Public entry point for function-table setup. The body lives in a numbered
// chunk (_1) -- presumably the generator splits large tables into several
// chunks; this class needs only one.
void __nvoc_init_funcTable_Device(Device *pThis) {
__nvoc_init_funcTable_Device_1(pThis);
}
void __nvoc_init_GpuResource(GpuResource*);
// Initializes the NVOC skeleton of a freshly allocated Device: caches a
// pointer to every ancestor subobject, recursively initializes the base,
// then installs the virtual function table. Runs before __nvoc_ctor_Device.
void __nvoc_init_Device(Device *pThis) {
pThis->__nvoc_pbase_Device = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
__nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_init_funcTable_Device(pThis);
}
// Factory: allocate, zero, and fully construct a Device. On success *ppThis
// receives the new object; on failure *ppThis is left untouched and the
// allocation is released.
NV_STATUS __nvoc_objCreate_Device(Device **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status;
Object *pParentObj;
Device *pThis;
pThis = portMemAllocNonPaged(sizeof(Device));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(Device));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Device);
// Link into the parent's object tree unless the caller only wants the
// parent for halspec resolution.
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_Device(pThis);
status = __nvoc_ctor_Device(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_objCreate_Device_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_Device_cleanup:
// do not call destructors here since the constructor already called them
// NOTE(review): on this path the object may still sit in pParentObj's child
// list from objAddChild above -- confirm the ctor/dtor chain unlinks it
// before portMemFree, otherwise the parent keeps a dangling child pointer.
portMemFree(pThis);
return status;
}
/*
 * va_list adapter used by the dynamic object-creation framework: pull the two
 * Device constructor arguments (call context and allocation parameters) off
 * the variadic list, then delegate to the typed factory.
 */
NV_STATUS __nvoc_objCreateDynamic_Device(Device **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    struct CALL_CONTEXT *pCtx = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
    return __nvoc_objCreate_Device(ppThis, pParent, createFlags, pCtx, pAllocParams);
}

/* ==== Extraction artifact: a scraped diff-view header ("View File" /
 * "@@ -0,0 +1,466 @@") stood here. It marked the start of the next
 * concatenated generated file: g_device_nvoc.h (the Device class
 * declaration). ==== */
#ifndef _G_DEVICE_NVOC_H_
#define _G_DEVICE_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_device_nvoc.h"
#ifndef _DEVICE_H_
#define _DEVICE_H_
#include "core/core.h"
#include "resserv/resserv.h"
#include "nvoc/prelude.h"
#include "nvoc/utility.h"
#include "resserv/rs_resource.h"
#include "rmapi/control.h"
#include "containers/btree.h"
#include "gpu/gpu_resource.h"
#include "mem_mgr/vaspace.h"
#include "ctrl/ctrl0080.h" // rmcontrol params
// Forward declaration
struct HOST_VGPU_DEVICE;
struct OBJVASPACE;
#ifndef __NVOC_CLASS_OBJVASPACE_TYPEDEF__
#define __NVOC_CLASS_OBJVASPACE_TYPEDEF__
typedef struct OBJVASPACE OBJVASPACE;
#endif /* __NVOC_CLASS_OBJVASPACE_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJVASPACE
#define __nvoc_class_id_OBJVASPACE 0x6c347f
#endif /* __nvoc_class_id_OBJVASPACE */
// TODO: Remove this after adding KERNEL_HOST_VGPU_DEVICE
typedef struct HOST_VGPU_DEVICE KERNEL_HOST_VGPU_DEVICE;
/**
* A device consists of one or more GPUs. Devices provide broadcast
* semantics; that is, operations involving a device are applied to all GPUs
* in the device.
*/
// PRIVATE_FIELD leaves member names intact in translation units that opt in
// via NVOC_DEVICE_H_PRIVATE_ACCESS_ALLOWED; everywhere else it defers to
// NVOC_PRIVATE_FIELD, which hides/mangles the name to enforce privacy.
#ifdef NVOC_DEVICE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct Device {
// --- NVOC boilerplate: RTTI pointer, the single base-class instance, and
// cached pointers to every ancestor subobject (set by __nvoc_init_Device).
const struct NVOC_RTTI *__nvoc_rtti;
struct GpuResource __nvoc_base_GpuResource;
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct GpuResource *__nvoc_pbase_GpuResource;
struct Device *__nvoc_pbase_Device;
// --- Virtual function table, one slot per method; populated by
// __nvoc_init_funcTable_Device (Device overrides, exported control
// handlers, and thunked inherited slots).
NV_STATUS (*__deviceControl__)(struct Device *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__deviceInternalControlForward__)(struct Device *, NvU32, void *, NvU32);
NV_STATUS (*__deviceCtrlCmdGpuGetClasslist__)(struct Device *, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *);
NV_STATUS (*__deviceCtrlCmdGpuGetClasslistV2__)(struct Device *, NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *);
NV_STATUS (*__deviceCtrlCmdGpuGetNumSubdevices__)(struct Device *, NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *);
NV_STATUS (*__deviceCtrlCmdGpuModifyGpuSwStatePersistence__)(struct Device *, NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *);
NV_STATUS (*__deviceCtrlCmdGpuQueryGpuSwStatePersistence__)(struct Device *, NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *);
NV_STATUS (*__deviceCtrlCmdGpuGetVirtualizationMode__)(struct Device *, NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *);
NV_STATUS (*__deviceCtrlCmdGpuSetVgpuVfBar1Size__)(struct Device *, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *);
NV_STATUS (*__deviceCtrlCmdGpuGetBrandCaps__)(struct Device *, NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *);
NV_STATUS (*__deviceCtrlCmdGpuGetFindSubDeviceHandle__)(struct Device *, NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *);
NV_STATUS (*__deviceCtrlCmdOsUnixVTSwitch__)(struct Device *, NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *);
NV_STATUS (*__deviceCtrlCmdOsUnixVTGetFBInfo__)(struct Device *, NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *);
NvBool (*__deviceShareCallback__)(struct Device *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__deviceUnmap__)(struct Device *, struct CALL_CONTEXT *, struct RsCpuMapping *);
NV_STATUS (*__deviceGetMemInterMapParams__)(struct Device *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__deviceGetMemoryMappingDescriptor__)(struct Device *, struct MEMORY_DESCRIPTOR **);
NV_STATUS (*__deviceGetMapAddrSpace__)(struct Device *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
NvHandle (*__deviceGetInternalObjectHandle__)(struct Device *);
NV_STATUS (*__deviceControlFilter__)(struct Device *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
void (*__deviceAddAdditionalDependants__)(struct RsClient *, struct Device *, RsResourceRef *);
NvU32 (*__deviceGetRefCount__)(struct Device *);
NV_STATUS (*__deviceCheckMemInterUnmap__)(struct Device *, NvBool);
NV_STATUS (*__deviceMapTo__)(struct Device *, RS_RES_MAP_TO_PARAMS *);
NV_STATUS (*__deviceControl_Prologue__)(struct Device *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__deviceGetRegBaseOffsetAndSize__)(struct Device *, struct OBJGPU *, NvU32 *, NvU32 *);
NvBool (*__deviceCanCopy__)(struct Device *);
void (*__devicePreDestruct__)(struct Device *);
NV_STATUS (*__deviceUnmapFrom__)(struct Device *, RS_RES_UNMAP_FROM_PARAMS *);
void (*__deviceControl_Epilogue__)(struct Device *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__deviceControlLookup__)(struct Device *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NV_STATUS (*__deviceMap__)(struct Device *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
NvBool (*__deviceAccessCallback__)(struct Device *, struct RsClient *, void *, RsAccessRight);
// --- Plain data members. NOTE(review): per-field semantics are not
// visible in this header; names suggest device instance/refcount state,
// VA-space configuration (hClientShare, vaStartInternal/vaLimitInternal/
// vaSize/vaMode), and vGPU bookkeeping -- confirm against their users
// before relying on these readings.
NvU32 deviceInst;
NvU32 PerfReqCnt;
PNODE DevMemoryTable;
NvBool bSliGpuBoostSyncActivate;
NvBool bPerfOptpActive;
NvU32 nPerfOptpRefCnt;
NvU32 nCudaLimitRefCnt;
struct OBJVASPACE *pVASpace;
NvHandle hClientShare;
NvHandle hTargetClient;
NvHandle hTargetDevice;
NvU32 deviceAllocFlags;
NvU32 deviceInternalAllocFlags;
NvU64 vaStartInternal;
NvU64 vaLimitInternal;
NvU64 vaSize;
NvU32 vaMode;
struct HOST_VGPU_DEVICE *pHostVgpuDevice;
KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
};
#ifndef __NVOC_CLASS_Device_TYPEDEF__
#define __NVOC_CLASS_Device_TYPEDEF__
typedef struct Device Device;
#endif /* __NVOC_CLASS_Device_TYPEDEF__ */
#ifndef __nvoc_class_id_Device
#define __nvoc_class_id_Device 0xe0ac20
#endif /* __nvoc_class_id_Device */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Device;
// Unchecked downcast via the cached base pointer -- valid only when pThis is
// statically known to be (derived from) a Device.
#define __staticCast_Device(pThis) \
((pThis)->__nvoc_pbase_Device)
#ifdef __nvoc_device_h_disabled
// Class compiled out: every runtime cast to Device fails.
#define __dynamicCast_Device(pThis) ((Device*)NULL)
#else //__nvoc_device_h_disabled
// RTTI-checked cast: returns NULL when pThis is not actually a Device.
#define __dynamicCast_Device(pThis) \
((Device*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Device)))
#endif //__nvoc_device_h_disabled
// Factory entry points (defined in the generated .c) and the convenience
// macro that adapts any NVOC parent pointer to Dynamic before creating.
NV_STATUS __nvoc_objCreateDynamic_Device(Device**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_Device(Device**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_Device(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_Device((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
#define deviceControl(pResource, pCallContext, pParams) deviceControl_DISPATCH(pResource, pCallContext, pParams)
#define deviceInternalControlForward(pDevice, command, pParams, size) deviceInternalControlForward_DISPATCH(pDevice, command, pParams, size)
#define deviceCtrlCmdGpuGetClasslist(pDevice, pClassListParams) deviceCtrlCmdGpuGetClasslist_DISPATCH(pDevice, pClassListParams)
#define deviceCtrlCmdGpuGetClasslistV2(pDevice, pParams) deviceCtrlCmdGpuGetClasslistV2_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdGpuGetNumSubdevices(pDevice, pSubDeviceCountParams) deviceCtrlCmdGpuGetNumSubdevices_DISPATCH(pDevice, pSubDeviceCountParams)
#define deviceCtrlCmdGpuModifyGpuSwStatePersistence(pDevice, pParams) deviceCtrlCmdGpuModifyGpuSwStatePersistence_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdGpuQueryGpuSwStatePersistence(pDevice, pParams) deviceCtrlCmdGpuQueryGpuSwStatePersistence_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdGpuGetVirtualizationMode(pDevice, pParams) deviceCtrlCmdGpuGetVirtualizationMode_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdGpuSetVgpuVfBar1Size(pDevice, pParams) deviceCtrlCmdGpuSetVgpuVfBar1Size_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdGpuGetBrandCaps(pDevice, pParams) deviceCtrlCmdGpuGetBrandCaps_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdGpuGetFindSubDeviceHandle(pDevice, pParams) deviceCtrlCmdGpuGetFindSubDeviceHandle_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdOsUnixVTSwitch(pDevice, pParams) deviceCtrlCmdOsUnixVTSwitch_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdOsUnixVTGetFBInfo(pDevice, pParams) deviceCtrlCmdOsUnixVTGetFBInfo_DISPATCH(pDevice, pParams)
#define deviceShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) deviceShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define deviceUnmap(pGpuResource, pCallContext, pCpuMapping) deviceUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define deviceGetMemInterMapParams(pRmResource, pParams) deviceGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define deviceGetMemoryMappingDescriptor(pRmResource, ppMemDesc) deviceGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define deviceGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) deviceGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define deviceGetInternalObjectHandle(pGpuResource) deviceGetInternalObjectHandle_DISPATCH(pGpuResource)
#define deviceControlFilter(pResource, pCallContext, pParams) deviceControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define deviceAddAdditionalDependants(pClient, pResource, pReference) deviceAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define deviceGetRefCount(pResource) deviceGetRefCount_DISPATCH(pResource)
#define deviceCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) deviceCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define deviceMapTo(pResource, pParams) deviceMapTo_DISPATCH(pResource, pParams)
#define deviceControl_Prologue(pResource, pCallContext, pParams) deviceControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define deviceGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) deviceGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
#define deviceCanCopy(pResource) deviceCanCopy_DISPATCH(pResource)
#define devicePreDestruct(pResource) devicePreDestruct_DISPATCH(pResource)
#define deviceUnmapFrom(pResource, pParams) deviceUnmapFrom_DISPATCH(pResource, pParams)
#define deviceControl_Epilogue(pResource, pCallContext, pParams) deviceControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define deviceControlLookup(pResource, pParams, ppEntry) deviceControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define deviceMap(pGpuResource, pCallContext, pParams, pCpuMapping) deviceMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define deviceAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) deviceAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
/*
 * Dispatch shims for virtuals that Device itself implements.
 * Each pair is: the _IMPL prototype (defined in the hand-written .c)
 * and a static inline _DISPATCH that indirects through the per-object
 * function pointer installed by __nvoc_init_funcTable.
 */
NV_STATUS deviceControl_IMPL(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
static inline NV_STATUS deviceControl_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__deviceControl__(pResource, pCallContext, pParams);
}
NV_STATUS deviceInternalControlForward_IMPL(struct Device *pDevice, NvU32 command, void *pParams, NvU32 size);
static inline NV_STATUS deviceInternalControlForward_DISPATCH(struct Device *pDevice, NvU32 command, void *pParams, NvU32 size) {
return pDevice->__deviceInternalControlForward__(pDevice, command, pParams, size);
}
/* NV0080_CTRL_* control-command handlers (RM control interface). */
NV_STATUS deviceCtrlCmdGpuGetClasslist_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *pClassListParams);
static inline NV_STATUS deviceCtrlCmdGpuGetClasslist_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *pClassListParams) {
return pDevice->__deviceCtrlCmdGpuGetClasslist__(pDevice, pClassListParams);
}
NV_STATUS deviceCtrlCmdGpuGetClasslistV2_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *pParams);
static inline NV_STATUS deviceCtrlCmdGpuGetClasslistV2_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *pParams) {
return pDevice->__deviceCtrlCmdGpuGetClasslistV2__(pDevice, pParams);
}
NV_STATUS deviceCtrlCmdGpuGetNumSubdevices_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *pSubDeviceCountParams);
static inline NV_STATUS deviceCtrlCmdGpuGetNumSubdevices_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *pSubDeviceCountParams) {
return pDevice->__deviceCtrlCmdGpuGetNumSubdevices__(pDevice, pSubDeviceCountParams);
}
NV_STATUS deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *pParams);
static inline NV_STATUS deviceCtrlCmdGpuModifyGpuSwStatePersistence_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *pParams) {
return pDevice->__deviceCtrlCmdGpuModifyGpuSwStatePersistence__(pDevice, pParams);
}
NV_STATUS deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *pParams);
static inline NV_STATUS deviceCtrlCmdGpuQueryGpuSwStatePersistence_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *pParams) {
return pDevice->__deviceCtrlCmdGpuQueryGpuSwStatePersistence__(pDevice, pParams);
}
NV_STATUS deviceCtrlCmdGpuGetVirtualizationMode_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *pParams);
static inline NV_STATUS deviceCtrlCmdGpuGetVirtualizationMode_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *pParams) {
return pDevice->__deviceCtrlCmdGpuGetVirtualizationMode__(pDevice, pParams);
}
NV_STATUS deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *pParams);
static inline NV_STATUS deviceCtrlCmdGpuSetVgpuVfBar1Size_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *pParams) {
return pDevice->__deviceCtrlCmdGpuSetVgpuVfBar1Size__(pDevice, pParams);
}
NV_STATUS deviceCtrlCmdGpuGetBrandCaps_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *pParams);
static inline NV_STATUS deviceCtrlCmdGpuGetBrandCaps_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *pParams) {
return pDevice->__deviceCtrlCmdGpuGetBrandCaps__(pDevice, pParams);
}
NV_STATUS deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *pParams);
static inline NV_STATUS deviceCtrlCmdGpuGetFindSubDeviceHandle_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *pParams) {
return pDevice->__deviceCtrlCmdGpuGetFindSubDeviceHandle__(pDevice, pParams);
}
NV_STATUS deviceCtrlCmdOsUnixVTSwitch_IMPL(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams);
static inline NV_STATUS deviceCtrlCmdOsUnixVTSwitch_DISPATCH(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams) {
return pDevice->__deviceCtrlCmdOsUnixVTSwitch__(pDevice, pParams);
}
NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams);
static inline NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_DISPATCH(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams) {
return pDevice->__deviceCtrlCmdOsUnixVTGetFBInfo__(pDevice, pParams);
}
/*
 * Dispatch shims for virtuals Device inherits from its ancestors
 * (GpuResource / RmResource / RsResource).  No _IMPL prototypes here:
 * the function-table slots are filled with thunks to the base-class
 * implementations unless Device overrides them.
 */
static inline NvBool deviceShareCallback_DISPATCH(struct Device *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pGpuResource->__deviceShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}
static inline NV_STATUS deviceUnmap_DISPATCH(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__deviceUnmap__(pGpuResource, pCallContext, pCpuMapping);
}
static inline NV_STATUS deviceGetMemInterMapParams_DISPATCH(struct Device *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return pRmResource->__deviceGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS deviceGetMemoryMappingDescriptor_DISPATCH(struct Device *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return pRmResource->__deviceGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline NV_STATUS deviceGetMapAddrSpace_DISPATCH(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return pGpuResource->__deviceGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}
static inline NvHandle deviceGetInternalObjectHandle_DISPATCH(struct Device *pGpuResource) {
return pGpuResource->__deviceGetInternalObjectHandle__(pGpuResource);
}
static inline NV_STATUS deviceControlFilter_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__deviceControlFilter__(pResource, pCallContext, pParams);
}
/* Note: pClient is the first parameter here; the object is second. */
static inline void deviceAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Device *pResource, RsResourceRef *pReference) {
pResource->__deviceAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NvU32 deviceGetRefCount_DISPATCH(struct Device *pResource) {
return pResource->__deviceGetRefCount__(pResource);
}
static inline NV_STATUS deviceCheckMemInterUnmap_DISPATCH(struct Device *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__deviceCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NV_STATUS deviceMapTo_DISPATCH(struct Device *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__deviceMapTo__(pResource, pParams);
}
static inline NV_STATUS deviceControl_Prologue_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__deviceControl_Prologue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS deviceGetRegBaseOffsetAndSize_DISPATCH(struct Device *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return pGpuResource->__deviceGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize);
}
static inline NvBool deviceCanCopy_DISPATCH(struct Device *pResource) {
return pResource->__deviceCanCopy__(pResource);
}
static inline void devicePreDestruct_DISPATCH(struct Device *pResource) {
pResource->__devicePreDestruct__(pResource);
}
static inline NV_STATUS deviceUnmapFrom_DISPATCH(struct Device *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pResource->__deviceUnmapFrom__(pResource, pParams);
}
static inline void deviceControl_Epilogue_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__deviceControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS deviceControlLookup_DISPATCH(struct Device *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__deviceControlLookup__(pResource, pParams, ppEntry);
}
static inline NV_STATUS deviceMap_DISPATCH(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__deviceMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}
static inline NvBool deviceAccessCallback_DISPATCH(struct Device *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pResource->__deviceAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
/*
 * Stub: in this build configuration the default VA space cannot be
 * (re)assigned, so the request is accepted as a no-op.  Always NV_OK.
 */
static inline NV_STATUS deviceSetDefaultVASpace(struct Device *pDevice, NvHandle hVASpace)
{
    (void)pDevice;   /* unused in this configuration */
    (void)hVASpace;  /* unused in this configuration */
    return NV_OK;
}
/*
 * Constructor/destructor hooks and non-virtual Device helpers.  The
 * __nvoc_device_h_disabled ladders provide asserting stubs that return
 * NV_ERR_NOT_SUPPORTED when the class is compiled out; otherwise each
 * name maps straight onto its _IMPL.
 */
NV_STATUS deviceConstruct_IMPL(struct Device *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_deviceConstruct(arg_pResource, arg_pCallContext, arg_pParams) deviceConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams)
void deviceDestruct_IMPL(struct Device *pResource);
#define __nvoc_deviceDestruct(pResource) deviceDestruct_IMPL(pResource)
NV_STATUS deviceInit_IMPL(struct Device *pDevice, struct CALL_CONTEXT *pCallContext, NvHandle hClient, NvHandle hDevice, NvU32 deviceInst, NvHandle hClientShare, NvHandle hTargetClient, NvHandle hTargetDevice, NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 allocFlags, NvU32 vaMode);
#ifdef __nvoc_device_h_disabled
static inline NV_STATUS deviceInit(struct Device *pDevice, struct CALL_CONTEXT *pCallContext, NvHandle hClient, NvHandle hDevice, NvU32 deviceInst, NvHandle hClientShare, NvHandle hTargetClient, NvHandle hTargetDevice, NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 allocFlags, NvU32 vaMode) {
NV_ASSERT_FAILED_PRECOMP("Device was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_device_h_disabled
#define deviceInit(pDevice, pCallContext, hClient, hDevice, deviceInst, hClientShare, hTargetClient, hTargetDevice, vaSize, vaStartInternal, vaLimitInternal, allocFlags, vaMode) deviceInit_IMPL(pDevice, pCallContext, hClient, hDevice, deviceInst, hClientShare, hTargetClient, hTargetDevice, vaSize, vaStartInternal, vaLimitInternal, allocFlags, vaMode)
#endif //__nvoc_device_h_disabled
/* Lookup helpers: resolve a Device by handle, instance, or owning GPU. */
NV_STATUS deviceGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDevice, struct Device **ppDevice);
#define deviceGetByHandle(pClient, hDevice, ppDevice) deviceGetByHandle_IMPL(pClient, hDevice, ppDevice)
NV_STATUS deviceGetByInstance_IMPL(struct RsClient *pClient, NvU32 deviceInstance, struct Device **ppDevice);
#define deviceGetByInstance(pClient, deviceInstance, ppDevice) deviceGetByInstance_IMPL(pClient, deviceInstance, ppDevice)
NV_STATUS deviceGetByGpu_IMPL(struct RsClient *pClient, struct OBJGPU *pGpu, NvBool bAnyInGroup, struct Device **ppDevice);
#define deviceGetByGpu(pClient, pGpu, bAnyInGroup, ppDevice) deviceGetByGpu_IMPL(pClient, pGpu, bAnyInGroup, ppDevice)
NV_STATUS deviceGetDefaultVASpace_IMPL(struct Device *pDevice, struct OBJVASPACE **ppVAS);
#ifdef __nvoc_device_h_disabled
static inline NV_STATUS deviceGetDefaultVASpace(struct Device *pDevice, struct OBJVASPACE **ppVAS) {
NV_ASSERT_FAILED_PRECOMP("Device was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_device_h_disabled
#define deviceGetDefaultVASpace(pDevice, ppVAS) deviceGetDefaultVASpace_IMPL(pDevice, ppVAS)
#endif //__nvoc_device_h_disabled
/* Client-share management for the device's VA space. */
NV_STATUS deviceSetClientShare_IMPL(struct Device *pDevice, NvHandle hClientShare, NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 deviceAllocFlags);
#ifdef __nvoc_device_h_disabled
static inline NV_STATUS deviceSetClientShare(struct Device *pDevice, NvHandle hClientShare, NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 deviceAllocFlags) {
NV_ASSERT_FAILED_PRECOMP("Device was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_device_h_disabled
#define deviceSetClientShare(pDevice, hClientShare, vaSize, vaStartInternal, vaLimitInternal, deviceAllocFlags) deviceSetClientShare_IMPL(pDevice, hClientShare, vaSize, vaStartInternal, vaLimitInternal, deviceAllocFlags)
#endif //__nvoc_device_h_disabled
void deviceRemoveFromClientShare_IMPL(struct Device *pDevice);
#ifdef __nvoc_device_h_disabled
static inline void deviceRemoveFromClientShare(struct Device *pDevice) {
NV_ASSERT_FAILED_PRECOMP("Device was disabled!");
}
#else //__nvoc_device_h_disabled
#define deviceRemoveFromClientShare(pDevice) deviceRemoveFromClientShare_IMPL(pDevice)
#endif //__nvoc_device_h_disabled
#undef PRIVATE_FIELD
// ****************************************************************************
// Deprecated Definitions
// ****************************************************************************
/**
* WARNING: This function is deprecated! Please use deviceGetByHandle.
*/
struct Device *CliGetDeviceInfo(NvHandle, NvHandle);
/**
* WARNING: This function is deprecated and use is *strongly* discouraged
* (especially for new code!)
*
* From the function name (CliSetGpuContext) it appears as a simple accessor but
* violates expectations by modifying the SLI BC threadstate (calls to
* GPU_RES_SET_THREAD_BC_STATE). This can be dangerous if not carefully managed
* by the caller.
*
* Instead of using this routine, please use deviceGetByHandle then call
* GPU_RES_GET_GPU, GPU_RES_GET_GPUGRP, GPU_RES_SET_THREAD_BC_STATE as needed.
*
* Note that GPU_RES_GET_GPU supports returning a pGpu for both pDevice,
* pSubdevice, the base pResource type, and any resource that inherits from
* GpuResource. That is, instead of using CliSetGpuContext or
* CliSetSubDeviceContext, please use following pattern to look up the pGpu:
*
* OBJGPU *pGpu = GPU_RES_GET_GPU(pResource or pResourceRef->pResource)
*
* To set the threadstate, please use:
*
* GPU_RES_SET_THREAD_BC_STATE(pResource or pResourceRef->pResource);
*/
NV_STATUS CliSetGpuContext(NvHandle, NvHandle, OBJGPU **, struct OBJGPUGRP **);
/**
* WARNING: This function is deprecated! Please use gpuGetByRef()
*/
OBJGPU *CliGetGpuFromContext(RsResourceRef *pContextRef, NvBool *pbBroadcast);
/**
* WARNING: This function is deprecated! Please use gpuGetByHandle()
*/
OBJGPU *CliGetGpuFromHandle(NvHandle hClient, NvHandle hResource, NvBool *pbBroadcast);
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_DEVICE_NVOC_H_

/*
 * NOTE(review): the header above (g_device_nvoc.h) ends here; what follows
 * is the start of a second generated translation unit,
 * g_disp_capabilities_nvoc.c.  The stray "View File" and
 * "@@ -0,0 +1,329 @@" lines removed at this point were web/diff extraction
 * residue, not part of either file.
 */
#define NVOC_DISP_CAPABILITIES_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_disp_capabilities_nvoc.h"
/* Debug-only sentinel: duplicate class ids would collide at link time. */
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x99db3e = 1;
#endif
/* Class definitions for DispCapabilities and every ancestor in its chain. */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
/* Forward declarations of the generated lifecycle functions below. */
void __nvoc_init_DispCapabilities(DispCapabilities*);
void __nvoc_init_funcTable_DispCapabilities(DispCapabilities*);
NV_STATUS __nvoc_ctor_DispCapabilities(DispCapabilities*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_DispCapabilities(DispCapabilities*);
void __nvoc_dtor_DispCapabilities(DispCapabilities*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispCapabilities;
/*
 * RTTI tables for DispCapabilities.  Each entry records a relative's
 * class def, the destructor to use when destroying through that base,
 * and the byte offset of the embedded base object inside
 * DispCapabilities — this offset is what the cast thunks below add or
 * subtract when converting between derived and base pointers.
 */
static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_DispCapabilities = {
/*pClassDef=*/ &__nvoc_class_def_DispCapabilities,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispCapabilities,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_RsResource = {
/*pClassDef=*/ &__nvoc_class_def_RsResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
};
static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_RmResourceCommon = {
/*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};
static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_RmResource = {
/*pClassDef=*/ &__nvoc_class_def_RmResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource),
};
static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_GpuResource = {
/*pClassDef=*/ &__nvoc_class_def_GpuResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource),
};
/* Full relative list used by dynamicCast: self first, then ancestors. */
static const struct NVOC_CASTINFO __nvoc_castinfo_DispCapabilities = {
/*numRelatives=*/ 6,
/*relatives=*/ {
&__nvoc_rtti_DispCapabilities_DispCapabilities,
&__nvoc_rtti_DispCapabilities_GpuResource,
&__nvoc_rtti_DispCapabilities_RmResource,
&__nvoc_rtti_DispCapabilities_RmResourceCommon,
&__nvoc_rtti_DispCapabilities_RsResource,
&__nvoc_rtti_DispCapabilities_Object,
},
};
/* Top-level class descriptor: size, id, create fn, cast + export info. */
const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities =
{
/*classInfo=*/ {
/*size=*/ sizeof(DispCapabilities),
/*classId=*/ classId(DispCapabilities),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "DispCapabilities",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispCapabilities,
/*pCastInfo=*/ &__nvoc_castinfo_DispCapabilities,
/*pExportInfo=*/ &__nvoc_export_info_DispCapabilities
};
/*
 * Cast thunks.  The single downcast thunk (base -> DispCapabilities)
 * SUBTRACTS the RTTI offset from the base pointer; the remaining thunks
 * upcast (DispCapabilities -> base) by ADDING the offset, then forward
 * to the base-class implementation.  The unsigned char* arithmetic
 * mirrors the NV_OFFSETOF values recorded in the RTTI tables above.
 */
static NV_STATUS __nvoc_thunk_DispCapabilities_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return dispcapGetRegBaseOffsetAndSize((struct DispCapabilities *)(((unsigned char *)pDispCapabilities) - __nvoc_rtti_DispCapabilities_GpuResource.offset), pGpu, pOffset, pSize);
}
static NvBool __nvoc_thunk_GpuResource_dispcapShareCallback(struct DispCapabilities *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}
static NV_STATUS __nvoc_thunk_GpuResource_dispcapControl(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_GpuResource_dispcapUnmap(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, pCpuMapping);
}
static NV_STATUS __nvoc_thunk_RmResource_dispcapGetMemInterMapParams(struct DispCapabilities *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_dispcapGetMemoryMappingDescriptor(struct DispCapabilities *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), ppMemDesc);
}
static NV_STATUS __nvoc_thunk_GpuResource_dispcapGetMapAddrSpace(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
}
static NvHandle __nvoc_thunk_GpuResource_dispcapGetInternalObjectHandle(struct DispCapabilities *pGpuResource) {
return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_dispcapControlFilter(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pCallContext, pParams);
}
static void __nvoc_thunk_RsResource_dispcapAddAdditionalDependants(struct RsClient *pClient, struct DispCapabilities *pResource, RsResourceRef *pReference) {
resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pReference);
}
static NvU32 __nvoc_thunk_RsResource_dispcapGetRefCount(struct DispCapabilities *pResource) {
return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RmResource_dispcapCheckMemInterUnmap(struct DispCapabilities *pRmResource, NvBool bSubdeviceHandleProvided) {
return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), bSubdeviceHandleProvided);
}
static NV_STATUS __nvoc_thunk_RsResource_dispcapMapTo(struct DispCapabilities *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_dispcapControl_Prologue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pCallContext, pParams);
}
static NvBool __nvoc_thunk_RsResource_dispcapCanCopy(struct DispCapabilities *pResource) {
return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_GpuResource_dispcapInternalControlForward(struct DispCapabilities *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), command, pParams, size);
}
static void __nvoc_thunk_RsResource_dispcapPreDestruct(struct DispCapabilities *pResource) {
resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_dispcapUnmapFrom(struct DispCapabilities *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pParams);
}
static void __nvoc_thunk_RmResource_dispcapControl_Epilogue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RsResource_dispcapControlLookup(struct DispCapabilities *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pParams, ppEntry);
}
static NV_STATUS __nvoc_thunk_GpuResource_dispcapMap(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, pParams, pCpuMapping);
}
static NvBool __nvoc_thunk_RmResource_dispcapAccessCallback(struct DispCapabilities *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
/* DispCapabilities exports no RM control methods. */
const struct NVOC_EXPORT_INFO __nvoc_export_info_DispCapabilities =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_GpuResource(GpuResource*);
/* Destructor: DispCapabilities adds no state of its own, so this only
 * chains to the GpuResource base destructor. */
void __nvoc_dtor_DispCapabilities(DispCapabilities *pThis) {
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
PORT_UNREFERENCED_VARIABLE(pThis);
}
/* Data-field initializer: no fields carry non-zero defaults, so this is
 * an intentional no-op (the object is zeroed at allocation). */
void __nvoc_init_dataField_DispCapabilities(DispCapabilities *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
/*
 * Constructor: base ctor -> data fields -> user construct hook, with
 * goto-based unwind.  If __nvoc_dispcapConstruct fails, the already-
 * constructed GpuResource base is destroyed before returning the error.
 */
NV_STATUS __nvoc_ctor_DispCapabilities(DispCapabilities *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_DispCapabilities_fail_GpuResource;
__nvoc_init_dataField_DispCapabilities(pThis);
status = __nvoc_dispcapConstruct(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_DispCapabilities_fail__init;
goto __nvoc_ctor_DispCapabilities_exit; // Success
__nvoc_ctor_DispCapabilities_fail__init:
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_ctor_DispCapabilities_fail_GpuResource:
__nvoc_ctor_DispCapabilities_exit:
return status;
}
static void __nvoc_init_funcTable_DispCapabilities_1(DispCapabilities *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
pThis->__dispcapGetRegBaseOffsetAndSize__ = &dispcapGetRegBaseOffsetAndSize_IMPL;
pThis->__nvoc_base_GpuResource.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_thunk_DispCapabilities_gpuresGetRegBaseOffsetAndSize;
pThis->__dispcapShareCallback__ = &__nvoc_thunk_GpuResource_dispcapShareCallback;
pThis->__dispcapControl__ = &__nvoc_thunk_GpuResource_dispcapControl;
pThis->__dispcapUnmap__ = &__nvoc_thunk_GpuResource_dispcapUnmap;
pThis->__dispcapGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispcapGetMemInterMapParams;
pThis->__dispcapGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispcapGetMemoryMappingDescriptor;
pThis->__dispcapGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_dispcapGetMapAddrSpace;
pThis->__dispcapGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispcapGetInternalObjectHandle;
pThis->__dispcapControlFilter__ = &__nvoc_thunk_RsResource_dispcapControlFilter;
pThis->__dispcapAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispcapAddAdditionalDependants;
pThis->__dispcapGetRefCount__ = &__nvoc_thunk_RsResource_dispcapGetRefCount;
pThis->__dispcapCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispcapCheckMemInterUnmap;
pThis->__dispcapMapTo__ = &__nvoc_thunk_RsResource_dispcapMapTo;
pThis->__dispcapControl_Prologue__ = &__nvoc_thunk_RmResource_dispcapControl_Prologue;
pThis->__dispcapCanCopy__ = &__nvoc_thunk_RsResource_dispcapCanCopy;
pThis->__dispcapInternalControlForward__ = &__nvoc_thunk_GpuResource_dispcapInternalControlForward;
pThis->__dispcapPreDestruct__ = &__nvoc_thunk_RsResource_dispcapPreDestruct;
pThis->__dispcapUnmapFrom__ = &__nvoc_thunk_RsResource_dispcapUnmapFrom;
pThis->__dispcapControl_Epilogue__ = &__nvoc_thunk_RmResource_dispcapControl_Epilogue;
pThis->__dispcapControlLookup__ = &__nvoc_thunk_RsResource_dispcapControlLookup;
pThis->__dispcapMap__ = &__nvoc_thunk_GpuResource_dispcapMap;
pThis->__dispcapAccessCallback__ = &__nvoc_thunk_RmResource_dispcapAccessCallback;
}
// Entry point for vtable initialization.  NVOC splits large tables into
// numbered passes; this class needs only pass 1.
void __nvoc_init_funcTable_DispCapabilities(DispCapabilities *pThis) {
__nvoc_init_funcTable_DispCapabilities_1(pThis);
}
void __nvoc_init_GpuResource(GpuResource*);
// Initialize a freshly allocated DispCapabilities: cache one ancestor pointer
// per class in the hierarchy (this is what makes staticCast a simple field
// load), recursively initialize the embedded GpuResource base, then wire the
// virtual function table.
void __nvoc_init_DispCapabilities(DispCapabilities *pThis) {
pThis->__nvoc_pbase_DispCapabilities = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
__nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_init_funcTable_DispCapabilities(pThis);
}
// Allocate, initialize, and construct a DispCapabilities object.
// On success, *ppThis receives the new object and NV_OK is returned.
// On failure, the partially constructed object is freed, *ppThis is left
// untouched, and the constructor's status is returned.
NV_STATUS __nvoc_objCreate_DispCapabilities(DispCapabilities **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status;
Object *pParentObj;
DispCapabilities *pThis;
// Zeroed non-paged allocation: all vtable slots and fields start NULL/0.
pThis = portMemAllocNonPaged(sizeof(DispCapabilities));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(DispCapabilities));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispCapabilities);
// Link into the parent's child list unless the caller supplied the parent
// only for HAL-spec resolution.
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_DispCapabilities(pThis);
status = __nvoc_ctor_DispCapabilities(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_objCreate_DispCapabilities_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_DispCapabilities_cleanup:
// do not call destructors here since the constructor already called them
// NOTE(review): the failure path frees pThis directly; presumably the
// constructor's unwind (through the Object destructor) already detached it
// from the parent's child list — confirm in __nvoc_ctor_DispCapabilities.
portMemFree(pThis);
return status;
}
// Varargs adapter for dynamic creation: unpacks the CALL_CONTEXT and
// allocation-parameter arguments from 'args' (in that order) and forwards
// them to the typed creation entry point.
NV_STATUS __nvoc_objCreateDynamic_DispCapabilities(DispCapabilities **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
struct CALL_CONTEXT *pCallContext = va_arg(args, struct CALL_CONTEXT *);
struct RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
return __nvoc_objCreate_DispCapabilities(ppThis, pParent, createFlags, pCallContext, pAllocParams);
}

View File

@@ -0,0 +1,239 @@
#ifndef _G_DISP_CAPABILITIES_NVOC_H_
#define _G_DISP_CAPABILITIES_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/******************************************************************************
*
* Description:
* This file contains functions managing DispCapabilities class.
*
******************************************************************************/
#include "g_disp_capabilities_nvoc.h"
#ifndef DISP_CAPABILITIES_H
#define DISP_CAPABILITIES_H
#include "gpu/gpu_resource.h"
/*!
* RM internal class representing NVXXXX_DISP_CAPABILITIES
*/
#ifdef NVOC_DISP_CAPABILITIES_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// Generated NVOC instance layout for the DispCapabilities class.  Field order
// is part of the generated ABI and must not be rearranged by hand.
struct DispCapabilities {
// Run-time type info consumed by dynamicCast()/classInfo().
const struct NVOC_RTTI *__nvoc_rtti;
// Embedded base-class instance: DispCapabilities derives from GpuResource.
struct GpuResource __nvoc_base_GpuResource;
// Cached ancestor pointers (one per class in the hierarchy) so staticCast
// resolves to a plain field load.
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct GpuResource *__nvoc_pbase_GpuResource;
struct DispCapabilities *__nvoc_pbase_DispCapabilities;
// Virtual function table: one slot per (possibly inherited) method, filled
// in by __nvoc_init_funcTable_DispCapabilities().
NV_STATUS (*__dispcapGetRegBaseOffsetAndSize__)(struct DispCapabilities *, struct OBJGPU *, NvU32 *, NvU32 *);
NvBool (*__dispcapShareCallback__)(struct DispCapabilities *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__dispcapControl__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispcapUnmap__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RsCpuMapping *);
NV_STATUS (*__dispcapGetMemInterMapParams__)(struct DispCapabilities *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__dispcapGetMemoryMappingDescriptor__)(struct DispCapabilities *, struct MEMORY_DESCRIPTOR **);
NV_STATUS (*__dispcapGetMapAddrSpace__)(struct DispCapabilities *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
NvHandle (*__dispcapGetInternalObjectHandle__)(struct DispCapabilities *);
NV_STATUS (*__dispcapControlFilter__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
void (*__dispcapAddAdditionalDependants__)(struct RsClient *, struct DispCapabilities *, RsResourceRef *);
NvU32 (*__dispcapGetRefCount__)(struct DispCapabilities *);
NV_STATUS (*__dispcapCheckMemInterUnmap__)(struct DispCapabilities *, NvBool);
NV_STATUS (*__dispcapMapTo__)(struct DispCapabilities *, RS_RES_MAP_TO_PARAMS *);
NV_STATUS (*__dispcapControl_Prologue__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NvBool (*__dispcapCanCopy__)(struct DispCapabilities *);
NV_STATUS (*__dispcapInternalControlForward__)(struct DispCapabilities *, NvU32, void *, NvU32);
void (*__dispcapPreDestruct__)(struct DispCapabilities *);
NV_STATUS (*__dispcapUnmapFrom__)(struct DispCapabilities *, RS_RES_UNMAP_FROM_PARAMS *);
void (*__dispcapControl_Epilogue__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispcapControlLookup__)(struct DispCapabilities *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NV_STATUS (*__dispcapMap__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
NvBool (*__dispcapAccessCallback__)(struct DispCapabilities *, struct RsClient *, void *, RsAccessRight);
// NOTE(review): presumably offset/length of the display-capabilities
// register window returned by dispcapGetRegBaseOffsetAndSize — confirm in
// dispcapConstruct_IMPL.
NvU32 ControlOffset;
NvU32 ControlLength;
};
#ifndef __NVOC_CLASS_DispCapabilities_TYPEDEF__
#define __NVOC_CLASS_DispCapabilities_TYPEDEF__
typedef struct DispCapabilities DispCapabilities;
#endif /* __NVOC_CLASS_DispCapabilities_TYPEDEF__ */
#ifndef __nvoc_class_id_DispCapabilities
#define __nvoc_class_id_DispCapabilities 0x99db3e
#endif /* __nvoc_class_id_DispCapabilities */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities;
#define __staticCast_DispCapabilities(pThis) \
((pThis)->__nvoc_pbase_DispCapabilities)
#ifdef __nvoc_disp_capabilities_h_disabled
#define __dynamicCast_DispCapabilities(pThis) ((DispCapabilities*)NULL)
#else //__nvoc_disp_capabilities_h_disabled
#define __dynamicCast_DispCapabilities(pThis) \
((DispCapabilities*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispCapabilities)))
#endif //__nvoc_disp_capabilities_h_disabled
NV_STATUS __nvoc_objCreateDynamic_DispCapabilities(DispCapabilities**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_DispCapabilities(DispCapabilities**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_DispCapabilities(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_DispCapabilities((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
#define dispcapGetRegBaseOffsetAndSize(pDispCapabilities, pGpu, pOffset, pSize) dispcapGetRegBaseOffsetAndSize_DISPATCH(pDispCapabilities, pGpu, pOffset, pSize)
#define dispcapShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispcapShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define dispcapControl(pGpuResource, pCallContext, pParams) dispcapControl_DISPATCH(pGpuResource, pCallContext, pParams)
#define dispcapUnmap(pGpuResource, pCallContext, pCpuMapping) dispcapUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define dispcapGetMemInterMapParams(pRmResource, pParams) dispcapGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define dispcapGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispcapGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define dispcapGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispcapGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define dispcapGetInternalObjectHandle(pGpuResource) dispcapGetInternalObjectHandle_DISPATCH(pGpuResource)
#define dispcapControlFilter(pResource, pCallContext, pParams) dispcapControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define dispcapAddAdditionalDependants(pClient, pResource, pReference) dispcapAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define dispcapGetRefCount(pResource) dispcapGetRefCount_DISPATCH(pResource)
#define dispcapCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispcapCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define dispcapMapTo(pResource, pParams) dispcapMapTo_DISPATCH(pResource, pParams)
#define dispcapControl_Prologue(pResource, pCallContext, pParams) dispcapControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define dispcapCanCopy(pResource) dispcapCanCopy_DISPATCH(pResource)
#define dispcapInternalControlForward(pGpuResource, command, pParams, size) dispcapInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define dispcapPreDestruct(pResource) dispcapPreDestruct_DISPATCH(pResource)
#define dispcapUnmapFrom(pResource, pParams) dispcapUnmapFrom_DISPATCH(pResource, pParams)
#define dispcapControl_Epilogue(pResource, pCallContext, pParams) dispcapControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define dispcapControlLookup(pResource, pParams, ppEntry) dispcapControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define dispcapMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispcapMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define dispcapAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispcapAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
// ---------------------------------------------------------------------------
// Generated dispatch helpers.  Each dispcap*_DISPATCH inline simply indirects
// through the corresponding vtable slot installed by
// __nvoc_init_funcTable_DispCapabilities(); the only hand-written method of
// this class is dispcapGetRegBaseOffsetAndSize_IMPL, declared below.
// ---------------------------------------------------------------------------
NV_STATUS dispcapGetRegBaseOffsetAndSize_IMPL(struct DispCapabilities *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize);
static inline NV_STATUS dispcapGetRegBaseOffsetAndSize_DISPATCH(struct DispCapabilities *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return pDispCapabilities->__dispcapGetRegBaseOffsetAndSize__(pDispCapabilities, pGpu, pOffset, pSize);
}
static inline NvBool dispcapShareCallback_DISPATCH(struct DispCapabilities *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pGpuResource->__dispcapShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}
static inline NV_STATUS dispcapControl_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pGpuResource->__dispcapControl__(pGpuResource, pCallContext, pParams);
}
static inline NV_STATUS dispcapUnmap_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__dispcapUnmap__(pGpuResource, pCallContext, pCpuMapping);
}
static inline NV_STATUS dispcapGetMemInterMapParams_DISPATCH(struct DispCapabilities *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return pRmResource->__dispcapGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS dispcapGetMemoryMappingDescriptor_DISPATCH(struct DispCapabilities *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return pRmResource->__dispcapGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline NV_STATUS dispcapGetMapAddrSpace_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return pGpuResource->__dispcapGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}
static inline NvHandle dispcapGetInternalObjectHandle_DISPATCH(struct DispCapabilities *pGpuResource) {
return pGpuResource->__dispcapGetInternalObjectHandle__(pGpuResource);
}
static inline NV_STATUS dispcapControlFilter_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__dispcapControlFilter__(pResource, pCallContext, pParams);
}
static inline void dispcapAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispCapabilities *pResource, RsResourceRef *pReference) {
pResource->__dispcapAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NvU32 dispcapGetRefCount_DISPATCH(struct DispCapabilities *pResource) {
return pResource->__dispcapGetRefCount__(pResource);
}
static inline NV_STATUS dispcapCheckMemInterUnmap_DISPATCH(struct DispCapabilities *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__dispcapCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NV_STATUS dispcapMapTo_DISPATCH(struct DispCapabilities *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__dispcapMapTo__(pResource, pParams);
}
static inline NV_STATUS dispcapControl_Prologue_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__dispcapControl_Prologue__(pResource, pCallContext, pParams);
}
static inline NvBool dispcapCanCopy_DISPATCH(struct DispCapabilities *pResource) {
return pResource->__dispcapCanCopy__(pResource);
}
static inline NV_STATUS dispcapInternalControlForward_DISPATCH(struct DispCapabilities *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return pGpuResource->__dispcapInternalControlForward__(pGpuResource, command, pParams, size);
}
static inline void dispcapPreDestruct_DISPATCH(struct DispCapabilities *pResource) {
pResource->__dispcapPreDestruct__(pResource);
}
static inline NV_STATUS dispcapUnmapFrom_DISPATCH(struct DispCapabilities *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pResource->__dispcapUnmapFrom__(pResource, pParams);
}
static inline void dispcapControl_Epilogue_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__dispcapControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS dispcapControlLookup_DISPATCH(struct DispCapabilities *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__dispcapControlLookup__(pResource, pParams, ppEntry);
}
static inline NV_STATUS dispcapMap_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__dispcapMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}
static inline NvBool dispcapAccessCallback_DISPATCH(struct DispCapabilities *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pResource->__dispcapAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
NV_STATUS dispcapConstruct_IMPL(struct DispCapabilities *arg_pDispCapabilities, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_dispcapConstruct(arg_pDispCapabilities, arg_pCallContext, arg_pParams) dispcapConstruct_IMPL(arg_pDispCapabilities, arg_pCallContext, arg_pParams)
#undef PRIVATE_FIELD
#endif // DISP_CAPABILITIES_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_DISP_CAPABILITIES_NVOC_H_

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,776 @@
#ifndef _G_DISP_CHANNEL_NVOC_H_
#define _G_DISP_CHANNEL_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/******************************************************************************
*
* Description:
* This file contains functions managing DispChannel and its derived classes.
*
******************************************************************************/
#include "g_disp_channel_nvoc.h"
#ifndef DISP_CHANNEL_H
#define DISP_CHANNEL_H
#include "gpu/gpu_resource.h"
#include "rmapi/event.h"
struct ContextDma;
#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__
#define __NVOC_CLASS_ContextDma_TYPEDEF__
typedef struct ContextDma ContextDma;
#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */
#ifndef __nvoc_class_id_ContextDma
#define __nvoc_class_id_ContextDma 0x88441b
#endif /* __nvoc_class_id_ContextDma */
struct DispObject;
#ifndef __NVOC_CLASS_DispObject_TYPEDEF__
#define __NVOC_CLASS_DispObject_TYPEDEF__
typedef struct DispObject DispObject;
#endif /* __NVOC_CLASS_DispObject_TYPEDEF__ */
#ifndef __nvoc_class_id_DispObject
#define __nvoc_class_id_DispObject 0x999839
#endif /* __nvoc_class_id_DispObject */
/*!
* Base class for display channels
*/
#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// Generated NVOC instance layout for the DispChannel base class (display
// channels).  Field order is part of the generated ABI; do not rearrange.
struct DispChannel {
// Run-time type info consumed by dynamicCast()/classInfo().
const struct NVOC_RTTI *__nvoc_rtti;
// Embedded bases: DispChannel derives from GpuResource and Notifier.
struct GpuResource __nvoc_base_GpuResource;
struct Notifier __nvoc_base_Notifier;
// Cached ancestor pointers so staticCast resolves to a plain field load.
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct GpuResource *__nvoc_pbase_GpuResource;
struct INotifier *__nvoc_pbase_INotifier;
struct Notifier *__nvoc_pbase_Notifier;
struct DispChannel *__nvoc_pbase_DispChannel;
// Virtual function table: one slot per (possibly inherited) method.
NV_STATUS (*__dispchnGetRegBaseOffsetAndSize__)(struct DispChannel *, struct OBJGPU *, NvU32 *, NvU32 *);
NvBool (*__dispchnShareCallback__)(struct DispChannel *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__dispchnMapTo__)(struct DispChannel *, RS_RES_MAP_TO_PARAMS *);
NV_STATUS (*__dispchnGetOrAllocNotifShare__)(struct DispChannel *, NvHandle, NvHandle, struct NotifShare **);
NV_STATUS (*__dispchnCheckMemInterUnmap__)(struct DispChannel *, NvBool);
NV_STATUS (*__dispchnGetMapAddrSpace__)(struct DispChannel *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
void (*__dispchnSetNotificationShare__)(struct DispChannel *, struct NotifShare *);
NvU32 (*__dispchnGetRefCount__)(struct DispChannel *);
void (*__dispchnAddAdditionalDependants__)(struct RsClient *, struct DispChannel *, RsResourceRef *);
NV_STATUS (*__dispchnControl_Prologue__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispchnInternalControlForward__)(struct DispChannel *, NvU32, void *, NvU32);
NV_STATUS (*__dispchnUnmapFrom__)(struct DispChannel *, RS_RES_UNMAP_FROM_PARAMS *);
void (*__dispchnControl_Epilogue__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispchnControlLookup__)(struct DispChannel *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NvHandle (*__dispchnGetInternalObjectHandle__)(struct DispChannel *);
NV_STATUS (*__dispchnControl__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispchnUnmap__)(struct DispChannel *, struct CALL_CONTEXT *, struct RsCpuMapping *);
NV_STATUS (*__dispchnGetMemInterMapParams__)(struct DispChannel *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__dispchnGetMemoryMappingDescriptor__)(struct DispChannel *, struct MEMORY_DESCRIPTOR **);
NV_STATUS (*__dispchnControlFilter__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispchnUnregisterEvent__)(struct DispChannel *, NvHandle, NvHandle, NvHandle, NvHandle);
NvBool (*__dispchnCanCopy__)(struct DispChannel *);
void (*__dispchnPreDestruct__)(struct DispChannel *);
PEVENTNOTIFICATION *(*__dispchnGetNotificationListPtr__)(struct DispChannel *);
struct NotifShare *(*__dispchnGetNotificationShare__)(struct DispChannel *);
NV_STATUS (*__dispchnMap__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
NvBool (*__dispchnAccessCallback__)(struct DispChannel *, struct RsClient *, void *, RsAccessRight);
// Per-instance data.
// NOTE(review): field meanings inferred from names only — confirm against
// dispchnConstruct_IMPL: owning DispObject, channel class/instance, CPU
// mappings of the channel control region (pControl/pPriv, window described
// by ControlOffset/ControlLength), and bIsDma (DMA vs PIO channel; set from
// the arg_isDma constructor argument).
struct DispObject *pDispObject;
NvU32 DispClass;
NvU32 InstanceNumber;
NvP64 pControl;
NvP64 pPriv;
NvU32 ControlOffset;
NvU32 ControlLength;
NvBool bIsDma;
};
#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__
#define __NVOC_CLASS_DispChannel_TYPEDEF__
typedef struct DispChannel DispChannel;
#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */
#ifndef __nvoc_class_id_DispChannel
#define __nvoc_class_id_DispChannel 0xbd2ff3
#endif /* __nvoc_class_id_DispChannel */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel;
#define __staticCast_DispChannel(pThis) \
((pThis)->__nvoc_pbase_DispChannel)
#ifdef __nvoc_disp_channel_h_disabled
#define __dynamicCast_DispChannel(pThis) ((DispChannel*)NULL)
#else //__nvoc_disp_channel_h_disabled
#define __dynamicCast_DispChannel(pThis) \
((DispChannel*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannel)))
#endif //__nvoc_disp_channel_h_disabled
NV_STATUS __nvoc_objCreateDynamic_DispChannel(DispChannel**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_DispChannel(DispChannel**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, NvU32 arg_isDma);
#define __objCreate_DispChannel(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams, arg_isDma) \
__nvoc_objCreate_DispChannel((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams, arg_isDma)
#define dispchnGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchnGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize)
#define dispchnShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchnShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define dispchnMapTo(pResource, pParams) dispchnMapTo_DISPATCH(pResource, pParams)
#define dispchnGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchnGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
#define dispchnCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispchnCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define dispchnGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchnGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define dispchnSetNotificationShare(pNotifier, pNotifShare) dispchnSetNotificationShare_DISPATCH(pNotifier, pNotifShare)
#define dispchnGetRefCount(pResource) dispchnGetRefCount_DISPATCH(pResource)
#define dispchnAddAdditionalDependants(pClient, pResource, pReference) dispchnAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define dispchnControl_Prologue(pResource, pCallContext, pParams) dispchnControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define dispchnInternalControlForward(pGpuResource, command, pParams, size) dispchnInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define dispchnUnmapFrom(pResource, pParams) dispchnUnmapFrom_DISPATCH(pResource, pParams)
#define dispchnControl_Epilogue(pResource, pCallContext, pParams) dispchnControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define dispchnControlLookup(pResource, pParams, ppEntry) dispchnControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define dispchnGetInternalObjectHandle(pGpuResource) dispchnGetInternalObjectHandle_DISPATCH(pGpuResource)
#define dispchnControl(pGpuResource, pCallContext, pParams) dispchnControl_DISPATCH(pGpuResource, pCallContext, pParams)
#define dispchnUnmap(pGpuResource, pCallContext, pCpuMapping) dispchnUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define dispchnGetMemInterMapParams(pRmResource, pParams) dispchnGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define dispchnGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchnGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define dispchnControlFilter(pResource, pCallContext, pParams) dispchnControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define dispchnUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispchnUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
#define dispchnCanCopy(pResource) dispchnCanCopy_DISPATCH(pResource)
#define dispchnPreDestruct(pResource) dispchnPreDestruct_DISPATCH(pResource)
#define dispchnGetNotificationListPtr(pNotifier) dispchnGetNotificationListPtr_DISPATCH(pNotifier)
#define dispchnGetNotificationShare(pNotifier) dispchnGetNotificationShare_DISPATCH(pNotifier)
#define dispchnMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchnMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define dispchnAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchnAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
// ---------------------------------------------------------------------------
// Generated dispatch helpers.  Each dispchn*_DISPATCH inline simply indirects
// through the corresponding DispChannel vtable slot; the only hand-written
// method declared in this section is dispchnGetRegBaseOffsetAndSize_IMPL.
// ---------------------------------------------------------------------------
NV_STATUS dispchnGetRegBaseOffsetAndSize_IMPL(struct DispChannel *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize);
static inline NV_STATUS dispchnGetRegBaseOffsetAndSize_DISPATCH(struct DispChannel *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return pDispChannel->__dispchnGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize);
}
static inline NvBool dispchnShareCallback_DISPATCH(struct DispChannel *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pGpuResource->__dispchnShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}
static inline NV_STATUS dispchnMapTo_DISPATCH(struct DispChannel *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__dispchnMapTo__(pResource, pParams);
}
static inline NV_STATUS dispchnGetOrAllocNotifShare_DISPATCH(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
return pNotifier->__dispchnGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
}
static inline NV_STATUS dispchnCheckMemInterUnmap_DISPATCH(struct DispChannel *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__dispchnCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NV_STATUS dispchnGetMapAddrSpace_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return pGpuResource->__dispchnGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}
static inline void dispchnSetNotificationShare_DISPATCH(struct DispChannel *pNotifier, struct NotifShare *pNotifShare) {
pNotifier->__dispchnSetNotificationShare__(pNotifier, pNotifShare);
}
static inline NvU32 dispchnGetRefCount_DISPATCH(struct DispChannel *pResource) {
return pResource->__dispchnGetRefCount__(pResource);
}
static inline void dispchnAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannel *pResource, RsResourceRef *pReference) {
pResource->__dispchnAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NV_STATUS dispchnControl_Prologue_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__dispchnControl_Prologue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS dispchnInternalControlForward_DISPATCH(struct DispChannel *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return pGpuResource->__dispchnInternalControlForward__(pGpuResource, command, pParams, size);
}
static inline NV_STATUS dispchnUnmapFrom_DISPATCH(struct DispChannel *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pResource->__dispchnUnmapFrom__(pResource, pParams);
}
static inline void dispchnControl_Epilogue_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__dispchnControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS dispchnControlLookup_DISPATCH(struct DispChannel *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__dispchnControlLookup__(pResource, pParams, ppEntry);
}
static inline NvHandle dispchnGetInternalObjectHandle_DISPATCH(struct DispChannel *pGpuResource) {
return pGpuResource->__dispchnGetInternalObjectHandle__(pGpuResource);
}
static inline NV_STATUS dispchnControl_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pGpuResource->__dispchnControl__(pGpuResource, pCallContext, pParams);
}
static inline NV_STATUS dispchnUnmap_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__dispchnUnmap__(pGpuResource, pCallContext, pCpuMapping);
}
static inline NV_STATUS dispchnGetMemInterMapParams_DISPATCH(struct DispChannel *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return pRmResource->__dispchnGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS dispchnGetMemoryMappingDescriptor_DISPATCH(struct DispChannel *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return pRmResource->__dispchnGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline NV_STATUS dispchnControlFilter_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__dispchnControlFilter__(pResource, pCallContext, pParams);
}
static inline NV_STATUS dispchnUnregisterEvent_DISPATCH(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
return pNotifier->__dispchnUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
}
static inline NvBool dispchnCanCopy_DISPATCH(struct DispChannel *pResource) {
return pResource->__dispchnCanCopy__(pResource);
}
static inline void dispchnPreDestruct_DISPATCH(struct DispChannel *pResource) {
pResource->__dispchnPreDestruct__(pResource);
}
static inline PEVENTNOTIFICATION *dispchnGetNotificationListPtr_DISPATCH(struct DispChannel *pNotifier) {
return pNotifier->__dispchnGetNotificationListPtr__(pNotifier);
}
static inline struct NotifShare *dispchnGetNotificationShare_DISPATCH(struct DispChannel *pNotifier) {
return pNotifier->__dispchnGetNotificationShare__(pNotifier);
}
static inline NV_STATUS dispchnMap_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__dispchnMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}
static inline NvBool dispchnAccessCallback_DISPATCH(struct DispChannel *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pResource->__dispchnAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
NV_STATUS dispchnConstruct_IMPL(struct DispChannel *arg_pDispChannel, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams, NvU32 arg_isDma);
#define __nvoc_dispchnConstruct(arg_pDispChannel, arg_pCallContext, arg_pParams, arg_isDma) dispchnConstruct_IMPL(arg_pDispChannel, arg_pCallContext, arg_pParams, arg_isDma)
void dispchnDestruct_IMPL(struct DispChannel *pDispChannel);
#define __nvoc_dispchnDestruct(pDispChannel) dispchnDestruct_IMPL(pDispChannel)
void dispchnSetRegBaseOffsetAndSize_IMPL(struct DispChannel *pDispChannel, struct OBJGPU *pGpu);
#ifdef __nvoc_disp_channel_h_disabled
static inline void dispchnSetRegBaseOffsetAndSize(struct DispChannel *pDispChannel, struct OBJGPU *pGpu) {
NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!");
}
#else //__nvoc_disp_channel_h_disabled
#define dispchnSetRegBaseOffsetAndSize(pDispChannel, pGpu) dispchnSetRegBaseOffsetAndSize_IMPL(pDispChannel, pGpu)
#endif //__nvoc_disp_channel_h_disabled
NV_STATUS dispchnGrabChannel_IMPL(struct DispChannel *pDispChannel, NvHandle hClient, NvHandle hParent, NvHandle hChannel, NvU32 hClass, void *pAllocParms);
#ifdef __nvoc_disp_channel_h_disabled
static inline NV_STATUS dispchnGrabChannel(struct DispChannel *pDispChannel, NvHandle hClient, NvHandle hParent, NvHandle hChannel, NvU32 hClass, void *pAllocParms) {
NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_disp_channel_h_disabled
#define dispchnGrabChannel(pDispChannel, hClient, hParent, hChannel, hClass, pAllocParms) dispchnGrabChannel_IMPL(pDispChannel, hClient, hParent, hChannel, hClass, pAllocParms)
#endif //__nvoc_disp_channel_h_disabled
NV_STATUS dispchnBindCtx_IMPL(struct OBJGPU *pGpu, struct ContextDma *pContextDma, NvHandle hDispChannel);
#define dispchnBindCtx(pGpu, pContextDma, hDispChannel) dispchnBindCtx_IMPL(pGpu, pContextDma, hDispChannel)
NV_STATUS dispchnUnbindCtx_IMPL(struct OBJGPU *pGpu, struct ContextDma *pContextDma, NvHandle hDispChannel);
#define dispchnUnbindCtx(pGpu, pContextDma, hDispChannel) dispchnUnbindCtx_IMPL(pGpu, pContextDma, hDispChannel)
void dispchnUnbindCtxFromAllChannels_IMPL(struct OBJGPU *pGpu, struct ContextDma *pContextDma);
#define dispchnUnbindCtxFromAllChannels(pGpu, pContextDma) dispchnUnbindCtxFromAllChannels_IMPL(pGpu, pContextDma)
void dispchnUnbindAllCtx_IMPL(struct OBJGPU *pGpu, struct DispChannel *pDispChannel);
#ifdef __nvoc_disp_channel_h_disabled
static inline void dispchnUnbindAllCtx(struct OBJGPU *pGpu, struct DispChannel *pDispChannel) {
NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!");
}
#else //__nvoc_disp_channel_h_disabled
#define dispchnUnbindAllCtx(pGpu, pDispChannel) dispchnUnbindAllCtx_IMPL(pGpu, pDispChannel)
#endif //__nvoc_disp_channel_h_disabled
NV_STATUS dispchnGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDisplayChannel, struct DispChannel **ppDispChannel);
#define dispchnGetByHandle(pClient, hDisplayChannel, ppDispChannel) dispchnGetByHandle_IMPL(pClient, hDisplayChannel, ppDispChannel)
#undef PRIVATE_FIELD
/*!
* RM internal class representing XXX_XXX_CHANNEL_PIO
*/
#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct DispChannelPio {
const struct NVOC_RTTI *__nvoc_rtti;
struct DispChannel __nvoc_base_DispChannel;
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct GpuResource *__nvoc_pbase_GpuResource;
struct INotifier *__nvoc_pbase_INotifier;
struct Notifier *__nvoc_pbase_Notifier;
struct DispChannel *__nvoc_pbase_DispChannel;
struct DispChannelPio *__nvoc_pbase_DispChannelPio;
NvBool (*__dispchnpioShareCallback__)(struct DispChannelPio *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__dispchnpioMapTo__)(struct DispChannelPio *, RS_RES_MAP_TO_PARAMS *);
NV_STATUS (*__dispchnpioGetOrAllocNotifShare__)(struct DispChannelPio *, NvHandle, NvHandle, struct NotifShare **);
NV_STATUS (*__dispchnpioCheckMemInterUnmap__)(struct DispChannelPio *, NvBool);
NV_STATUS (*__dispchnpioGetMapAddrSpace__)(struct DispChannelPio *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
void (*__dispchnpioSetNotificationShare__)(struct DispChannelPio *, struct NotifShare *);
NvU32 (*__dispchnpioGetRefCount__)(struct DispChannelPio *);
void (*__dispchnpioAddAdditionalDependants__)(struct RsClient *, struct DispChannelPio *, RsResourceRef *);
NV_STATUS (*__dispchnpioControl_Prologue__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispchnpioGetRegBaseOffsetAndSize__)(struct DispChannelPio *, struct OBJGPU *, NvU32 *, NvU32 *);
NV_STATUS (*__dispchnpioInternalControlForward__)(struct DispChannelPio *, NvU32, void *, NvU32);
NV_STATUS (*__dispchnpioUnmapFrom__)(struct DispChannelPio *, RS_RES_UNMAP_FROM_PARAMS *);
void (*__dispchnpioControl_Epilogue__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispchnpioControlLookup__)(struct DispChannelPio *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NvHandle (*__dispchnpioGetInternalObjectHandle__)(struct DispChannelPio *);
NV_STATUS (*__dispchnpioControl__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispchnpioUnmap__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RsCpuMapping *);
NV_STATUS (*__dispchnpioGetMemInterMapParams__)(struct DispChannelPio *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__dispchnpioGetMemoryMappingDescriptor__)(struct DispChannelPio *, struct MEMORY_DESCRIPTOR **);
NV_STATUS (*__dispchnpioControlFilter__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispchnpioUnregisterEvent__)(struct DispChannelPio *, NvHandle, NvHandle, NvHandle, NvHandle);
NvBool (*__dispchnpioCanCopy__)(struct DispChannelPio *);
void (*__dispchnpioPreDestruct__)(struct DispChannelPio *);
PEVENTNOTIFICATION *(*__dispchnpioGetNotificationListPtr__)(struct DispChannelPio *);
struct NotifShare *(*__dispchnpioGetNotificationShare__)(struct DispChannelPio *);
NV_STATUS (*__dispchnpioMap__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
NvBool (*__dispchnpioAccessCallback__)(struct DispChannelPio *, struct RsClient *, void *, RsAccessRight);
};
#ifndef __NVOC_CLASS_DispChannelPio_TYPEDEF__
#define __NVOC_CLASS_DispChannelPio_TYPEDEF__
typedef struct DispChannelPio DispChannelPio;
#endif /* __NVOC_CLASS_DispChannelPio_TYPEDEF__ */
#ifndef __nvoc_class_id_DispChannelPio
#define __nvoc_class_id_DispChannelPio 0x10dec3
#endif /* __nvoc_class_id_DispChannelPio */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelPio;
#define __staticCast_DispChannelPio(pThis) \
((pThis)->__nvoc_pbase_DispChannelPio)
#ifdef __nvoc_disp_channel_h_disabled
#define __dynamicCast_DispChannelPio(pThis) ((DispChannelPio*)NULL)
#else //__nvoc_disp_channel_h_disabled
#define __dynamicCast_DispChannelPio(pThis) \
((DispChannelPio*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannelPio)))
#endif //__nvoc_disp_channel_h_disabled
NV_STATUS __nvoc_objCreateDynamic_DispChannelPio(DispChannelPio**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_DispChannelPio(DispChannelPio**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_DispChannelPio(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_DispChannelPio((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
#define dispchnpioShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchnpioShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define dispchnpioMapTo(pResource, pParams) dispchnpioMapTo_DISPATCH(pResource, pParams)
#define dispchnpioGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchnpioGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
#define dispchnpioCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispchnpioCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define dispchnpioGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchnpioGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define dispchnpioSetNotificationShare(pNotifier, pNotifShare) dispchnpioSetNotificationShare_DISPATCH(pNotifier, pNotifShare)
#define dispchnpioGetRefCount(pResource) dispchnpioGetRefCount_DISPATCH(pResource)
#define dispchnpioAddAdditionalDependants(pClient, pResource, pReference) dispchnpioAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define dispchnpioControl_Prologue(pResource, pCallContext, pParams) dispchnpioControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define dispchnpioGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchnpioGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize)
#define dispchnpioInternalControlForward(pGpuResource, command, pParams, size) dispchnpioInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define dispchnpioUnmapFrom(pResource, pParams) dispchnpioUnmapFrom_DISPATCH(pResource, pParams)
#define dispchnpioControl_Epilogue(pResource, pCallContext, pParams) dispchnpioControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define dispchnpioControlLookup(pResource, pParams, ppEntry) dispchnpioControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define dispchnpioGetInternalObjectHandle(pGpuResource) dispchnpioGetInternalObjectHandle_DISPATCH(pGpuResource)
#define dispchnpioControl(pGpuResource, pCallContext, pParams) dispchnpioControl_DISPATCH(pGpuResource, pCallContext, pParams)
#define dispchnpioUnmap(pGpuResource, pCallContext, pCpuMapping) dispchnpioUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define dispchnpioGetMemInterMapParams(pRmResource, pParams) dispchnpioGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define dispchnpioGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchnpioGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define dispchnpioControlFilter(pResource, pCallContext, pParams) dispchnpioControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define dispchnpioUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispchnpioUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
#define dispchnpioCanCopy(pResource) dispchnpioCanCopy_DISPATCH(pResource)
#define dispchnpioPreDestruct(pResource) dispchnpioPreDestruct_DISPATCH(pResource)
#define dispchnpioGetNotificationListPtr(pNotifier) dispchnpioGetNotificationListPtr_DISPATCH(pNotifier)
#define dispchnpioGetNotificationShare(pNotifier) dispchnpioGetNotificationShare_DISPATCH(pNotifier)
#define dispchnpioMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchnpioMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define dispchnpioAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchnpioAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
static inline NvBool dispchnpioShareCallback_DISPATCH(struct DispChannelPio *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pGpuResource->__dispchnpioShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}
static inline NV_STATUS dispchnpioMapTo_DISPATCH(struct DispChannelPio *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__dispchnpioMapTo__(pResource, pParams);
}
static inline NV_STATUS dispchnpioGetOrAllocNotifShare_DISPATCH(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
return pNotifier->__dispchnpioGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
}
static inline NV_STATUS dispchnpioCheckMemInterUnmap_DISPATCH(struct DispChannelPio *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__dispchnpioCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NV_STATUS dispchnpioGetMapAddrSpace_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return pGpuResource->__dispchnpioGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}
static inline void dispchnpioSetNotificationShare_DISPATCH(struct DispChannelPio *pNotifier, struct NotifShare *pNotifShare) {
pNotifier->__dispchnpioSetNotificationShare__(pNotifier, pNotifShare);
}
static inline NvU32 dispchnpioGetRefCount_DISPATCH(struct DispChannelPio *pResource) {
return pResource->__dispchnpioGetRefCount__(pResource);
}
static inline void dispchnpioAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannelPio *pResource, RsResourceRef *pReference) {
pResource->__dispchnpioAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NV_STATUS dispchnpioControl_Prologue_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__dispchnpioControl_Prologue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS dispchnpioGetRegBaseOffsetAndSize_DISPATCH(struct DispChannelPio *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return pDispChannel->__dispchnpioGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize);
}
static inline NV_STATUS dispchnpioInternalControlForward_DISPATCH(struct DispChannelPio *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return pGpuResource->__dispchnpioInternalControlForward__(pGpuResource, command, pParams, size);
}
static inline NV_STATUS dispchnpioUnmapFrom_DISPATCH(struct DispChannelPio *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pResource->__dispchnpioUnmapFrom__(pResource, pParams);
}
static inline void dispchnpioControl_Epilogue_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__dispchnpioControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS dispchnpioControlLookup_DISPATCH(struct DispChannelPio *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__dispchnpioControlLookup__(pResource, pParams, ppEntry);
}
static inline NvHandle dispchnpioGetInternalObjectHandle_DISPATCH(struct DispChannelPio *pGpuResource) {
return pGpuResource->__dispchnpioGetInternalObjectHandle__(pGpuResource);
}
static inline NV_STATUS dispchnpioControl_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pGpuResource->__dispchnpioControl__(pGpuResource, pCallContext, pParams);
}
static inline NV_STATUS dispchnpioUnmap_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__dispchnpioUnmap__(pGpuResource, pCallContext, pCpuMapping);
}
static inline NV_STATUS dispchnpioGetMemInterMapParams_DISPATCH(struct DispChannelPio *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return pRmResource->__dispchnpioGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS dispchnpioGetMemoryMappingDescriptor_DISPATCH(struct DispChannelPio *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return pRmResource->__dispchnpioGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline NV_STATUS dispchnpioControlFilter_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__dispchnpioControlFilter__(pResource, pCallContext, pParams);
}
static inline NV_STATUS dispchnpioUnregisterEvent_DISPATCH(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
return pNotifier->__dispchnpioUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
}
static inline NvBool dispchnpioCanCopy_DISPATCH(struct DispChannelPio *pResource) {
return pResource->__dispchnpioCanCopy__(pResource);
}
static inline void dispchnpioPreDestruct_DISPATCH(struct DispChannelPio *pResource) {
pResource->__dispchnpioPreDestruct__(pResource);
}
static inline PEVENTNOTIFICATION *dispchnpioGetNotificationListPtr_DISPATCH(struct DispChannelPio *pNotifier) {
return pNotifier->__dispchnpioGetNotificationListPtr__(pNotifier);
}
static inline struct NotifShare *dispchnpioGetNotificationShare_DISPATCH(struct DispChannelPio *pNotifier) {
return pNotifier->__dispchnpioGetNotificationShare__(pNotifier);
}
static inline NV_STATUS dispchnpioMap_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__dispchnpioMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}
static inline NvBool dispchnpioAccessCallback_DISPATCH(struct DispChannelPio *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pResource->__dispchnpioAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
NV_STATUS dispchnpioConstruct_IMPL(struct DispChannelPio *arg_pDispChannelPio, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_dispchnpioConstruct(arg_pDispChannelPio, arg_pCallContext, arg_pParams) dispchnpioConstruct_IMPL(arg_pDispChannelPio, arg_pCallContext, arg_pParams)
#undef PRIVATE_FIELD
/*!
* RM internal class representing XXX_XXX_CHANNEL_DMA
*/
#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct DispChannelDma {
const struct NVOC_RTTI *__nvoc_rtti;
struct DispChannel __nvoc_base_DispChannel;
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct GpuResource *__nvoc_pbase_GpuResource;
struct INotifier *__nvoc_pbase_INotifier;
struct Notifier *__nvoc_pbase_Notifier;
struct DispChannel *__nvoc_pbase_DispChannel;
struct DispChannelDma *__nvoc_pbase_DispChannelDma;
NvBool (*__dispchndmaShareCallback__)(struct DispChannelDma *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__dispchndmaMapTo__)(struct DispChannelDma *, RS_RES_MAP_TO_PARAMS *);
NV_STATUS (*__dispchndmaGetOrAllocNotifShare__)(struct DispChannelDma *, NvHandle, NvHandle, struct NotifShare **);
NV_STATUS (*__dispchndmaCheckMemInterUnmap__)(struct DispChannelDma *, NvBool);
NV_STATUS (*__dispchndmaGetMapAddrSpace__)(struct DispChannelDma *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
void (*__dispchndmaSetNotificationShare__)(struct DispChannelDma *, struct NotifShare *);
NvU32 (*__dispchndmaGetRefCount__)(struct DispChannelDma *);
void (*__dispchndmaAddAdditionalDependants__)(struct RsClient *, struct DispChannelDma *, RsResourceRef *);
NV_STATUS (*__dispchndmaControl_Prologue__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispchndmaGetRegBaseOffsetAndSize__)(struct DispChannelDma *, struct OBJGPU *, NvU32 *, NvU32 *);
NV_STATUS (*__dispchndmaInternalControlForward__)(struct DispChannelDma *, NvU32, void *, NvU32);
NV_STATUS (*__dispchndmaUnmapFrom__)(struct DispChannelDma *, RS_RES_UNMAP_FROM_PARAMS *);
void (*__dispchndmaControl_Epilogue__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispchndmaControlLookup__)(struct DispChannelDma *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NvHandle (*__dispchndmaGetInternalObjectHandle__)(struct DispChannelDma *);
NV_STATUS (*__dispchndmaControl__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispchndmaUnmap__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RsCpuMapping *);
NV_STATUS (*__dispchndmaGetMemInterMapParams__)(struct DispChannelDma *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__dispchndmaGetMemoryMappingDescriptor__)(struct DispChannelDma *, struct MEMORY_DESCRIPTOR **);
NV_STATUS (*__dispchndmaControlFilter__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispchndmaUnregisterEvent__)(struct DispChannelDma *, NvHandle, NvHandle, NvHandle, NvHandle);
NvBool (*__dispchndmaCanCopy__)(struct DispChannelDma *);
void (*__dispchndmaPreDestruct__)(struct DispChannelDma *);
PEVENTNOTIFICATION *(*__dispchndmaGetNotificationListPtr__)(struct DispChannelDma *);
struct NotifShare *(*__dispchndmaGetNotificationShare__)(struct DispChannelDma *);
NV_STATUS (*__dispchndmaMap__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
NvBool (*__dispchndmaAccessCallback__)(struct DispChannelDma *, struct RsClient *, void *, RsAccessRight);
};
#ifndef __NVOC_CLASS_DispChannelDma_TYPEDEF__
#define __NVOC_CLASS_DispChannelDma_TYPEDEF__
typedef struct DispChannelDma DispChannelDma;
#endif /* __NVOC_CLASS_DispChannelDma_TYPEDEF__ */
#ifndef __nvoc_class_id_DispChannelDma
#define __nvoc_class_id_DispChannelDma 0xfe3d2e
#endif /* __nvoc_class_id_DispChannelDma */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelDma;
#define __staticCast_DispChannelDma(pThis) \
((pThis)->__nvoc_pbase_DispChannelDma)
#ifdef __nvoc_disp_channel_h_disabled
#define __dynamicCast_DispChannelDma(pThis) ((DispChannelDma*)NULL)
#else //__nvoc_disp_channel_h_disabled
#define __dynamicCast_DispChannelDma(pThis) \
((DispChannelDma*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannelDma)))
#endif //__nvoc_disp_channel_h_disabled
NV_STATUS __nvoc_objCreateDynamic_DispChannelDma(DispChannelDma**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_DispChannelDma(DispChannelDma**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_DispChannelDma(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_DispChannelDma((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
#define dispchndmaShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchndmaShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define dispchndmaMapTo(pResource, pParams) dispchndmaMapTo_DISPATCH(pResource, pParams)
#define dispchndmaGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchndmaGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
#define dispchndmaCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispchndmaCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define dispchndmaGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchndmaGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define dispchndmaSetNotificationShare(pNotifier, pNotifShare) dispchndmaSetNotificationShare_DISPATCH(pNotifier, pNotifShare)
#define dispchndmaGetRefCount(pResource) dispchndmaGetRefCount_DISPATCH(pResource)
#define dispchndmaAddAdditionalDependants(pClient, pResource, pReference) dispchndmaAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define dispchndmaControl_Prologue(pResource, pCallContext, pParams) dispchndmaControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define dispchndmaGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchndmaGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize)
#define dispchndmaInternalControlForward(pGpuResource, command, pParams, size) dispchndmaInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define dispchndmaUnmapFrom(pResource, pParams) dispchndmaUnmapFrom_DISPATCH(pResource, pParams)
#define dispchndmaControl_Epilogue(pResource, pCallContext, pParams) dispchndmaControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define dispchndmaControlLookup(pResource, pParams, ppEntry) dispchndmaControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define dispchndmaGetInternalObjectHandle(pGpuResource) dispchndmaGetInternalObjectHandle_DISPATCH(pGpuResource)
#define dispchndmaControl(pGpuResource, pCallContext, pParams) dispchndmaControl_DISPATCH(pGpuResource, pCallContext, pParams)
#define dispchndmaUnmap(pGpuResource, pCallContext, pCpuMapping) dispchndmaUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define dispchndmaGetMemInterMapParams(pRmResource, pParams) dispchndmaGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define dispchndmaGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchndmaGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define dispchndmaControlFilter(pResource, pCallContext, pParams) dispchndmaControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define dispchndmaUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispchndmaUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
#define dispchndmaCanCopy(pResource) dispchndmaCanCopy_DISPATCH(pResource)
#define dispchndmaPreDestruct(pResource) dispchndmaPreDestruct_DISPATCH(pResource)
#define dispchndmaGetNotificationListPtr(pNotifier) dispchndmaGetNotificationListPtr_DISPATCH(pNotifier)
#define dispchndmaGetNotificationShare(pNotifier) dispchndmaGetNotificationShare_DISPATCH(pNotifier)
#define dispchndmaMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchndmaMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define dispchndmaAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchndmaAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
static inline NvBool dispchndmaShareCallback_DISPATCH(struct DispChannelDma *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pGpuResource->__dispchndmaShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}
static inline NV_STATUS dispchndmaMapTo_DISPATCH(struct DispChannelDma *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__dispchndmaMapTo__(pResource, pParams);
}
static inline NV_STATUS dispchndmaGetOrAllocNotifShare_DISPATCH(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
return pNotifier->__dispchndmaGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
}
static inline NV_STATUS dispchndmaCheckMemInterUnmap_DISPATCH(struct DispChannelDma *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__dispchndmaCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NV_STATUS dispchndmaGetMapAddrSpace_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return pGpuResource->__dispchndmaGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}
static inline void dispchndmaSetNotificationShare_DISPATCH(struct DispChannelDma *pNotifier, struct NotifShare *pNotifShare) {
pNotifier->__dispchndmaSetNotificationShare__(pNotifier, pNotifShare);
}
static inline NvU32 dispchndmaGetRefCount_DISPATCH(struct DispChannelDma *pResource) {
return pResource->__dispchndmaGetRefCount__(pResource);
}
// NVOC-generated virtual-call thunks for DispChannelDma.
//
// Each *_DISPATCH wrapper forwards through the per-object function pointer
// (the __dispchndma*__ members) installed by the NVOC init code, giving C a
// vtable-style dispatch.  None of these wrappers contain logic of their own.
static inline void dispchndmaAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannelDma *pResource, RsResourceRef *pReference) {
    pResource->__dispchndmaAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NV_STATUS dispchndmaControl_Prologue_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__dispchndmaControl_Prologue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS dispchndmaGetRegBaseOffsetAndSize_DISPATCH(struct DispChannelDma *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return pDispChannel->__dispchndmaGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize);
}
static inline NV_STATUS dispchndmaInternalControlForward_DISPATCH(struct DispChannelDma *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return pGpuResource->__dispchndmaInternalControlForward__(pGpuResource, command, pParams, size);
}
static inline NV_STATUS dispchndmaUnmapFrom_DISPATCH(struct DispChannelDma *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return pResource->__dispchndmaUnmapFrom__(pResource, pParams);
}
// Note: the Epilogue returns void, unlike the NV_STATUS-returning Prologue.
static inline void dispchndmaControl_Epilogue_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    pResource->__dispchndmaControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS dispchndmaControlLookup_DISPATCH(struct DispChannelDma *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return pResource->__dispchndmaControlLookup__(pResource, pParams, ppEntry);
}
static inline NvHandle dispchndmaGetInternalObjectHandle_DISPATCH(struct DispChannelDma *pGpuResource) {
    return pGpuResource->__dispchndmaGetInternalObjectHandle__(pGpuResource);
}
static inline NV_STATUS dispchndmaControl_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pGpuResource->__dispchndmaControl__(pGpuResource, pCallContext, pParams);
}
static inline NV_STATUS dispchndmaUnmap_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__dispchndmaUnmap__(pGpuResource, pCallContext, pCpuMapping);
}
static inline NV_STATUS dispchndmaGetMemInterMapParams_DISPATCH(struct DispChannelDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return pRmResource->__dispchndmaGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS dispchndmaGetMemoryMappingDescriptor_DISPATCH(struct DispChannelDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return pRmResource->__dispchndmaGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline NV_STATUS dispchndmaControlFilter_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__dispchndmaControlFilter__(pResource, pCallContext, pParams);
}
static inline NV_STATUS dispchndmaUnregisterEvent_DISPATCH(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
    return pNotifier->__dispchndmaUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
}
static inline NvBool dispchndmaCanCopy_DISPATCH(struct DispChannelDma *pResource) {
    return pResource->__dispchndmaCanCopy__(pResource);
}
static inline void dispchndmaPreDestruct_DISPATCH(struct DispChannelDma *pResource) {
    pResource->__dispchndmaPreDestruct__(pResource);
}
static inline PEVENTNOTIFICATION *dispchndmaGetNotificationListPtr_DISPATCH(struct DispChannelDma *pNotifier) {
    return pNotifier->__dispchndmaGetNotificationListPtr__(pNotifier);
}
static inline struct NotifShare *dispchndmaGetNotificationShare_DISPATCH(struct DispChannelDma *pNotifier) {
    return pNotifier->__dispchndmaGetNotificationShare__(pNotifier);
}
static inline NV_STATUS dispchndmaMap_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__dispchndmaMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}
static inline NvBool dispchndmaAccessCallback_DISPATCH(struct DispChannelDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return pResource->__dispchndmaAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
// Constructor entry point; __nvoc_dispchndmaConstruct is the name the
// generated object-creation code invokes.
NV_STATUS dispchndmaConstruct_IMPL(struct DispChannelDma *arg_pDispChannelDma, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_dispchndmaConstruct(arg_pDispChannelDma, arg_pCallContext, arg_pParams) dispchndmaConstruct_IMPL(arg_pDispChannelDma, arg_pCallContext, arg_pParams)
#undef PRIVATE_FIELD
#endif // DISP_CHANNEL_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_DISP_CHANNEL_NVOC_H_

View File

@@ -0,0 +1,169 @@
#define NVOC_DISP_INST_MEM_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_disp_inst_mem_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x8223e2 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayInstanceMemory;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
void __nvoc_init_DisplayInstanceMemory(DisplayInstanceMemory*, RmHalspecOwner* );
void __nvoc_init_funcTable_DisplayInstanceMemory(DisplayInstanceMemory*, RmHalspecOwner* );
NV_STATUS __nvoc_ctor_DisplayInstanceMemory(DisplayInstanceMemory*, RmHalspecOwner* );
void __nvoc_init_dataField_DisplayInstanceMemory(DisplayInstanceMemory*, RmHalspecOwner* );
void __nvoc_dtor_DisplayInstanceMemory(DisplayInstanceMemory*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DisplayInstanceMemory;
// Run-time type information for DisplayInstanceMemory: one NVOC_RTTI record
// per class in the ancestry (the class itself and its Object base), the cast
// table consumed by dynamicCast(), and the public class definition / export
// records referenced by the NVOC runtime.
static const struct NVOC_RTTI __nvoc_rtti_DisplayInstanceMemory_DisplayInstanceMemory = {
    /*pClassDef=*/ &__nvoc_class_def_DisplayInstanceMemory,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DisplayInstanceMemory,
    /*offset=*/ 0,  // the most-derived type sits at offset 0 of itself
};
static const struct NVOC_RTTI __nvoc_rtti_DisplayInstanceMemory_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(DisplayInstanceMemory, __nvoc_base_Object),
};
// Cast table: relatives must be listed for every type dynamicCast() may target.
static const struct NVOC_CASTINFO __nvoc_castinfo_DisplayInstanceMemory = {
    /*numRelatives=*/ 2,
    /*relatives=*/ {
        &__nvoc_rtti_DisplayInstanceMemory_DisplayInstanceMemory,
        &__nvoc_rtti_DisplayInstanceMemory_Object,
    },
};
const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayInstanceMemory =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(DisplayInstanceMemory),
        /*classId=*/ classId(DisplayInstanceMemory),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "DisplayInstanceMemory",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DisplayInstanceMemory,
    /*pCastInfo=*/ &__nvoc_castinfo_DisplayInstanceMemory,
    /*pExportInfo=*/ &__nvoc_export_info_DisplayInstanceMemory
};
// This class exports no RM control methods.
const struct NVOC_EXPORT_INFO __nvoc_export_info_DisplayInstanceMemory =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};
void __nvoc_dtor_Object(Object*);
// Destructor chain: run this class's own destructor body first, then the
// Object base destructor (reverse of construction order).
void __nvoc_dtor_DisplayInstanceMemory(DisplayInstanceMemory *pThis) {
    __nvoc_instmemDestruct(pThis);
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
    PORT_UNREFERENCED_VARIABLE(pThis);
}
// Initialize hal-conditional data fields.  DisplayInstanceMemory declares
// none, so the body only computes the halspec locals the generator always
// emits and then silences unused-variable warnings for them.
void __nvoc_init_dataField_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) {
    DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal;
    const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
    PORT_UNREFERENCED_VARIABLE(dispIpHal);
    PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx);
}
NV_STATUS __nvoc_ctor_Object(Object* );
// Constructor chain: construct the Object base, initialize the (hal) data
// fields, then run instmemConstruct.  The goto ladder unwinds exactly the
// pieces that were successfully constructed before the failure point.
NV_STATUS __nvoc_ctor_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
    if (status != NV_OK) goto __nvoc_ctor_DisplayInstanceMemory_fail_Object;
    __nvoc_init_dataField_DisplayInstanceMemory(pThis, pRmhalspecowner);
    status = __nvoc_instmemConstruct(pThis);
    if (status != NV_OK) goto __nvoc_ctor_DisplayInstanceMemory_fail__init;
    goto __nvoc_ctor_DisplayInstanceMemory_exit; // Success
__nvoc_ctor_DisplayInstanceMemory_fail__init:
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_DisplayInstanceMemory_fail_Object:
__nvoc_ctor_DisplayInstanceMemory_exit:
    return status;
}
// Install virtual-function pointers.  This class overrides / halifies no
// virtual methods, so the generated body only suppresses unused-variable
// warnings for the halspec locals.
static void __nvoc_init_funcTable_DisplayInstanceMemory_1(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) {
    DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal;
    const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
    PORT_UNREFERENCED_VARIABLE(dispIpHal);
    PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx);
}
// Public entry: the generator may split large tables into numbered parts;
// here there is a single part (_1).
void __nvoc_init_funcTable_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) {
    __nvoc_init_funcTable_DisplayInstanceMemory_1(pThis, pRmhalspecowner);
}
void __nvoc_init_Object(Object*);
// Pre-constructor initialization: cache the per-base pbase pointers, init
// the Object base, then populate the virtual-function table.
void __nvoc_init_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) {
    pThis->__nvoc_pbase_DisplayInstanceMemory = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_funcTable_DisplayInstanceMemory(pThis, pRmhalspecowner);
}
// Allocate, RTTI-initialize and construct a DisplayInstanceMemory instance,
// optionally linking it into pParent's child list.  The halspec owner is
// located on the parent chain and threaded through the init/ctor calls.
NV_STATUS __nvoc_objCreate_DisplayInstanceMemory(DisplayInstanceMemory **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    DisplayInstanceMemory *pThis;
    RmHalspecOwner *pRmhalspecowner;
    pThis = portMemAllocNonPaged(sizeof(DisplayInstanceMemory));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;
    portMemSet(pThis, 0, sizeof(DisplayInstanceMemory));
    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DisplayInstanceMemory);
    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }
    if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL)
        pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent);
    // NOTE(review): on this assert path the function returns without freeing
    // pThis (and without detaching it from the parent's child list) --
    // apparent leak on a should-never-fire path.  A fix would also need to
    // undo objAddChild; confirm against the NVOC generator before changing
    // generated code by hand.
    NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT);
    __nvoc_init_DisplayInstanceMemory(pThis, pRmhalspecowner);
    status = __nvoc_ctor_DisplayInstanceMemory(pThis, pRmhalspecowner);
    if (status != NV_OK) goto __nvoc_objCreate_DisplayInstanceMemory_cleanup;
    *ppThis = pThis;
    return NV_OK;
__nvoc_objCreate_DisplayInstanceMemory_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}
// Dynamic-creation adapter used via NVOC_CLASS_DEF::objCreatefn.
// DisplayInstanceMemory takes no construction arguments, so the va_list is
// ignored and the call is forwarded straight to the typed creator.
NV_STATUS __nvoc_objCreateDynamic_DisplayInstanceMemory(DisplayInstanceMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    return __nvoc_objCreate_DisplayInstanceMemory(ppThis, pParent, createFlags);
}

View File

@@ -0,0 +1,358 @@
#ifndef _G_DISP_INST_MEM_NVOC_H_
#define _G_DISP_INST_MEM_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_disp_inst_mem_nvoc.h"
#ifndef DISPLAY_INSTANCE_MEMORY_H
#define DISPLAY_INSTANCE_MEMORY_H
/* ------------------------ Includes --------------------------------------- */
#include "nvtypes.h"
#include "nvoc/utility.h"
#include "gpu/disp/kern_disp.h"
#include "gpu/mem_mgr/virt_mem_allocator_common.h"
#include "gpu/mem_mgr/mem_desc.h"
/* ------------------------ Forward Declaration ---------------------------- */
typedef struct OBJEHEAP OBJEHEAP;
struct DispChannel;
#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__
#define __NVOC_CLASS_DispChannel_TYPEDEF__
typedef struct DispChannel DispChannel;
#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */
#ifndef __nvoc_class_id_DispChannel
#define __nvoc_class_id_DispChannel 0xbd2ff3
#endif /* __nvoc_class_id_DispChannel */
struct ContextDma;
#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__
#define __NVOC_CLASS_ContextDma_TYPEDEF__
typedef struct ContextDma ContextDma;
#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */
#ifndef __nvoc_class_id_ContextDma
#define __nvoc_class_id_ContextDma 0x88441b
#endif /* __nvoc_class_id_ContextDma */
/* ------------------------ Macros & Defines ------------------------------- */
#define KERNEL_DISPLAY_GET_INST_MEM(p) ((p)->pInst)
#define DISP_INST_MEM_ALIGN 0x10000
/* ------------------------ Types definitions ------------------------------ */
/*!
* A software hash table entry
*/
typedef struct
{
    struct ContextDma *pContextDma;    // context DMA recorded in this slot
    struct DispChannel *pDispChannel;  // display channel it is bound to (see instmemBindContextDma)
} SW_HASH_TABLE_ENTRY;
#ifdef NVOC_DISP_INST_MEM_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct DisplayInstanceMemory {
    const struct NVOC_RTTI *__nvoc_rtti;                               // NVOC run-time type info
    struct Object __nvoc_base_Object;                                  // embedded Object base class
    struct Object *__nvoc_pbase_Object;                                // cached pointer to the Object base
    struct DisplayInstanceMemory *__nvoc_pbase_DisplayInstanceMemory;  // cached self pointer
    NV_ADDRESS_SPACE instMemAddrSpace;      // aperture of the instance memory (set via instmemSetMemory)
    NvU32 instMemAttr;                      // memory attributes (set via instmemSetMemory)
    NvU64 instMemBase;                      // base address of the instance memory (set via instmemSetMemory)
    NvU32 instMemSize;                      // instance memory size in bytes (set via instmemSetMemory)
    MEMORY_DESCRIPTOR *pAllocedInstMemDesc; // memdesc for RM-allocated backing, if any -- TODO confirm vs pInstMemDesc
    MEMORY_DESCRIPTOR *pInstMemDesc;        // memdesc describing the instance memory in use
    void *pInstMem;                         // presumably a CPU mapping of the instance memory -- verify at use sites
    NvU32 nHashTableEntries;                // number of SW_HASH_TABLE_ENTRY slots in pHashTable
    NvU32 hashTableBaseAddr;                // hash-table base address (see instmemGetHashTableBaseAddr)
    SW_HASH_TABLE_ENTRY *pHashTable;        // software shadow of the display hash table
    OBJEHEAP *pInstHeap;                    // heap allocator carving up the instance memory
};
#ifndef __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__
#define __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__
typedef struct DisplayInstanceMemory DisplayInstanceMemory;
#endif /* __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ */
#ifndef __nvoc_class_id_DisplayInstanceMemory
#define __nvoc_class_id_DisplayInstanceMemory 0x8223e2
#endif /* __nvoc_class_id_DisplayInstanceMemory */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayInstanceMemory;
#define __staticCast_DisplayInstanceMemory(pThis) \
((pThis)->__nvoc_pbase_DisplayInstanceMemory)
#ifdef __nvoc_disp_inst_mem_h_disabled
#define __dynamicCast_DisplayInstanceMemory(pThis) ((DisplayInstanceMemory*)NULL)
#else //__nvoc_disp_inst_mem_h_disabled
#define __dynamicCast_DisplayInstanceMemory(pThis) \
((DisplayInstanceMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DisplayInstanceMemory)))
#endif //__nvoc_disp_inst_mem_h_disabled
NV_STATUS __nvoc_objCreateDynamic_DisplayInstanceMemory(DisplayInstanceMemory**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_DisplayInstanceMemory(DisplayInstanceMemory**, Dynamic*, NvU32);
#define __objCreate_DisplayInstanceMemory(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_DisplayInstanceMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
// ---------------------------------------------------------------------------
// HAL-versioned entry points.  For each operation NVOC emits:
//   * the versioned implementation prototype (e.g. _v03_00) or an inline
//     no-op variant,
//   * a disabled-build stub that asserts when the class is compiled out
//     (__nvoc_disp_inst_mem_h_disabled),
//   * instmemFoo / instmemFoo_HAL macros routing calls to the implementation.
// ---------------------------------------------------------------------------
void instmemGetSize_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *pTotalInstMemSize, NvU32 *pHashTableSize);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline void instmemGetSize(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *pTotalInstMemSize, NvU32 *pHashTableSize) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemGetSize(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize) instmemGetSize_v03_00(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize)
#endif //__nvoc_disp_inst_mem_h_disabled
#define instmemGetSize_HAL(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize) instmemGetSize(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize)
NvU32 instmemGetHashTableBaseAddr_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline NvU32 instmemGetHashTableBaseAddr(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
    return 0;
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemGetHashTableBaseAddr(pGpu, pInstMem) instmemGetHashTableBaseAddr_v03_00(pGpu, pInstMem)
#endif //__nvoc_disp_inst_mem_h_disabled
#define instmemGetHashTableBaseAddr_HAL(pGpu, pInstMem) instmemGetHashTableBaseAddr(pGpu, pInstMem)
NvBool instmemIsValid_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline NvBool instmemIsValid(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
    return NV_FALSE;
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemIsValid(pGpu, pInstMem, offset) instmemIsValid_v03_00(pGpu, pInstMem, offset)
#endif //__nvoc_disp_inst_mem_h_disabled
#define instmemIsValid_HAL(pGpu, pInstMem, offset) instmemIsValid(pGpu, pInstMem, offset)
NvU32 instmemGenerateHashTableData_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 hClient, NvU32 offset, NvU32 dispChannelNum);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline NvU32 instmemGenerateHashTableData(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 hClient, NvU32 offset, NvU32 dispChannelNum) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
    return 0;
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemGenerateHashTableData(pGpu, pInstMem, hClient, offset, dispChannelNum) instmemGenerateHashTableData_v03_00(pGpu, pInstMem, hClient, offset, dispChannelNum)
#endif //__nvoc_disp_inst_mem_h_disabled
#define instmemGenerateHashTableData_HAL(pGpu, pInstMem, hClient, offset, dispChannelNum) instmemGenerateHashTableData(pGpu, pInstMem, hClient, offset, dispChannelNum)
NV_STATUS instmemHashFunc_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvHandle hClient, NvHandle hContextDma, NvU32 dispChannelNum, NvU32 *result);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline NV_STATUS instmemHashFunc(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvHandle hClient, NvHandle hContextDma, NvU32 dispChannelNum, NvU32 *result) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemHashFunc(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result) instmemHashFunc_v03_00(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result)
#endif //__nvoc_disp_inst_mem_h_disabled
#define instmemHashFunc_HAL(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result) instmemHashFunc(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result)
NV_STATUS instmemCommitContextDma_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline NV_STATUS instmemCommitContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemCommitContextDma(pGpu, pInstMem, pContextDma) instmemCommitContextDma_v03_00(pGpu, pInstMem, pContextDma)
#endif //__nvoc_disp_inst_mem_h_disabled
#define instmemCommitContextDma_HAL(pGpu, pInstMem, pContextDma) instmemCommitContextDma(pGpu, pInstMem, pContextDma)
// _b3696a is a generated no-op variant: decommit has nothing to do on the
// halspecs that select this body.
static inline void instmemDecommitContextDma_b3696a(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) {
    return;
}
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline void instmemDecommitContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemDecommitContextDma(pGpu, pInstMem, pContextDma) instmemDecommitContextDma_b3696a(pGpu, pInstMem, pContextDma)
#endif //__nvoc_disp_inst_mem_h_disabled
#define instmemDecommitContextDma_HAL(pGpu, pInstMem, pContextDma) instmemDecommitContextDma(pGpu, pInstMem, pContextDma)
NV_STATUS instmemUpdateContextDma_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, NvU64 *pNewAddress, NvU64 *pNewLimit, NvHandle hMemory, NvU32 comprInfo);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline NV_STATUS instmemUpdateContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, NvU64 *pNewAddress, NvU64 *pNewLimit, NvHandle hMemory, NvU32 comprInfo) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemUpdateContextDma(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo) instmemUpdateContextDma_v03_00(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo)
#endif //__nvoc_disp_inst_mem_h_disabled
#define instmemUpdateContextDma_HAL(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo) instmemUpdateContextDma(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo)
// ---------------------------------------------------------------------------
// Non-halified (_IMPL) entry points: constructor/destructor, state-machine
// hooks, and the bind/unbind/reserve/free context-DMA API.  Same
// disabled-build stub pattern as above, but without _HAL aliases.
// ---------------------------------------------------------------------------
NV_STATUS instmemConstruct_IMPL(struct DisplayInstanceMemory *arg_pInstMem);
#define __nvoc_instmemConstruct(arg_pInstMem) instmemConstruct_IMPL(arg_pInstMem)
void instmemDestruct_IMPL(struct DisplayInstanceMemory *pInstMem);
#define __nvoc_instmemDestruct(pInstMem) instmemDestruct_IMPL(pInstMem)
NV_STATUS instmemStateInitLocked_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline NV_STATUS instmemStateInitLocked(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemStateInitLocked(pGpu, pInstMem) instmemStateInitLocked_IMPL(pGpu, pInstMem)
#endif //__nvoc_disp_inst_mem_h_disabled
void instmemStateDestroy_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline void instmemStateDestroy(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemStateDestroy(pGpu, pInstMem) instmemStateDestroy_IMPL(pGpu, pInstMem)
#endif //__nvoc_disp_inst_mem_h_disabled
NV_STATUS instmemStateLoad_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline NV_STATUS instmemStateLoad(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemStateLoad(pGpu, pInstMem, flags) instmemStateLoad_IMPL(pGpu, pInstMem, flags)
#endif //__nvoc_disp_inst_mem_h_disabled
NV_STATUS instmemStateUnload_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline NV_STATUS instmemStateUnload(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemStateUnload(pGpu, pInstMem, flags) instmemStateUnload_IMPL(pGpu, pInstMem, flags)
#endif //__nvoc_disp_inst_mem_h_disabled
void instmemSetMemory_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NV_ADDRESS_SPACE dispInstMemAddrSpace, NvU32 dispInstMemAttr, NvU64 dispInstMemBase, NvU32 dispInstMemSize);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline void instmemSetMemory(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NV_ADDRESS_SPACE dispInstMemAddrSpace, NvU32 dispInstMemAttr, NvU64 dispInstMemBase, NvU32 dispInstMemSize) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemSetMemory(pGpu, pInstMem, dispInstMemAddrSpace, dispInstMemAttr, dispInstMemBase, dispInstMemSize) instmemSetMemory_IMPL(pGpu, pInstMem, dispInstMemAddrSpace, dispInstMemAttr, dispInstMemBase, dispInstMemSize)
#endif //__nvoc_disp_inst_mem_h_disabled
NV_STATUS instmemBindContextDma_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline NV_STATUS instmemBindContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemBindContextDma(pGpu, pInstMem, pContextDma, pDispChannel) instmemBindContextDma_IMPL(pGpu, pInstMem, pContextDma, pDispChannel)
#endif //__nvoc_disp_inst_mem_h_disabled
NV_STATUS instmemUnbindContextDma_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline NV_STATUS instmemUnbindContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemUnbindContextDma(pGpu, pInstMem, pContextDma, pDispChannel) instmemUnbindContextDma_IMPL(pGpu, pInstMem, pContextDma, pDispChannel)
#endif //__nvoc_disp_inst_mem_h_disabled
void instmemUnbindContextDmaFromAllChannels_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline void instmemUnbindContextDmaFromAllChannels(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemUnbindContextDmaFromAllChannels(pGpu, pInstMem, pContextDma) instmemUnbindContextDmaFromAllChannels_IMPL(pGpu, pInstMem, pContextDma)
#endif //__nvoc_disp_inst_mem_h_disabled
void instmemUnbindDispChannelContextDmas_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct DispChannel *pDispChannel);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline void instmemUnbindDispChannelContextDmas(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct DispChannel *pDispChannel) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemUnbindDispChannelContextDmas(pGpu, pInstMem, pDispChannel) instmemUnbindDispChannelContextDmas_IMPL(pGpu, pInstMem, pDispChannel)
#endif //__nvoc_disp_inst_mem_h_disabled
NV_STATUS instmemReserveContextDma_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *offset);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline NV_STATUS instmemReserveContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *offset) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemReserveContextDma(pGpu, pInstMem, offset) instmemReserveContextDma_IMPL(pGpu, pInstMem, offset)
#endif //__nvoc_disp_inst_mem_h_disabled
NV_STATUS instmemFreeContextDma_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset);
#ifdef __nvoc_disp_inst_mem_h_disabled
static inline NV_STATUS instmemFreeContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset) {
    NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_disp_inst_mem_h_disabled
#define instmemFreeContextDma(pGpu, pInstMem, offset) instmemFreeContextDma_IMPL(pGpu, pInstMem, offset)
#endif //__nvoc_disp_inst_mem_h_disabled
#undef PRIVATE_FIELD
#endif // DISPLAY_INSTANCE_MEMORY_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_DISP_INST_MEM_NVOC_H_

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,329 @@
#define NVOC_DISP_SF_USER_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_disp_sf_user_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xba7439 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSfUser;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
void __nvoc_init_DispSfUser(DispSfUser*);
void __nvoc_init_funcTable_DispSfUser(DispSfUser*);
NV_STATUS __nvoc_ctor_DispSfUser(DispSfUser*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_DispSfUser(DispSfUser*);
void __nvoc_dtor_DispSfUser(DispSfUser*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispSfUser;
// Run-time type information for DispSfUser: one NVOC_RTTI record per class
// in the ancestry chain (DispSfUser -> GpuResource -> RmResource ->
// RsResource/RmResourceCommon -> Object), the cast table consumed by
// dynamicCast(), and the public class definition record.
static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_DispSfUser = {
    /*pClassDef=*/ &__nvoc_class_def_DispSfUser,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispSfUser,
    /*offset=*/ 0,  // the most-derived type sits at offset 0 of itself
};
static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_RsResource = {
    /*pClassDef=*/ &__nvoc_class_def_RsResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
};
static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_RmResourceCommon = {
    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};
static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_RmResource = {
    /*pClassDef=*/ &__nvoc_class_def_RmResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource),
};
static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_GpuResource = {
    /*pClassDef=*/ &__nvoc_class_def_GpuResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource),
};
// Cast table: relatives must be listed for every type dynamicCast() may target.
static const struct NVOC_CASTINFO __nvoc_castinfo_DispSfUser = {
    /*numRelatives=*/ 6,
    /*relatives=*/ {
        &__nvoc_rtti_DispSfUser_DispSfUser,
        &__nvoc_rtti_DispSfUser_GpuResource,
        &__nvoc_rtti_DispSfUser_RmResource,
        &__nvoc_rtti_DispSfUser_RmResourceCommon,
        &__nvoc_rtti_DispSfUser_RsResource,
        &__nvoc_rtti_DispSfUser_Object,
    },
};
const struct NVOC_CLASS_DEF __nvoc_class_def_DispSfUser =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(DispSfUser),
        /*classId=*/ classId(DispSfUser),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "DispSfUser",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispSfUser,
    /*pCastInfo=*/ &__nvoc_castinfo_DispSfUser,
    /*pExportInfo=*/ &__nvoc_export_info_DispSfUser
};
// NVOC-generated cast thunks.  __nvoc_thunk_DispSfUser_* down-casts a base
// pointer to the derived DispSfUser (SUBTRACTS the base offset) so the
// derived override runs; __nvoc_thunk_<Base>_dispsf* up-casts the derived
// pointer to the relevant base (ADDS the offset) to reach the inherited
// default implementation.
static NV_STATUS __nvoc_thunk_DispSfUser_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return dispsfGetRegBaseOffsetAndSize((struct DispSfUser *)(((unsigned char *)pDispSfUser) - __nvoc_rtti_DispSfUser_GpuResource.offset), pGpu, pOffset, pSize);
}
static NvBool __nvoc_thunk_GpuResource_dispsfShareCallback(struct DispSfUser *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}
static NV_STATUS __nvoc_thunk_GpuResource_dispsfControl(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_GpuResource_dispsfUnmap(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, pCpuMapping);
}
static NV_STATUS __nvoc_thunk_RmResource_dispsfGetMemInterMapParams(struct DispSfUser *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_dispsfGetMemoryMappingDescriptor(struct DispSfUser *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSfUser_RmResource.offset), ppMemDesc);
}
static NV_STATUS __nvoc_thunk_GpuResource_dispsfGetMapAddrSpace(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
}
static NvHandle __nvoc_thunk_GpuResource_dispsfGetInternalObjectHandle(struct DispSfUser *pGpuResource) {
    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_dispsfControlFilter(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pCallContext, pParams);
}
static void __nvoc_thunk_RsResource_dispsfAddAdditionalDependants(struct RsClient *pClient, struct DispSfUser *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pReference);
}
static NvU32 __nvoc_thunk_RsResource_dispsfGetRefCount(struct DispSfUser *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RmResource_dispsfCheckMemInterUnmap(struct DispSfUser *pRmResource, NvBool bSubdeviceHandleProvided) {
return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSfUser_RmResource.offset), bSubdeviceHandleProvided);
}
static NV_STATUS __nvoc_thunk_RsResource_dispsfMapTo(struct DispSfUser *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_dispsfControl_Prologue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pCallContext, pParams);
}
static NvBool __nvoc_thunk_RsResource_dispsfCanCopy(struct DispSfUser *pResource) {
return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_GpuResource_dispsfInternalControlForward(struct DispSfUser *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), command, pParams, size);
}
static void __nvoc_thunk_RsResource_dispsfPreDestruct(struct DispSfUser *pResource) {
resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_dispsfUnmapFrom(struct DispSfUser *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pParams);
}
static void __nvoc_thunk_RmResource_dispsfControl_Epilogue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RsResource_dispsfControlLookup(struct DispSfUser *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pParams, ppEntry);
}
static NV_STATUS __nvoc_thunk_GpuResource_dispsfMap(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, pParams, pCpuMapping);
}
static NvBool __nvoc_thunk_RmResource_dispsfAccessCallback(struct DispSfUser *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
// DispSfUser exports no RMAPI control methods, hence an empty export table.
const struct NVOC_EXPORT_INFO __nvoc_export_info_DispSfUser =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_GpuResource(GpuResource*);
// Destructor: DispSfUser declares no destructor of its own, so only the
// GpuResource base sub-object is torn down.
void __nvoc_dtor_DispSfUser(DispSfUser *pThis) {
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// DispSfUser declares no data fields with NVOC initializers; this is a
// generated no-op kept for structural uniformity across classes.
void __nvoc_init_dataField_DispSfUser(DispSfUser *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
// Constructor chain: construct the GpuResource base first, then run the
// user-supplied dispsfConstruct. On failure of the latter, the base is
// destroyed via the reverse-order goto cleanup ladder below.
NV_STATUS __nvoc_ctor_DispSfUser(DispSfUser *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_DispSfUser_fail_GpuResource;
__nvoc_init_dataField_DispSfUser(pThis);
status = __nvoc_dispsfConstruct(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_DispSfUser_fail__init;
goto __nvoc_ctor_DispSfUser_exit; // Success
__nvoc_ctor_DispSfUser_fail__init:
// dispsfConstruct failed: undo the base construction that already succeeded.
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_ctor_DispSfUser_fail_GpuResource:
__nvoc_ctor_DispSfUser_exit:
return status;
}
// Populate the per-object virtual function table. The only method DispSfUser
// overrides is GetRegBaseOffsetAndSize (bound to its _IMPL and mirrored into
// the GpuResource base vtable via a down-cast thunk); every other entry
// forwards to an inherited base-class implementation through an up-cast thunk.
static void __nvoc_init_funcTable_DispSfUser_1(DispSfUser *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
pThis->__dispsfGetRegBaseOffsetAndSize__ = &dispsfGetRegBaseOffsetAndSize_IMPL;
pThis->__nvoc_base_GpuResource.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_thunk_DispSfUser_gpuresGetRegBaseOffsetAndSize;
pThis->__dispsfShareCallback__ = &__nvoc_thunk_GpuResource_dispsfShareCallback;
pThis->__dispsfControl__ = &__nvoc_thunk_GpuResource_dispsfControl;
pThis->__dispsfUnmap__ = &__nvoc_thunk_GpuResource_dispsfUnmap;
pThis->__dispsfGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispsfGetMemInterMapParams;
pThis->__dispsfGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispsfGetMemoryMappingDescriptor;
pThis->__dispsfGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_dispsfGetMapAddrSpace;
pThis->__dispsfGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispsfGetInternalObjectHandle;
pThis->__dispsfControlFilter__ = &__nvoc_thunk_RsResource_dispsfControlFilter;
pThis->__dispsfAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispsfAddAdditionalDependants;
pThis->__dispsfGetRefCount__ = &__nvoc_thunk_RsResource_dispsfGetRefCount;
pThis->__dispsfCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispsfCheckMemInterUnmap;
pThis->__dispsfMapTo__ = &__nvoc_thunk_RsResource_dispsfMapTo;
pThis->__dispsfControl_Prologue__ = &__nvoc_thunk_RmResource_dispsfControl_Prologue;
pThis->__dispsfCanCopy__ = &__nvoc_thunk_RsResource_dispsfCanCopy;
pThis->__dispsfInternalControlForward__ = &__nvoc_thunk_GpuResource_dispsfInternalControlForward;
pThis->__dispsfPreDestruct__ = &__nvoc_thunk_RsResource_dispsfPreDestruct;
pThis->__dispsfUnmapFrom__ = &__nvoc_thunk_RsResource_dispsfUnmapFrom;
pThis->__dispsfControl_Epilogue__ = &__nvoc_thunk_RmResource_dispsfControl_Epilogue;
pThis->__dispsfControlLookup__ = &__nvoc_thunk_RsResource_dispsfControlLookup;
pThis->__dispsfMap__ = &__nvoc_thunk_GpuResource_dispsfMap;
pThis->__dispsfAccessCallback__ = &__nvoc_thunk_RmResource_dispsfAccessCallback;
}
// Wrapper kept so the generator can split very large vtables across several
// _N helper functions; DispSfUser needs only one.
void __nvoc_init_funcTable_DispSfUser(DispSfUser *pThis) {
__nvoc_init_funcTable_DispSfUser_1(pThis);
}
void __nvoc_init_GpuResource(GpuResource*);
// Non-allocating init: wire up the cached ancestor pointers (used by
// staticCast), recursively initialize the base sub-object, then install the
// virtual function table.
void __nvoc_init_DispSfUser(DispSfUser *pThis) {
pThis->__nvoc_pbase_DispSfUser = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
__nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_init_funcTable_DispSfUser(pThis);
}
// Allocate, initialize and construct a DispSfUser.
// Returns NV_OK and stores the new object in *ppThis, or frees the partial
// object and returns the failing status (*ppThis is left untouched on error).
NV_STATUS __nvoc_objCreate_DispSfUser(DispSfUser **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status;
Object *pParentObj;
DispSfUser *pThis;
pThis = portMemAllocNonPaged(sizeof(DispSfUser));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(DispSfUser));
// Stamp the most-derived RTTI before anything else so casts work during init.
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispSfUser);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
// NOTE(review): dynamicCast's result is not NULL-checked before
// objAddChild; generator appears to assume any parent is an Object.
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_DispSfUser(pThis);
status = __nvoc_ctor_DispSfUser(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_objCreate_DispSfUser_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_DispSfUser_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
// va_list adapter used by the generic NVOC object factory: unpacks the two
// constructor arguments, in the order the typed creator expects them, and
// forwards the call. Returns whatever the typed creator returns.
NV_STATUS __nvoc_objCreateDynamic_DispSfUser(DispSfUser **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
struct CALL_CONTEXT *pCallContext = va_arg(args, struct CALL_CONTEXT *);
struct RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
return __nvoc_objCreate_DispSfUser(ppThis, pParent, createFlags, pCallContext, pAllocParams);
}

/* ==== end of generated g_disp_sf_user_nvoc.c; generated header g_disp_sf_user_nvoc.h follows (239 lines) ==== */
#ifndef _G_DISP_SF_USER_NVOC_H_
#define _G_DISP_SF_USER_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/******************************************************************************
*
* Description:
* This file contains functions managing DispSfUser class.
*
******************************************************************************/
#include "g_disp_sf_user_nvoc.h"
#ifndef DISP_SF_USER_H
#define DISP_SF_USER_H
#include "gpu/gpu_resource.h"
/*!
* RM internal class representing NVXXXX_DISP_SF_USER
*/
#ifdef NVOC_DISP_SF_USER_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct DispSfUser {
// Most-derived RTTI, always the first member of every NVOC object.
const struct NVOC_RTTI *__nvoc_rtti;
// Embedded base-class sub-object (GpuResource -> RmResource -> RsResource -> Object).
struct GpuResource __nvoc_base_GpuResource;
// Cached ancestor pointers filled in by __nvoc_init_DispSfUser; these make
// staticCast a constant-time member load.
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct GpuResource *__nvoc_pbase_GpuResource;
struct DispSfUser *__nvoc_pbase_DispSfUser;
// Per-object virtual function table; entries are bound by
// __nvoc_init_funcTable_DispSfUser and called via the _DISPATCH inlines.
NV_STATUS (*__dispsfGetRegBaseOffsetAndSize__)(struct DispSfUser *, struct OBJGPU *, NvU32 *, NvU32 *);
NvBool (*__dispsfShareCallback__)(struct DispSfUser *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__dispsfControl__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispsfUnmap__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RsCpuMapping *);
NV_STATUS (*__dispsfGetMemInterMapParams__)(struct DispSfUser *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__dispsfGetMemoryMappingDescriptor__)(struct DispSfUser *, struct MEMORY_DESCRIPTOR **);
NV_STATUS (*__dispsfGetMapAddrSpace__)(struct DispSfUser *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
NvHandle (*__dispsfGetInternalObjectHandle__)(struct DispSfUser *);
NV_STATUS (*__dispsfControlFilter__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
void (*__dispsfAddAdditionalDependants__)(struct RsClient *, struct DispSfUser *, RsResourceRef *);
NvU32 (*__dispsfGetRefCount__)(struct DispSfUser *);
NV_STATUS (*__dispsfCheckMemInterUnmap__)(struct DispSfUser *, NvBool);
NV_STATUS (*__dispsfMapTo__)(struct DispSfUser *, RS_RES_MAP_TO_PARAMS *);
NV_STATUS (*__dispsfControl_Prologue__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NvBool (*__dispsfCanCopy__)(struct DispSfUser *);
NV_STATUS (*__dispsfInternalControlForward__)(struct DispSfUser *, NvU32, void *, NvU32);
void (*__dispsfPreDestruct__)(struct DispSfUser *);
NV_STATUS (*__dispsfUnmapFrom__)(struct DispSfUser *, RS_RES_UNMAP_FROM_PARAMS *);
void (*__dispsfControl_Epilogue__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__dispsfControlLookup__)(struct DispSfUser *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NV_STATUS (*__dispsfMap__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
NvBool (*__dispsfAccessCallback__)(struct DispSfUser *, struct RsClient *, void *, RsAccessRight);
// Instance data: register control region offset/length — presumably set by
// dispsfConstruct from the display SF aperture; confirm against the _IMPL.
NvU32 ControlOffset;
NvU32 ControlLength;
};
// Idempotent typedef / class-id guards so multiple generated headers may
// reference DispSfUser without conflicting definitions.
#ifndef __NVOC_CLASS_DispSfUser_TYPEDEF__
#define __NVOC_CLASS_DispSfUser_TYPEDEF__
typedef struct DispSfUser DispSfUser;
#endif /* __NVOC_CLASS_DispSfUser_TYPEDEF__ */
#ifndef __nvoc_class_id_DispSfUser
#define __nvoc_class_id_DispSfUser 0xba7439
#endif /* __nvoc_class_id_DispSfUser */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSfUser;
// staticCast resolves through the cached pbase pointer; dynamicCast consults
// the RTTI cast table (and degrades to NULL when the class is disabled).
#define __staticCast_DispSfUser(pThis) \
((pThis)->__nvoc_pbase_DispSfUser)
#ifdef __nvoc_disp_sf_user_h_disabled
#define __dynamicCast_DispSfUser(pThis) ((DispSfUser*)NULL)
#else //__nvoc_disp_sf_user_h_disabled
#define __dynamicCast_DispSfUser(pThis) \
((DispSfUser*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispSfUser)))
#endif //__nvoc_disp_sf_user_h_disabled
NV_STATUS __nvoc_objCreateDynamic_DispSfUser(DispSfUser**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_DispSfUser(DispSfUser**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
// Public creation macro: casts the parent to Dynamic and calls the typed creator.
#define __objCreate_DispSfUser(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_DispSfUser((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
// Method-call aliases: route every dispsf* method name through its _DISPATCH
// inline, which performs the virtual call via the object's function table.
#define dispsfGetRegBaseOffsetAndSize(pDispSfUser, pGpu, pOffset, pSize) dispsfGetRegBaseOffsetAndSize_DISPATCH(pDispSfUser, pGpu, pOffset, pSize)
#define dispsfShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispsfShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define dispsfControl(pGpuResource, pCallContext, pParams) dispsfControl_DISPATCH(pGpuResource, pCallContext, pParams)
#define dispsfUnmap(pGpuResource, pCallContext, pCpuMapping) dispsfUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define dispsfGetMemInterMapParams(pRmResource, pParams) dispsfGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define dispsfGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispsfGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define dispsfGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispsfGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define dispsfGetInternalObjectHandle(pGpuResource) dispsfGetInternalObjectHandle_DISPATCH(pGpuResource)
#define dispsfControlFilter(pResource, pCallContext, pParams) dispsfControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define dispsfAddAdditionalDependants(pClient, pResource, pReference) dispsfAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define dispsfGetRefCount(pResource) dispsfGetRefCount_DISPATCH(pResource)
#define dispsfCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispsfCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define dispsfMapTo(pResource, pParams) dispsfMapTo_DISPATCH(pResource, pParams)
#define dispsfControl_Prologue(pResource, pCallContext, pParams) dispsfControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define dispsfCanCopy(pResource) dispsfCanCopy_DISPATCH(pResource)
#define dispsfInternalControlForward(pGpuResource, command, pParams, size) dispsfInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define dispsfPreDestruct(pResource) dispsfPreDestruct_DISPATCH(pResource)
#define dispsfUnmapFrom(pResource, pParams) dispsfUnmapFrom_DISPATCH(pResource, pParams)
#define dispsfControl_Epilogue(pResource, pCallContext, pParams) dispsfControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define dispsfControlLookup(pResource, pParams, ppEntry) dispsfControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define dispsfMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispsfMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define dispsfAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispsfAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
// _DISPATCH inlines: each performs the virtual call by reading the matching
// function pointer out of the DispSfUser vtable and invoking it with the same
// arguments. The only method with a class-local implementation is
// GetRegBaseOffsetAndSize (declared _IMPL below); all others resolve to
// inherited base-class behavior through the thunks installed at init time.
NV_STATUS dispsfGetRegBaseOffsetAndSize_IMPL(struct DispSfUser *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize);
static inline NV_STATUS dispsfGetRegBaseOffsetAndSize_DISPATCH(struct DispSfUser *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return pDispSfUser->__dispsfGetRegBaseOffsetAndSize__(pDispSfUser, pGpu, pOffset, pSize);
}
static inline NvBool dispsfShareCallback_DISPATCH(struct DispSfUser *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pGpuResource->__dispsfShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}
static inline NV_STATUS dispsfControl_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pGpuResource->__dispsfControl__(pGpuResource, pCallContext, pParams);
}
static inline NV_STATUS dispsfUnmap_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__dispsfUnmap__(pGpuResource, pCallContext, pCpuMapping);
}
static inline NV_STATUS dispsfGetMemInterMapParams_DISPATCH(struct DispSfUser *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return pRmResource->__dispsfGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS dispsfGetMemoryMappingDescriptor_DISPATCH(struct DispSfUser *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return pRmResource->__dispsfGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline NV_STATUS dispsfGetMapAddrSpace_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return pGpuResource->__dispsfGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}
static inline NvHandle dispsfGetInternalObjectHandle_DISPATCH(struct DispSfUser *pGpuResource) {
return pGpuResource->__dispsfGetInternalObjectHandle__(pGpuResource);
}
static inline NV_STATUS dispsfControlFilter_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__dispsfControlFilter__(pResource, pCallContext, pParams);
}
static inline void dispsfAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispSfUser *pResource, RsResourceRef *pReference) {
pResource->__dispsfAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NvU32 dispsfGetRefCount_DISPATCH(struct DispSfUser *pResource) {
return pResource->__dispsfGetRefCount__(pResource);
}
static inline NV_STATUS dispsfCheckMemInterUnmap_DISPATCH(struct DispSfUser *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__dispsfCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NV_STATUS dispsfMapTo_DISPATCH(struct DispSfUser *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__dispsfMapTo__(pResource, pParams);
}
static inline NV_STATUS dispsfControl_Prologue_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__dispsfControl_Prologue__(pResource, pCallContext, pParams);
}
static inline NvBool dispsfCanCopy_DISPATCH(struct DispSfUser *pResource) {
return pResource->__dispsfCanCopy__(pResource);
}
static inline NV_STATUS dispsfInternalControlForward_DISPATCH(struct DispSfUser *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return pGpuResource->__dispsfInternalControlForward__(pGpuResource, command, pParams, size);
}
static inline void dispsfPreDestruct_DISPATCH(struct DispSfUser *pResource) {
pResource->__dispsfPreDestruct__(pResource);
}
static inline NV_STATUS dispsfUnmapFrom_DISPATCH(struct DispSfUser *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pResource->__dispsfUnmapFrom__(pResource, pParams);
}
static inline void dispsfControl_Epilogue_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__dispsfControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS dispsfControlLookup_DISPATCH(struct DispSfUser *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__dispsfControlLookup__(pResource, pParams, ppEntry);
}
static inline NV_STATUS dispsfMap_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__dispsfMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}
static inline NvBool dispsfAccessCallback_DISPATCH(struct DispSfUser *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pResource->__dispsfAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
// User-written constructor (defined in disp_sf_user.c); invoked by
// __nvoc_ctor_DispSfUser after the GpuResource base is constructed.
NV_STATUS dispsfConstruct_IMPL(struct DispSfUser *arg_pDispSfUser, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_dispsfConstruct(arg_pDispSfUser, arg_pCallContext, arg_pParams) dispsfConstruct_IMPL(arg_pDispSfUser, arg_pCallContext, arg_pParams)
#undef PRIVATE_FIELD
#endif // DISP_SF_USER_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_DISP_SF_USER_NVOC_H_

/* ==== end of g_disp_sf_user_nvoc.h; one intervening file's diff was suppressed by the viewer as too large; generated g_eng_state_nvoc.c follows (189 lines) ==== */
#define NVOC_ENG_STATE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_eng_state_nvoc.h"
// Debug-only symbol whose name embeds the class id; the linker flags a
// duplicate if two classes were generated with the same id.
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x7a7ed6 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
void __nvoc_init_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_init_funcTable_OBJENGSTATE(OBJENGSTATE*);
NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_init_dataField_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJENGSTATE;
// RTTI for the class itself: offset 0, destroyed via its own dtor.
static const struct NVOC_RTTI __nvoc_rtti_OBJENGSTATE_OBJENGSTATE = {
/*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJENGSTATE,
/*offset=*/ 0,
};
// RTTI for the embedded Object base: records its byte offset so casts in
// either direction can adjust the pointer.
static const struct NVOC_RTTI __nvoc_rtti_OBJENGSTATE_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(OBJENGSTATE, __nvoc_base_Object),
};
// Cast table consulted by dynamicCast: OBJENGSTATE plus its one ancestor.
static const struct NVOC_CASTINFO __nvoc_castinfo_OBJENGSTATE = {
/*numRelatives=*/ 2,
/*relatives=*/ {
&__nvoc_rtti_OBJENGSTATE_OBJENGSTATE,
&__nvoc_rtti_OBJENGSTATE_Object,
},
};
//
// NVOC class definition for OBJENGSTATE: object size, class id, optional
// debug name, dynamic-creation entry point, cast table and (empty) RMAPI
// export table.
//
const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE =
{
/*classInfo=*/ {
/*size=*/ sizeof(OBJENGSTATE),
/*classId=*/ classId(OBJENGSTATE),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "OBJENGSTATE",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJENGSTATE,
/*pCastInfo=*/ &__nvoc_castinfo_OBJENGSTATE,
/*pExportInfo=*/ &__nvoc_export_info_OBJENGSTATE
};
// OBJENGSTATE exports no RMAPI control methods, hence an empty export table.
const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJENGSTATE =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_Object(Object*);
// Destructor: runs the user-written engstateDestruct first, then tears down
// the embedded Object base (reverse of construction order).
void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE *pThis) {
__nvoc_engstateDestruct(pThis);
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// OBJENGSTATE declares no data fields with NVOC initializers; generated no-op.
void __nvoc_init_dataField_OBJENGSTATE(OBJENGSTATE *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_Object(Object* );
// Constructor chain: construct the Object base, then initialize data fields.
// OBJENGSTATE has no user-written constructor of its own, so the failure
// ladder only needs the base-construction label.
NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE *pThis) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
if (status != NV_OK) goto __nvoc_ctor_OBJENGSTATE_fail_Object;
__nvoc_init_dataField_OBJENGSTATE(pThis);
goto __nvoc_ctor_OBJENGSTATE_exit; // Success
__nvoc_ctor_OBJENGSTATE_fail_Object:
__nvoc_ctor_OBJENGSTATE_exit:
return status;
}
// Populate the vtable: OBJENGSTATE is the root of the engine-state hierarchy,
// so every entry binds directly to its own _IMPL default; derived engine
// classes override these slots in their own generated init code.
static void __nvoc_init_funcTable_OBJENGSTATE_1(OBJENGSTATE *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
pThis->__engstateConstructEngine__ = &engstateConstructEngine_IMPL;
pThis->__engstateInitMissing__ = &engstateInitMissing_IMPL;
pThis->__engstateStatePreInitLocked__ = &engstateStatePreInitLocked_IMPL;
pThis->__engstateStatePreInitUnlocked__ = &engstateStatePreInitUnlocked_IMPL;
pThis->__engstateStateInitLocked__ = &engstateStateInitLocked_IMPL;
pThis->__engstateStateInitUnlocked__ = &engstateStateInitUnlocked_IMPL;
pThis->__engstateStatePreLoad__ = &engstateStatePreLoad_IMPL;
pThis->__engstateStateLoad__ = &engstateStateLoad_IMPL;
pThis->__engstateStatePostLoad__ = &engstateStatePostLoad_IMPL;
pThis->__engstateStatePreUnload__ = &engstateStatePreUnload_IMPL;
pThis->__engstateStateUnload__ = &engstateStateUnload_IMPL;
pThis->__engstateStatePostUnload__ = &engstateStatePostUnload_IMPL;
pThis->__engstateStateDestroy__ = &engstateStateDestroy_IMPL;
pThis->__engstateAllocTunableState__ = &engstateAllocTunableState_IMPL;
pThis->__engstateFreeTunableState__ = &engstateFreeTunableState_IMPL;
pThis->__engstateGetTunableState__ = &engstateGetTunableState_IMPL;
pThis->__engstateSetTunableState__ = &engstateSetTunableState_IMPL;
pThis->__engstateReconcileTunableState__ = &engstateReconcileTunableState_IMPL;
pThis->__engstateCompareTunableState__ = &engstateCompareTunableState_IMPL;
pThis->__engstateIsPresent__ = &engstateIsPresent_IMPL;
}
// Wrapper kept so the generator can split very large vtables across several
// _N helper functions; OBJENGSTATE needs only one.
void __nvoc_init_funcTable_OBJENGSTATE(OBJENGSTATE *pThis) {
__nvoc_init_funcTable_OBJENGSTATE_1(pThis);
}
void __nvoc_init_Object(Object*);
// Non-allocating init: wire up the cached ancestor pointers (used by
// staticCast), recursively initialize the Object base, then install the vtable.
void __nvoc_init_OBJENGSTATE(OBJENGSTATE *pThis) {
pThis->__nvoc_pbase_OBJENGSTATE = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
__nvoc_init_Object(&pThis->__nvoc_base_Object);
__nvoc_init_funcTable_OBJENGSTATE(pThis);
}
// Allocate, initialize and construct an OBJENGSTATE.
// Returns NV_OK and stores the new object in *ppThis, or frees the partial
// object and returns the failing status (*ppThis is left untouched on error).
NV_STATUS __nvoc_objCreate_OBJENGSTATE(OBJENGSTATE **ppThis, Dynamic *pParent, NvU32 createFlags) {
NV_STATUS status;
Object *pParentObj;
OBJENGSTATE *pThis;
pThis = portMemAllocNonPaged(sizeof(OBJENGSTATE));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(OBJENGSTATE));
// Stamp the most-derived RTTI before anything else so casts work during init.
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJENGSTATE);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
// NOTE(review): dynamicCast's result is not NULL-checked before
// objAddChild; generator appears to assume any parent is an Object.
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_OBJENGSTATE(pThis);
status = __nvoc_ctor_OBJENGSTATE(pThis);
if (status != NV_OK) goto __nvoc_objCreate_OBJENGSTATE_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_OBJENGSTATE_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
// va_list adapter used by the generic NVOC object factory. OBJENGSTATE's
// constructor takes no variadic arguments, so the va_list is deliberately not
// consumed; forward straight to the typed creator and return its status.
NV_STATUS __nvoc_objCreateDynamic_OBJENGSTATE(OBJENGSTATE **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
PORT_UNREFERENCED_VARIABLE(args);
return __nvoc_objCreate_OBJENGSTATE(ppThis, pParent, createFlags);
}

/* ==== end of g_eng_state_nvoc.c; generated header g_eng_state_nvoc.h follows (385 lines; truncated in this excerpt) ==== */
#ifndef _G_ENG_STATE_NVOC_H_
#define _G_ENG_STATE_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_eng_state_nvoc.h"
#ifndef _ENG_STATE_H_
#define _ENG_STATE_H_
/*!
* @file eng_state.h
* @brief Provides definitions for all OBJENGSTATE data structures and interfaces.
*/
#include "core/core.h"
#include "gpu/eng_desc.h"
// Lifecycle states of an engine, listed in the order the state machine
// advances through them (construct -> init -> load -> unload -> destroy).
typedef enum ENGSTATE_STATE
{
ENGSTATE_STATE_UNDEFINED = 0,
ENGSTATE_STATE_CONSTRUCT,
ENGSTATE_STATE_PRE_INIT,
ENGSTATE_STATE_INIT,
ENGSTATE_STATE_PRE_LOAD,
ENGSTATE_STATE_LOAD,
ENGSTATE_STATE_POST_LOAD,
ENGSTATE_STATE_PRE_UNLOAD,
ENGSTATE_STATE_UNLOAD,
ENGSTATE_STATE_POST_UNLOAD,
ENGSTATE_STATE_DESTROY,
ENGSTATE_STATE_COUNT // Keep this last
} ENGSTATE_STATE;
// Stats data stored for every state transition
typedef struct ENGSTATE_STATS
{
NvS32 memoryAllocCount;   // net allocations made during the transition (signed: frees can outnumber allocs)
NvS32 memoryAllocSize;    // net bytes allocated during the transition
NvU32 transitionTimeUs;   // wall time the transition took, microseconds
} ENGSTATE_STATS;
// Temporary transition data, not stored; wider types capture the raw running
// counters before they are reduced into ENGSTATE_STATS.
typedef struct ENGSTATE_TRANSITION_DATA
{
NvS64 memoryAllocCount;
NvS64 memoryAllocSize;
NvU64 transitionStartTimeNs;  // start timestamp, nanoseconds
} ENGSTATE_TRANSITION_DATA;
typedef struct OBJENGSTATE *POBJENGSTATE;
#define ENG_GET_FIFO(p) (engstateGetFifo(staticCast((p), OBJENGSTATE)))
#define ENG_GET_ENG_DESC(p) (staticCast((p), OBJENGSTATE)->engDesc)
/*!
* Defines the structure used to contain all generic information related to
* the OBJENGSTATE.
*/
#ifdef NVOC_ENG_STATE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
/*!
 * Base class for per-engine state objects.
 *
 * Carries the NVOC RTTI/vtable plumbing, the engine descriptor, the current
 * lifecycle state, and per-transition statistics.  Access goes through the
 * engstate* dispatch macros/functions declared below; direct field access is
 * gated by PRIVATE_FIELD in the real generated header.
 */
struct OBJENGSTATE {
    const struct NVOC_RTTI *__nvoc_rtti;           // NVOC run-time type information
    struct Object __nvoc_base_Object;              // embedded base-class sub-object
    struct Object *__nvoc_pbase_Object;            // cached pointer to base sub-object
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;  // cached pointer to self

    // Virtual function table: one slot per overridable engstate method,
    // filled in by the NVOC-generated init code.
    NV_STATUS (*__engstateConstructEngine__)(POBJGPU, POBJENGSTATE, ENGDESCRIPTOR);
    void (*__engstateInitMissing__)(POBJGPU, POBJENGSTATE);
    NV_STATUS (*__engstateStatePreInitLocked__)(POBJGPU, POBJENGSTATE);
    NV_STATUS (*__engstateStatePreInitUnlocked__)(POBJGPU, POBJENGSTATE);
    NV_STATUS (*__engstateStateInitLocked__)(POBJGPU, POBJENGSTATE);
    NV_STATUS (*__engstateStateInitUnlocked__)(POBJGPU, POBJENGSTATE);
    NV_STATUS (*__engstateStatePreLoad__)(POBJGPU, POBJENGSTATE, NvU32);
    NV_STATUS (*__engstateStateLoad__)(POBJGPU, POBJENGSTATE, NvU32);
    NV_STATUS (*__engstateStatePostLoad__)(POBJGPU, POBJENGSTATE, NvU32);
    NV_STATUS (*__engstateStatePreUnload__)(POBJGPU, POBJENGSTATE, NvU32);
    NV_STATUS (*__engstateStateUnload__)(POBJGPU, POBJENGSTATE, NvU32);
    NV_STATUS (*__engstateStatePostUnload__)(POBJGPU, POBJENGSTATE, NvU32);
    void (*__engstateStateDestroy__)(POBJGPU, POBJENGSTATE);
    NV_STATUS (*__engstateAllocTunableState__)(POBJGPU, POBJENGSTATE, void **);
    void (*__engstateFreeTunableState__)(POBJGPU, POBJENGSTATE, void *);
    NV_STATUS (*__engstateGetTunableState__)(POBJGPU, POBJENGSTATE, void *);
    NV_STATUS (*__engstateSetTunableState__)(POBJGPU, POBJENGSTATE, void *);
    NV_STATUS (*__engstateReconcileTunableState__)(POBJGPU, POBJENGSTATE, void *);
    NV_STATUS (*__engstateCompareTunableState__)(POBJGPU, POBJENGSTATE, void *, void *);
    NvBool (*__engstateIsPresent__)(POBJGPU, POBJENGSTATE);

    NvBool PDB_PROP_ENGSTATE_IS_MISSING;  // engine not present on this GPU (see engstateInitMissing)
    ENGDESCRIPTOR engDesc;                // identifies which engine this state belongs to
    void *pOriginalTunableState;          // opaque; managed by the tunable-state methods
    struct OBJGPU *pGpu;                  // owning GPU object
    ENGSTATE_STATE currentState;          // current position in the lifecycle

    // One stats slot per lifecycle state.  The original hard-coded 11 here,
    // which must always equal ENGSTATE_STATE_COUNT; use the enum constant so
    // the array size cannot drift if states are added.
    ENGSTATE_STATS stats[ENGSTATE_STATE_COUNT];
    char name[100];                       // human-readable engine name buffer
};
#ifndef __NVOC_CLASS_OBJENGSTATE_TYPEDEF__
#define __NVOC_CLASS_OBJENGSTATE_TYPEDEF__
typedef struct OBJENGSTATE OBJENGSTATE;
#endif /* __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJENGSTATE
#define __nvoc_class_id_OBJENGSTATE 0x7a7ed6
#endif /* __nvoc_class_id_OBJENGSTATE */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;
#define __staticCast_OBJENGSTATE(pThis) \
((pThis)->__nvoc_pbase_OBJENGSTATE)
#ifdef __nvoc_eng_state_h_disabled
#define __dynamicCast_OBJENGSTATE(pThis) ((OBJENGSTATE*)NULL)
#else //__nvoc_eng_state_h_disabled
#define __dynamicCast_OBJENGSTATE(pThis) \
((OBJENGSTATE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJENGSTATE)))
#endif //__nvoc_eng_state_h_disabled
#define PDB_PROP_ENGSTATE_IS_MISSING_BASE_CAST
#define PDB_PROP_ENGSTATE_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
NV_STATUS __nvoc_objCreateDynamic_OBJENGSTATE(OBJENGSTATE**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_OBJENGSTATE(OBJENGSTATE**, Dynamic*, NvU32);
#define __objCreate_OBJENGSTATE(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_OBJENGSTATE((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
#define engstateConstructEngine(pGpu, pEngstate, arg0) engstateConstructEngine_DISPATCH(pGpu, pEngstate, arg0)
#define engstateInitMissing(pGpu, pEngstate) engstateInitMissing_DISPATCH(pGpu, pEngstate)
#define engstateStatePreInitLocked(pGpu, pEngstate) engstateStatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define engstateStatePreInitUnlocked(pGpu, pEngstate) engstateStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define engstateStateInitLocked(pGpu, pEngstate) engstateStateInitLocked_DISPATCH(pGpu, pEngstate)
#define engstateStateInitUnlocked(pGpu, pEngstate) engstateStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define engstateStatePreLoad(pGpu, pEngstate, arg0) engstateStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define engstateStateLoad(pGpu, pEngstate, arg0) engstateStateLoad_DISPATCH(pGpu, pEngstate, arg0)
#define engstateStatePostLoad(pGpu, pEngstate, arg0) engstateStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
#define engstateStatePreUnload(pGpu, pEngstate, arg0) engstateStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
#define engstateStateUnload(pGpu, pEngstate, arg0) engstateStateUnload_DISPATCH(pGpu, pEngstate, arg0)
#define engstateStatePostUnload(pGpu, pEngstate, arg0) engstateStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define engstateStateDestroy(pGpu, pEngstate) engstateStateDestroy_DISPATCH(pGpu, pEngstate)
#define engstateAllocTunableState(pGpu, pEngstate, ppTunableState) engstateAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState)
#define engstateFreeTunableState(pGpu, pEngstate, pTunableState) engstateFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define engstateGetTunableState(pGpu, pEngstate, pTunableState) engstateGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define engstateSetTunableState(pGpu, pEngstate, pTunableState) engstateSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define engstateReconcileTunableState(pGpu, pEngstate, pTunableState) engstateReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define engstateCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) engstateCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2)
#define engstateIsPresent(pGpu, pEngstate) engstateIsPresent_DISPATCH(pGpu, pEngstate)
/*
 * Virtual-method dispatchers.
 *
 * For each overridable method there is a pair:
 *   - engstate<X>_IMPL:     the base-class implementation (defined elsewhere),
 *   - engstate<X>_DISPATCH: a static inline that forwards through the
 *     per-object vtable slot (__engstate<X>__) so subclasses can override.
 * The NvU32 arg0 on the load/unload entry points is an opaque flags word
 * whose meaning is defined by the callers, not visible in this header.
 */
NV_STATUS engstateConstructEngine_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, ENGDESCRIPTOR arg0);
static inline NV_STATUS engstateConstructEngine_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, ENGDESCRIPTOR arg0) {
    return pEngstate->__engstateConstructEngine__(pGpu, pEngstate, arg0);
}
void engstateInitMissing_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);
static inline void engstateInitMissing_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    pEngstate->__engstateInitMissing__(pGpu, pEngstate);
}
NV_STATUS engstateStatePreInitLocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);
static inline NV_STATUS engstateStatePreInitLocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    return pEngstate->__engstateStatePreInitLocked__(pGpu, pEngstate);
}
NV_STATUS engstateStatePreInitUnlocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);
static inline NV_STATUS engstateStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    return pEngstate->__engstateStatePreInitUnlocked__(pGpu, pEngstate);
}
NV_STATUS engstateStateInitLocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);
static inline NV_STATUS engstateStateInitLocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    return pEngstate->__engstateStateInitLocked__(pGpu, pEngstate);
}
NV_STATUS engstateStateInitUnlocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);
static inline NV_STATUS engstateStateInitUnlocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    return pEngstate->__engstateStateInitUnlocked__(pGpu, pEngstate);
}
// Load-phase hooks (pre/load/post), each taking the caller-defined flags word.
NV_STATUS engstateStatePreLoad_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0);
static inline NV_STATUS engstateStatePreLoad_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) {
    return pEngstate->__engstateStatePreLoad__(pGpu, pEngstate, arg0);
}
NV_STATUS engstateStateLoad_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0);
static inline NV_STATUS engstateStateLoad_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) {
    return pEngstate->__engstateStateLoad__(pGpu, pEngstate, arg0);
}
NV_STATUS engstateStatePostLoad_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0);
static inline NV_STATUS engstateStatePostLoad_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) {
    return pEngstate->__engstateStatePostLoad__(pGpu, pEngstate, arg0);
}
// Unload-phase hooks (pre/unload/post), mirroring the load sequence.
NV_STATUS engstateStatePreUnload_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0);
static inline NV_STATUS engstateStatePreUnload_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) {
    return pEngstate->__engstateStatePreUnload__(pGpu, pEngstate, arg0);
}
NV_STATUS engstateStateUnload_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0);
static inline NV_STATUS engstateStateUnload_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) {
    return pEngstate->__engstateStateUnload__(pGpu, pEngstate, arg0);
}
NV_STATUS engstateStatePostUnload_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0);
static inline NV_STATUS engstateStatePostUnload_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) {
    return pEngstate->__engstateStatePostUnload__(pGpu, pEngstate, arg0);
}
void engstateStateDestroy_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);
static inline void engstateStateDestroy_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    pEngstate->__engstateStateDestroy__(pGpu, pEngstate);
}
// Tunable-state management: alloc/free plus get/set/reconcile/compare on an
// opaque (void *) per-engine tunable blob.
NV_STATUS engstateAllocTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void **ppTunableState);
static inline NV_STATUS engstateAllocTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void **ppTunableState) {
    return pEngstate->__engstateAllocTunableState__(pGpu, pEngstate, ppTunableState);
}
void engstateFreeTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState);
static inline void engstateFreeTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) {
    pEngstate->__engstateFreeTunableState__(pGpu, pEngstate, pTunableState);
}
NV_STATUS engstateGetTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState);
static inline NV_STATUS engstateGetTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) {
    return pEngstate->__engstateGetTunableState__(pGpu, pEngstate, pTunableState);
}
NV_STATUS engstateSetTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState);
static inline NV_STATUS engstateSetTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) {
    return pEngstate->__engstateSetTunableState__(pGpu, pEngstate, pTunableState);
}
NV_STATUS engstateReconcileTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState);
static inline NV_STATUS engstateReconcileTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) {
    return pEngstate->__engstateReconcileTunableState__(pGpu, pEngstate, pTunableState);
}
NV_STATUS engstateCompareTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunables1, void *pTunables2);
static inline NV_STATUS engstateCompareTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunables1, void *pTunables2) {
    return pEngstate->__engstateCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2);
}
NvBool engstateIsPresent_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);
static inline NvBool engstateIsPresent_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    return pEngstate->__engstateIsPresent__(pGpu, pEngstate);
}
/*
 * Non-virtual helpers.
 *
 * When __nvoc_eng_state_h_disabled is defined (class compiled out), each
 * wrapper becomes a stub that asserts and returns a benign failure value;
 * otherwise it is a macro that calls the _IMPL directly.
 */

// Initializes the common OBJENGSTATE base fields for a derived engine.
NV_STATUS engstateConstructBase_IMPL(struct OBJENGSTATE *arg0, struct OBJGPU *arg1, ENGDESCRIPTOR arg2);
#ifdef __nvoc_eng_state_h_disabled
static inline NV_STATUS engstateConstructBase(struct OBJENGSTATE *arg0, struct OBJGPU *arg1, ENGDESCRIPTOR arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_eng_state_h_disabled
#define engstateConstructBase(arg0, arg1, arg2) engstateConstructBase_IMPL(arg0, arg1, arg2)
#endif //__nvoc_eng_state_h_disabled

// Records bookkeeping in ENGSTATE_TRANSITION_DATA before a state transition.
void engstateLogStateTransitionPre_IMPL(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2);
#ifdef __nvoc_eng_state_h_disabled
static inline void engstateLogStateTransitionPre(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
}
#else //__nvoc_eng_state_h_disabled
#define engstateLogStateTransitionPre(arg0, arg1, arg2) engstateLogStateTransitionPre_IMPL(arg0, arg1, arg2)
#endif //__nvoc_eng_state_h_disabled

// Completes bookkeeping after a state transition (pairs with ...Pre above).
void engstateLogStateTransitionPost_IMPL(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2);
#ifdef __nvoc_eng_state_h_disabled
static inline void engstateLogStateTransitionPost(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
}
#else //__nvoc_eng_state_h_disabled
#define engstateLogStateTransitionPost(arg0, arg1, arg2) engstateLogStateTransitionPost_IMPL(arg0, arg1, arg2)
#endif //__nvoc_eng_state_h_disabled

// Returns the engine's human-readable name (the struct's name[] buffer).
const char *engstateGetName_IMPL(struct OBJENGSTATE *arg0);
#ifdef __nvoc_eng_state_h_disabled
static inline const char *engstateGetName(struct OBJENGSTATE *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
    return NULL;
}
#else //__nvoc_eng_state_h_disabled
#define engstateGetName(arg0) engstateGetName_IMPL(arg0)
#endif //__nvoc_eng_state_h_disabled

// Destructor entry point invoked by the NVOC runtime; never stubbed.
void engstateDestruct_IMPL(POBJENGSTATE pEngstate);
#define __nvoc_engstateDestruct(pEngstate) engstateDestruct_IMPL(pEngstate)

NV_STATUS engstateStatePreInit_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);
#ifdef __nvoc_eng_state_h_disabled
static inline NV_STATUS engstateStatePreInit(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_eng_state_h_disabled
#define engstateStatePreInit(pGpu, pEngstate) engstateStatePreInit_IMPL(pGpu, pEngstate)
#endif //__nvoc_eng_state_h_disabled

NV_STATUS engstateStateInit_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);
#ifdef __nvoc_eng_state_h_disabled
static inline NV_STATUS engstateStateInit(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_eng_state_h_disabled
#define engstateStateInit(pGpu, pEngstate) engstateStateInit_IMPL(pGpu, pEngstate)
#endif //__nvoc_eng_state_h_disabled

// Accessor for the engDesc field; disabled stub returns a zeroed descriptor.
ENGDESCRIPTOR engstateGetDescriptor_IMPL(POBJENGSTATE pEngstate);
#ifdef __nvoc_eng_state_h_disabled
static inline ENGDESCRIPTOR engstateGetDescriptor(POBJENGSTATE pEngstate) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
    ENGDESCRIPTOR ret;
    portMemSet(&ret, 0, sizeof(ENGDESCRIPTOR));
    return ret;
}
#else //__nvoc_eng_state_h_disabled
#define engstateGetDescriptor(pEngstate) engstateGetDescriptor_IMPL(pEngstate)
#endif //__nvoc_eng_state_h_disabled

// Returns the FIFO engine object associated with this engstate's GPU
// (backs the ENG_GET_FIFO convenience macro above).
struct OBJFIFO *engstateGetFifo_IMPL(POBJENGSTATE pEngstate);
#ifdef __nvoc_eng_state_h_disabled
static inline struct OBJFIFO *engstateGetFifo(POBJENGSTATE pEngstate) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
    return NULL;
}
#else //__nvoc_eng_state_h_disabled
#define engstateGetFifo(pEngstate) engstateGetFifo_IMPL(pEngstate)
#endif //__nvoc_eng_state_h_disabled
#undef PRIVATE_FIELD
#endif // _ENG_STATE_H_
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_ENG_STATE_NVOC_H_

/*
 * ---- End of g_eng_state_nvoc.h; next file: g_event_buffer_nvoc.c
 *      (NVOC-generated source, 379 lines in the original listing) ----
 */
#define NVOC_EVENT_BUFFER_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_event_buffer_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x63502b = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_EventBuffer;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
void __nvoc_init_EventBuffer(EventBuffer*);
void __nvoc_init_funcTable_EventBuffer(EventBuffer*);
NV_STATUS __nvoc_ctor_EventBuffer(EventBuffer*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_EventBuffer(EventBuffer*);
void __nvoc_dtor_EventBuffer(EventBuffer*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_EventBuffer;
/*
 * RTTI tables for EventBuffer.
 *
 * One NVOC_RTTI entry per ancestor in the inheritance chain
 * (EventBuffer -> RmResource -> RsResource -> Object, plus the
 * RmResourceCommon mixin).  Each entry records the byte offset of the
 * embedded base sub-object so casts can be done by pointer adjustment,
 * and the destructor to invoke when destroying through that base type.
 */
static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_EventBuffer = {
    /*pClassDef=*/ &__nvoc_class_def_EventBuffer,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_EventBuffer,
    /*offset=*/ 0,  // most-derived type: no adjustment needed
};
static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_RsResource = {
    /*pClassDef=*/ &__nvoc_class_def_RsResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource),
};
static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_RmResourceCommon = {
    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};
static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_RmResource = {
    /*pClassDef=*/ &__nvoc_class_def_RmResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource),
};

// Cast table consulted by __nvoc_dynamicCast: the 5 types an EventBuffer
// pointer may be legally cast to, most-derived first.
static const struct NVOC_CASTINFO __nvoc_castinfo_EventBuffer = {
    /*numRelatives=*/ 5,
    /*relatives=*/ {
        &__nvoc_rtti_EventBuffer_EventBuffer,
        &__nvoc_rtti_EventBuffer_RmResource,
        &__nvoc_rtti_EventBuffer_RmResourceCommon,
        &__nvoc_rtti_EventBuffer_RsResource,
        &__nvoc_rtti_EventBuffer_Object,
    },
};

// Class descriptor: size, class id, cast info, and the exported-method
// table used for RM control-call dispatch.
const struct NVOC_CLASS_DEF __nvoc_class_def_EventBuffer =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(EventBuffer),
        /*classId=*/ classId(EventBuffer),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "EventBuffer",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_EventBuffer,
    /*pCastInfo=*/ &__nvoc_castinfo_EventBuffer,
    /*pExportInfo=*/ &__nvoc_export_info_EventBuffer
};
/*
 * Base-class thunks.
 *
 * Each thunk converts the EventBuffer pointer to the embedded base
 * sub-object by adding the byte offset recorded in the RTTI tables above,
 * then forwards to the corresponding RsResource/RmResource method.  These
 * are installed into the EventBuffer vtable by
 * __nvoc_init_funcTable_EventBuffer_1().  The raw (unsigned char *)
 * arithmetic is the NVOC equivalent of an upcast and must stay exact.
 */
static NvBool __nvoc_thunk_RmResource_eventbufferShareCallback(struct EventBuffer *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}
static NV_STATUS __nvoc_thunk_RmResource_eventbufferCheckMemInterUnmap(struct EventBuffer *pRmResource, NvBool bSubdeviceHandleProvided) {
    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_EventBuffer_RmResource.offset), bSubdeviceHandleProvided);
}
static NV_STATUS __nvoc_thunk_RsResource_eventbufferControl(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_eventbufferGetMemInterMapParams(struct EventBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_eventbufferGetMemoryMappingDescriptor(struct EventBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_EventBuffer_RmResource.offset), ppMemDesc);
}
static NvU32 __nvoc_thunk_RsResource_eventbufferGetRefCount(struct EventBuffer *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_eventbufferControlFilter(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pParams);
}
static void __nvoc_thunk_RsResource_eventbufferAddAdditionalDependants(struct RsClient *pClient, struct EventBuffer *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pReference);
}
static NV_STATUS __nvoc_thunk_RsResource_eventbufferUnmap(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
    return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pCpuMapping);
}
static NV_STATUS __nvoc_thunk_RmResource_eventbufferControl_Prologue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pCallContext, pParams);
}
static NvBool __nvoc_thunk_RsResource_eventbufferCanCopy(struct EventBuffer *pResource) {
    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_eventbufferMapTo(struct EventBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pParams);
}
static void __nvoc_thunk_RsResource_eventbufferPreDestruct(struct EventBuffer *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_eventbufferUnmapFrom(struct EventBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pParams);
}
static void __nvoc_thunk_RmResource_eventbufferControl_Epilogue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RsResource_eventbufferControlLookup(struct EventBuffer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pParams, ppEntry);
}
static NV_STATUS __nvoc_thunk_RsResource_eventbufferMap(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
    return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pParams, pCpuMapping);
}
static NvBool __nvoc_thunk_RmResource_eventbufferAccessCallback(struct EventBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
// Default: no exported methods are compiled out unless the build defines
// NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG otherwise.
#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG)
#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0)
#endif

/*
 * RM control-call export table for EventBuffer.
 *
 * Each entry maps an NV90CD control methodId to its handler.  The 0x90cd01xx
 * methodIds presumably correspond to the NV_EVENT_BUFFER_CTRL_CMD_* values in
 * ctrl/ctrl90cd.h (the params header included by g_event_buffer_nvoc.h) --
 * confirm against that header.  The meaning of the flags words (0x11u vs
 * 0x10u) is defined by the resource-server dispatch code, not visible here.
 * The doubled "eventbuffertBuffer..." prefix is how the generator emitted
 * these names; it must match the _IMPL definitions elsewhere.
 */
static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_EventBuffer[] =
{
    { /* [0] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdEnableEvent_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
        /*flags=*/ 0x11u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x90cd0101u,
        /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "eventbuffertBufferCtrlCmdEnableEvent"
#endif
    },
    { /* [1] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdUpdateGet_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
        /*flags=*/ 0x11u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x90cd0102u,
        /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "eventbuffertBufferCtrlCmdUpdateGet"
#endif
    },
    { /* [2] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdFlush_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*flags=*/ 0x10u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x90cd0104u,
        /*paramSize=*/ 0,  // flush takes no parameter struct
        /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "eventbuffertBufferCtrlCmdFlush"
#endif
    },
    { /* [3] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*flags=*/ 0x10u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x90cd0105u,
        /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "eventbuffertBufferCtrlCmdPostTelemetryEvent"
#endif
    },
};

// Export descriptor referenced from the class definition above.
const struct NVOC_EXPORT_INFO __nvoc_export_info_EventBuffer =
{
    /*numEntries=*/ 4,
    /*pExportEntries=*/ __nvoc_exported_method_def_EventBuffer
};
void __nvoc_dtor_RmResource(RmResource*);

// Destructor: run the derived-class cleanup first, then destroy the
// embedded base.  Order is significant and must mirror construction.
void __nvoc_dtor_EventBuffer(EventBuffer *pThis) {
    __nvoc_eventbufferDestruct(pThis);
    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

// Data-field initializer: EventBuffer has no NVOC-managed data-field
// defaults, so this is intentionally empty.
void __nvoc_init_dataField_EventBuffer(EventBuffer *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

/*
 * Constructor: base first, then data fields, then the user-written
 * eventbufferConstruct.  On failure the goto-unwind labels tear down in
 * strict reverse order of what succeeded, so the caller sees either a
 * fully-constructed object (NV_OK) or nothing constructed at all.
 */
NV_STATUS __nvoc_ctor_EventBuffer(EventBuffer *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_EventBuffer_fail_RmResource;
    __nvoc_init_dataField_EventBuffer(pThis);
    status = __nvoc_eventbufferConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_EventBuffer_fail__init;
    goto __nvoc_ctor_EventBuffer_exit; // Success
__nvoc_ctor_EventBuffer_fail__init:
    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_ctor_EventBuffer_fail_RmResource:
__nvoc_ctor_EventBuffer_exit:
    return status;
}
/*
 * Populates the EventBuffer vtable: first the class's own exported control
 * handlers (subject to the same disable-by-flag gating as the export table
 * above), then the base-class thunks defined earlier in this file.
 */
static void __nvoc_init_funcTable_EventBuffer_1(EventBuffer *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
    pThis->__eventbuffertBufferCtrlCmdEnableEvent__ = &eventbuffertBufferCtrlCmdEnableEvent_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
    pThis->__eventbuffertBufferCtrlCmdUpdateGet__ = &eventbuffertBufferCtrlCmdUpdateGet_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
    pThis->__eventbuffertBufferCtrlCmdFlush__ = &eventbuffertBufferCtrlCmdFlush_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
    pThis->__eventbuffertBufferCtrlCmdPostTelemetryEvent__ = &eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL;
#endif
    pThis->__eventbufferShareCallback__ = &__nvoc_thunk_RmResource_eventbufferShareCallback;
    pThis->__eventbufferCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_eventbufferCheckMemInterUnmap;
    pThis->__eventbufferControl__ = &__nvoc_thunk_RsResource_eventbufferControl;
    pThis->__eventbufferGetMemInterMapParams__ = &__nvoc_thunk_RmResource_eventbufferGetMemInterMapParams;
    pThis->__eventbufferGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_eventbufferGetMemoryMappingDescriptor;
    pThis->__eventbufferGetRefCount__ = &__nvoc_thunk_RsResource_eventbufferGetRefCount;
    pThis->__eventbufferControlFilter__ = &__nvoc_thunk_RsResource_eventbufferControlFilter;
    pThis->__eventbufferAddAdditionalDependants__ = &__nvoc_thunk_RsResource_eventbufferAddAdditionalDependants;
    pThis->__eventbufferUnmap__ = &__nvoc_thunk_RsResource_eventbufferUnmap;
    pThis->__eventbufferControl_Prologue__ = &__nvoc_thunk_RmResource_eventbufferControl_Prologue;
    pThis->__eventbufferCanCopy__ = &__nvoc_thunk_RsResource_eventbufferCanCopy;
    pThis->__eventbufferMapTo__ = &__nvoc_thunk_RsResource_eventbufferMapTo;
    pThis->__eventbufferPreDestruct__ = &__nvoc_thunk_RsResource_eventbufferPreDestruct;
    pThis->__eventbufferUnmapFrom__ = &__nvoc_thunk_RsResource_eventbufferUnmapFrom;
    pThis->__eventbufferControl_Epilogue__ = &__nvoc_thunk_RmResource_eventbufferControl_Epilogue;
    pThis->__eventbufferControlLookup__ = &__nvoc_thunk_RsResource_eventbufferControlLookup;
    pThis->__eventbufferMap__ = &__nvoc_thunk_RsResource_eventbufferMap;
    pThis->__eventbufferAccessCallback__ = &__nvoc_thunk_RmResource_eventbufferAccessCallback;
}

// Public entry point; the _1 split is a generator artifact for large tables.
void __nvoc_init_funcTable_EventBuffer(EventBuffer *pThis) {
    __nvoc_init_funcTable_EventBuffer_1(pThis);
}

void __nvoc_init_RmResource(RmResource*);

/*
 * Wires up the cached base-class pointers (__nvoc_pbase_*), initializes the
 * embedded RmResource base, and installs the vtable.  Must run before the
 * constructor.
 */
void __nvoc_init_EventBuffer(EventBuffer *pThis) {
    pThis->__nvoc_pbase_EventBuffer = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;
    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;
    __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource);
    __nvoc_init_funcTable_EventBuffer(pThis);
}
/*
 * Allocates, initializes and constructs an EventBuffer.
 *
 * Sequence: zeroed non-paged allocation -> RTTI setup -> optional parenting
 * into the object hierarchy -> pointer/vtable init -> constructor.  On
 * constructor failure the allocation is freed (the ctor's unwind already ran
 * the partial destructors) and *ppThis is left untouched.
 */
NV_STATUS __nvoc_objCreate_EventBuffer(EventBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    EventBuffer *pThis;
    pThis = portMemAllocNonPaged(sizeof(EventBuffer));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;
    portMemSet(pThis, 0, sizeof(EventBuffer));
    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_EventBuffer);
    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        // NOTE(review): the dynamicCast result is not NULL-checked before
        // objAddChild -- presumably pParent is always an Object here; confirm
        // against the NVOC runtime's contract.
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
    }
    else
    {
        // HALSPEC-only parenting (or no parent): leave the object unparented.
        pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
    }
    __nvoc_init_EventBuffer(pThis);
    status = __nvoc_ctor_EventBuffer(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_EventBuffer_cleanup;
    *ppThis = pThis;
    return NV_OK;
__nvoc_objCreate_EventBuffer_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

// va_list-based creation used by the dynamic factory (objCreateDynamic):
// pops the two constructor arguments in declaration order and delegates.
NV_STATUS __nvoc_objCreateDynamic_EventBuffer(EventBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
    status = __nvoc_objCreate_EventBuffer(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
    return status;
}

/*
 * ---- End of g_event_buffer_nvoc.c; next file: g_event_buffer_nvoc.h
 *      (NVOC-generated header, 288 lines in the original listing) ----
 */
#ifndef _G_EVENT_BUFFER_NVOC_H_
#define _G_EVENT_BUFFER_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_event_buffer_nvoc.h"
#ifndef _EVENT_BUFFER_H_
#define _EVENT_BUFFER_H_
#include "resserv/resserv.h"
#include "nvoc/prelude.h"
#include "rmapi/event.h"
#include "rmapi/resource.h"
#include "ctrl/ctrl90cd.h"
#include "eventbufferproducer.h"
struct Memory;
#ifndef __NVOC_CLASS_Memory_TYPEDEF__
#define __NVOC_CLASS_Memory_TYPEDEF__
typedef struct Memory Memory;
#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */
#ifndef __nvoc_class_id_Memory
#define __nvoc_class_id_Memory 0x4789f2
#endif /* __nvoc_class_id_Memory */
/*
 * Per-mapping bookkeeping for one view of the event buffer: the three
 * shared regions (header, fixed-size records, variable-size data), each
 * tracked as a user/kernel RO address plus the opaque cookie needed to
 * unmap it later.
 */
typedef struct
{
//
// Addr: user RO address
// Priv: return cookie to be passed to unmap
//
NvP64 headerAddr;
NvP64 headerPriv;
NvP64 recordBuffAddr;
NvP64 recordBuffPriv;
NvP64 vardataBuffAddr;
NvP64 vardataBuffPriv;
} EVENT_BUFFER_MAP_INFO;
// This class shares buffers between kernel and usermode
// PRIVATE_FIELD: NVOC access control — fields stay visible to translation
// units that define NVOC_EVENT_BUFFER_H_PRIVATE_ACCESS_ALLOWED and are
// name-mangled (hidden) everywhere else.
#ifdef NVOC_EVENT_BUFFER_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
/*
 * EventBuffer: RmResource-derived class that shares event buffers between
 * kernel and usermode. Layout is generated by NVOC and is ABI-sensitive:
 * RTTI pointer first, embedded base class, per-base "pbase" shortcut
 * pointers, then the virtual function table slots, then data members.
 * Do not reorder fields.
 */
struct EventBuffer {
// Generated RTTI / inheritance plumbing.
const struct NVOC_RTTI *__nvoc_rtti;
struct RmResource __nvoc_base_RmResource;
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct EventBuffer *__nvoc_pbase_EventBuffer;
// Virtual method slots: the four class-specific control commands first,
// then the RmResource/RsResource virtuals this class inherits.
NV_STATUS (*__eventbuffertBufferCtrlCmdEnableEvent__)(struct EventBuffer *, NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *);
NV_STATUS (*__eventbuffertBufferCtrlCmdUpdateGet__)(struct EventBuffer *, NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *);
NV_STATUS (*__eventbuffertBufferCtrlCmdFlush__)(struct EventBuffer *);
NV_STATUS (*__eventbuffertBufferCtrlCmdPostTelemetryEvent__)(struct EventBuffer *, NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *);
NvBool (*__eventbufferShareCallback__)(struct EventBuffer *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__eventbufferCheckMemInterUnmap__)(struct EventBuffer *, NvBool);
NV_STATUS (*__eventbufferControl__)(struct EventBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__eventbufferGetMemInterMapParams__)(struct EventBuffer *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__eventbufferGetMemoryMappingDescriptor__)(struct EventBuffer *, struct MEMORY_DESCRIPTOR **);
NvU32 (*__eventbufferGetRefCount__)(struct EventBuffer *);
NV_STATUS (*__eventbufferControlFilter__)(struct EventBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
void (*__eventbufferAddAdditionalDependants__)(struct RsClient *, struct EventBuffer *, RsResourceRef *);
NV_STATUS (*__eventbufferUnmap__)(struct EventBuffer *, struct CALL_CONTEXT *, RsCpuMapping *);
NV_STATUS (*__eventbufferControl_Prologue__)(struct EventBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NvBool (*__eventbufferCanCopy__)(struct EventBuffer *);
NV_STATUS (*__eventbufferMapTo__)(struct EventBuffer *, RS_RES_MAP_TO_PARAMS *);
void (*__eventbufferPreDestruct__)(struct EventBuffer *);
NV_STATUS (*__eventbufferUnmapFrom__)(struct EventBuffer *, RS_RES_UNMAP_FROM_PARAMS *);
void (*__eventbufferControl_Epilogue__)(struct EventBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__eventbufferControlLookup__)(struct EventBuffer *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NV_STATUS (*__eventbufferMap__)(struct EventBuffer *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *);
NvBool (*__eventbufferAccessCallback__)(struct EventBuffer *, struct RsClient *, void *, RsAccessRight);
// Memory descriptors backing the three shared regions.
struct MEMORY_DESCRIPTOR *pHeaderDesc;
struct MEMORY_DESCRIPTOR *pRecordBufDesc;
struct MEMORY_DESCRIPTOR *pVardataBufDesc;
NvHandle hSubDevice;
NvU32 subDeviceInst;
// Kernel-side and client-side mappings of the shared regions.
EVENT_BUFFER_MAP_INFO kernelMapInfo;
EVENT_BUFFER_MAP_INFO clientMapInfo;
NvHandle hClient;
NvU16 seqNo;
NvBool bNotifyPending;
PEVENTNOTIFICATION pListeners;
EVENT_BUFFER_PRODUCER_INFO producerInfo;
struct Memory *pHeader;
struct Memory *pRecord;
struct Memory *pVardata;
// Handles for internally-allocated client/device/memory objects.
NvHandle hInternalClient;
NvHandle hInternalDevice;
NvHandle hInternalSubdevice;
NvHandle hInternalHeader;
NvHandle hInternalBuffer;
};
#ifndef __NVOC_CLASS_EventBuffer_TYPEDEF__
#define __NVOC_CLASS_EventBuffer_TYPEDEF__
typedef struct EventBuffer EventBuffer;
#endif /* __NVOC_CLASS_EventBuffer_TYPEDEF__ */
#ifndef __nvoc_class_id_EventBuffer
#define __nvoc_class_id_EventBuffer 0x63502b
#endif /* __nvoc_class_id_EventBuffer */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_EventBuffer;
#define __staticCast_EventBuffer(pThis) \
((pThis)->__nvoc_pbase_EventBuffer)
#ifdef __nvoc_event_buffer_h_disabled
#define __dynamicCast_EventBuffer(pThis) ((EventBuffer*)NULL)
#else //__nvoc_event_buffer_h_disabled
#define __dynamicCast_EventBuffer(pThis) \
((EventBuffer*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(EventBuffer)))
#endif //__nvoc_event_buffer_h_disabled
NV_STATUS __nvoc_objCreateDynamic_EventBuffer(EventBuffer**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_EventBuffer(EventBuffer**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_EventBuffer(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_EventBuffer((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
#define eventbuffertBufferCtrlCmdEnableEvent(pEventBuffer, pEnableParams) eventbuffertBufferCtrlCmdEnableEvent_DISPATCH(pEventBuffer, pEnableParams)
#define eventbuffertBufferCtrlCmdUpdateGet(pEventBuffer, pUpdateParams) eventbuffertBufferCtrlCmdUpdateGet_DISPATCH(pEventBuffer, pUpdateParams)
#define eventbuffertBufferCtrlCmdFlush(pEventBuffer) eventbuffertBufferCtrlCmdFlush_DISPATCH(pEventBuffer)
#define eventbuffertBufferCtrlCmdPostTelemetryEvent(pEventBuffer, pPostTelemetryEvent) eventbuffertBufferCtrlCmdPostTelemetryEvent_DISPATCH(pEventBuffer, pPostTelemetryEvent)
#define eventbufferShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) eventbufferShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy)
#define eventbufferCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) eventbufferCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define eventbufferControl(pResource, pCallContext, pParams) eventbufferControl_DISPATCH(pResource, pCallContext, pParams)
#define eventbufferGetMemInterMapParams(pRmResource, pParams) eventbufferGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define eventbufferGetMemoryMappingDescriptor(pRmResource, ppMemDesc) eventbufferGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define eventbufferGetRefCount(pResource) eventbufferGetRefCount_DISPATCH(pResource)
#define eventbufferControlFilter(pResource, pCallContext, pParams) eventbufferControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define eventbufferAddAdditionalDependants(pClient, pResource, pReference) eventbufferAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define eventbufferUnmap(pResource, pCallContext, pCpuMapping) eventbufferUnmap_DISPATCH(pResource, pCallContext, pCpuMapping)
#define eventbufferControl_Prologue(pResource, pCallContext, pParams) eventbufferControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define eventbufferCanCopy(pResource) eventbufferCanCopy_DISPATCH(pResource)
#define eventbufferMapTo(pResource, pParams) eventbufferMapTo_DISPATCH(pResource, pParams)
#define eventbufferPreDestruct(pResource) eventbufferPreDestruct_DISPATCH(pResource)
#define eventbufferUnmapFrom(pResource, pParams) eventbufferUnmapFrom_DISPATCH(pResource, pParams)
#define eventbufferControl_Epilogue(pResource, pCallContext, pParams) eventbufferControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define eventbufferControlLookup(pResource, pParams, ppEntry) eventbufferControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define eventbufferMap(pResource, pCallContext, pParams, pCpuMapping) eventbufferMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping)
#define eventbufferAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) eventbufferAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
/*
 * Generated dispatch layer: each *_DISPATCH inline forwards a call through
 * the corresponding function pointer stored in the EventBuffer vtable
 * (populated by __nvoc_init_funcTable_EventBuffer). The *_IMPL prototypes
 * declare the class's own implementations; the remaining slots are filled
 * with thunks to the RmResource/RsResource base-class behavior.
 */
NV_STATUS eventbuffertBufferCtrlCmdEnableEvent_IMPL(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *pEnableParams);
static inline NV_STATUS eventbuffertBufferCtrlCmdEnableEvent_DISPATCH(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *pEnableParams) {
return pEventBuffer->__eventbuffertBufferCtrlCmdEnableEvent__(pEventBuffer, pEnableParams);
}
NV_STATUS eventbuffertBufferCtrlCmdUpdateGet_IMPL(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *pUpdateParams);
static inline NV_STATUS eventbuffertBufferCtrlCmdUpdateGet_DISPATCH(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *pUpdateParams) {
return pEventBuffer->__eventbuffertBufferCtrlCmdUpdateGet__(pEventBuffer, pUpdateParams);
}
NV_STATUS eventbuffertBufferCtrlCmdFlush_IMPL(struct EventBuffer *pEventBuffer);
static inline NV_STATUS eventbuffertBufferCtrlCmdFlush_DISPATCH(struct EventBuffer *pEventBuffer) {
return pEventBuffer->__eventbuffertBufferCtrlCmdFlush__(pEventBuffer);
}
NV_STATUS eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *pPostTelemetryEvent);
static inline NV_STATUS eventbuffertBufferCtrlCmdPostTelemetryEvent_DISPATCH(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *pPostTelemetryEvent) {
return pEventBuffer->__eventbuffertBufferCtrlCmdPostTelemetryEvent__(pEventBuffer, pPostTelemetryEvent);
}
// Inherited virtuals below — resolved at init time to base-class thunks.
static inline NvBool eventbufferShareCallback_DISPATCH(struct EventBuffer *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pResource->__eventbufferShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy);
}
static inline NV_STATUS eventbufferCheckMemInterUnmap_DISPATCH(struct EventBuffer *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__eventbufferCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NV_STATUS eventbufferControl_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__eventbufferControl__(pResource, pCallContext, pParams);
}
static inline NV_STATUS eventbufferGetMemInterMapParams_DISPATCH(struct EventBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return pRmResource->__eventbufferGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS eventbufferGetMemoryMappingDescriptor_DISPATCH(struct EventBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return pRmResource->__eventbufferGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline NvU32 eventbufferGetRefCount_DISPATCH(struct EventBuffer *pResource) {
return pResource->__eventbufferGetRefCount__(pResource);
}
static inline NV_STATUS eventbufferControlFilter_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__eventbufferControlFilter__(pResource, pCallContext, pParams);
}
static inline void eventbufferAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct EventBuffer *pResource, RsResourceRef *pReference) {
pResource->__eventbufferAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NV_STATUS eventbufferUnmap_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
return pResource->__eventbufferUnmap__(pResource, pCallContext, pCpuMapping);
}
static inline NV_STATUS eventbufferControl_Prologue_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__eventbufferControl_Prologue__(pResource, pCallContext, pParams);
}
static inline NvBool eventbufferCanCopy_DISPATCH(struct EventBuffer *pResource) {
return pResource->__eventbufferCanCopy__(pResource);
}
static inline NV_STATUS eventbufferMapTo_DISPATCH(struct EventBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__eventbufferMapTo__(pResource, pParams);
}
static inline void eventbufferPreDestruct_DISPATCH(struct EventBuffer *pResource) {
pResource->__eventbufferPreDestruct__(pResource);
}
static inline NV_STATUS eventbufferUnmapFrom_DISPATCH(struct EventBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pResource->__eventbufferUnmapFrom__(pResource, pParams);
}
static inline void eventbufferControl_Epilogue_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__eventbufferControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS eventbufferControlLookup_DISPATCH(struct EventBuffer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__eventbufferControlLookup__(pResource, pParams, ppEntry);
}
static inline NV_STATUS eventbufferMap_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
return pResource->__eventbufferMap__(pResource, pCallContext, pParams, pCpuMapping);
}
static inline NvBool eventbufferAccessCallback_DISPATCH(struct EventBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pResource->__eventbufferAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
NV_STATUS eventbufferConstruct_IMPL(struct EventBuffer *arg_pEventBuffer, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_eventbufferConstruct(arg_pEventBuffer, arg_pCallContext, arg_pParams) eventbufferConstruct_IMPL(arg_pEventBuffer, arg_pCallContext, arg_pParams)
void eventbufferDestruct_IMPL(struct EventBuffer *pEventBuffer);
#define __nvoc_eventbufferDestruct(pEventBuffer) eventbufferDestruct_IMPL(pEventBuffer)
#undef PRIVATE_FIELD
NV_STATUS eventBufferAdd(struct EventBuffer *pEventBuffer, void* pEventData, NvU32 recordType, NvBool* bNotify, NvP64 *pHandle);
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_EVENT_BUFFER_NVOC_H_

/*
 * ---- File boundary (extraction artifact) ----
 * The content below begins a separate generated source file (692 lines),
 * g_event_nvoc.c (it defines NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED and
 * includes g_event_nvoc.h). The original "View File" / "@@ -0,0 +1,692 @@"
 * markers were diff-viewer residue; converted to this comment so the text
 * stays valid C.
 */
#define NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_event_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xd5f150 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared;
void __nvoc_init_NotifShare(NotifShare*);
void __nvoc_init_funcTable_NotifShare(NotifShare*);
NV_STATUS __nvoc_ctor_NotifShare(NotifShare*);
void __nvoc_init_dataField_NotifShare(NotifShare*);
void __nvoc_dtor_NotifShare(NotifShare*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_NotifShare;
/*
 * Generated RTTI tables for NotifShare (RsShared-derived).
 * One NVOC_RTTI entry per ancestor: the class itself (offset 0, owns the
 * real destructor) and each base, keyed by its byte offset within the
 * derived object so casts can be done by pointer arithmetic.
 */
static const struct NVOC_RTTI __nvoc_rtti_NotifShare_NotifShare = {
/*pClassDef=*/ &__nvoc_class_def_NotifShare,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_NotifShare,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_NotifShare_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(NotifShare, __nvoc_base_RsShared.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_NotifShare_RsShared = {
/*pClassDef=*/ &__nvoc_class_def_RsShared,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(NotifShare, __nvoc_base_RsShared),
};
// Cast table consulted by dynamicCast: self plus the two ancestors.
static const struct NVOC_CASTINFO __nvoc_castinfo_NotifShare = {
/*numRelatives=*/ 3,
/*relatives=*/ {
&__nvoc_rtti_NotifShare_NotifShare,
&__nvoc_rtti_NotifShare_RsShared,
&__nvoc_rtti_NotifShare_Object,
},
};
// Class definition: size/id/name metadata plus the dynamic-create hook.
const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare =
{
/*classInfo=*/ {
/*size=*/ sizeof(NotifShare),
/*classId=*/ classId(NotifShare),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "NotifShare",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_NotifShare,
/*pCastInfo=*/ &__nvoc_castinfo_NotifShare,
/*pExportInfo=*/ &__nvoc_export_info_NotifShare
};
// NotifShare exports no RM-control methods.
const struct NVOC_EXPORT_INFO __nvoc_export_info_NotifShare =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_RsShared(RsShared*);
/*
 * Destructor chain: run the class's own destruct hook first, then the
 * RsShared base destructor (reverse of construction order).
 */
void __nvoc_dtor_NotifShare(NotifShare *pThis) {
__nvoc_shrnotifDestruct(pThis);
__nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// No data fields need non-zero initialization for this class.
void __nvoc_init_dataField_NotifShare(NotifShare *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_RsShared(RsShared* );
/*
 * Constructor chain: base (RsShared) first, then data-field defaults,
 * then the class's own construct hook. On failure, the goto ladder
 * unwinds exactly the bases already constructed — label order mirrors
 * construction order and must not be changed.
 */
NV_STATUS __nvoc_ctor_NotifShare(NotifShare *pThis) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared);
if (status != NV_OK) goto __nvoc_ctor_NotifShare_fail_RsShared;
__nvoc_init_dataField_NotifShare(pThis);
status = __nvoc_shrnotifConstruct(pThis);
if (status != NV_OK) goto __nvoc_ctor_NotifShare_fail__init;
goto __nvoc_ctor_NotifShare_exit; // Success
__nvoc_ctor_NotifShare_fail__init:
__nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared);
__nvoc_ctor_NotifShare_fail_RsShared:
__nvoc_ctor_NotifShare_exit:
return status;
}
// NotifShare declares no virtual methods of its own, so this pass is empty.
static void __nvoc_init_funcTable_NotifShare_1(NotifShare *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
void __nvoc_init_funcTable_NotifShare(NotifShare *pThis) {
__nvoc_init_funcTable_NotifShare_1(pThis);
}
void __nvoc_init_RsShared(RsShared*);
/*
 * Wire the pbase shortcut pointers (self, Object, RsShared), initialize
 * the embedded base, then populate the function table.
 */
void __nvoc_init_NotifShare(NotifShare *pThis) {
pThis->__nvoc_pbase_NotifShare = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object;
pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared;
__nvoc_init_RsShared(&pThis->__nvoc_base_RsShared);
__nvoc_init_funcTable_NotifShare(pThis);
}
/*
 * Allocate, wire up, and construct a NotifShare object.
 * Same generated sequence as every NVOC creator: zeroed non-paged alloc,
 * RTTI init, optional parent attachment, base/vtable init, constructor.
 * Returns NV_OK and transfers *ppThis to the caller, NV_ERR_NO_MEMORY on
 * allocation failure, or the constructor status (object freed here; its
 * partially-constructed bases were already torn down by the ctor chain).
 */
NV_STATUS __nvoc_objCreate_NotifShare(NotifShare **ppThis, Dynamic *pParent, NvU32 createFlags) {
NV_STATUS status;
Object *pParentObj;
NotifShare *pThis;
pThis = portMemAllocNonPaged(sizeof(NotifShare));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(NotifShare));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_NotifShare);
// HALSPEC_ONLY parents configure HAL state but do not own the child.
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_NotifShare(pThis);
status = __nvoc_ctor_NotifShare(pThis);
if (status != NV_OK) goto __nvoc_objCreate_NotifShare_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_NotifShare_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
/*
 * Varargs trampoline for dynamic NotifShare creation. NotifShare's
 * constructor takes no extra arguments, so `args` is intentionally left
 * unconsumed; the call forwards straight to the typed creator.
 */
NV_STATUS __nvoc_objCreateDynamic_NotifShare(NotifShare **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    return __nvoc_objCreate_NotifShare(ppThis, pParent, createFlags);
}
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xa4ecfc = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Event;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
void __nvoc_init_Event(Event*);
void __nvoc_init_funcTable_Event(Event*);
NV_STATUS __nvoc_ctor_Event(Event*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_Event(Event*);
void __nvoc_dtor_Event(Event*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Event;
/*
 * Generated RTTI tables for Event (RmResource-derived).
 * One NVOC_RTTI entry per ancestor, each keyed by its byte offset inside
 * Event so dynamicCast can adjust pointers arithmetically.
 */
static const struct NVOC_RTTI __nvoc_rtti_Event_Event = {
/*pClassDef=*/ &__nvoc_class_def_Event,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Event,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_Event_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_Event_RsResource = {
/*pClassDef=*/ &__nvoc_class_def_RsResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource),
};
static const struct NVOC_RTTI __nvoc_rtti_Event_RmResourceCommon = {
/*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};
static const struct NVOC_RTTI __nvoc_rtti_Event_RmResource = {
/*pClassDef=*/ &__nvoc_class_def_RmResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Event, __nvoc_base_RmResource),
};
// Cast table: self plus the four ancestors, most-derived first.
static const struct NVOC_CASTINFO __nvoc_castinfo_Event = {
/*numRelatives=*/ 5,
/*relatives=*/ {
&__nvoc_rtti_Event_Event,
&__nvoc_rtti_Event_RmResource,
&__nvoc_rtti_Event_RmResourceCommon,
&__nvoc_rtti_Event_RsResource,
&__nvoc_rtti_Event_Object,
},
};
// Class definition: size/id/name metadata plus the dynamic-create hook.
const struct NVOC_CLASS_DEF __nvoc_class_def_Event =
{
/*classInfo=*/ {
/*size=*/ sizeof(Event),
/*classId=*/ classId(Event),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "Event",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Event,
/*pCastInfo=*/ &__nvoc_castinfo_Event,
/*pExportInfo=*/ &__nvoc_export_info_Event
};
/*
 * Base-class thunks for Event's inherited virtuals. Each thunk converts
 * the Event* to the appropriate base-class pointer by adding the byte
 * offset recorded in the matching RTTI entry, then calls the base
 * implementation. These are installed into the Event vtable by
 * __nvoc_init_funcTable_Event_1.
 */
static NvBool __nvoc_thunk_RmResource_eventShareCallback(struct Event *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}
static NV_STATUS __nvoc_thunk_RmResource_eventCheckMemInterUnmap(struct Event *pRmResource, NvBool bSubdeviceHandleProvided) {
return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Event_RmResource.offset), bSubdeviceHandleProvided);
}
static NV_STATUS __nvoc_thunk_RsResource_eventControl(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_eventGetMemInterMapParams(struct Event *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Event_RmResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_eventGetMemoryMappingDescriptor(struct Event *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Event_RmResource.offset), ppMemDesc);
}
static NvU32 __nvoc_thunk_RsResource_eventGetRefCount(struct Event *pResource) {
return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_eventControlFilter(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pParams);
}
static void __nvoc_thunk_RsResource_eventAddAdditionalDependants(struct RsClient *pClient, struct Event *pResource, RsResourceRef *pReference) {
resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pReference);
}
static NV_STATUS __nvoc_thunk_RsResource_eventUnmap(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pCpuMapping);
}
static NV_STATUS __nvoc_thunk_RmResource_eventControl_Prologue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pCallContext, pParams);
}
static NvBool __nvoc_thunk_RsResource_eventCanCopy(struct Event *pResource) {
return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_eventMapTo(struct Event *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pParams);
}
static void __nvoc_thunk_RsResource_eventPreDestruct(struct Event *pResource) {
resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_eventUnmapFrom(struct Event *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pParams);
}
static void __nvoc_thunk_RmResource_eventControl_Epilogue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RsResource_eventControlLookup(struct Event *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pParams, ppEntry);
}
static NV_STATUS __nvoc_thunk_RsResource_eventMap(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pParams, pCpuMapping);
}
static NvBool __nvoc_thunk_RmResource_eventAccessCallback(struct Event *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
// Event exports no RM-control methods.
const struct NVOC_EXPORT_INFO __nvoc_export_info_Event =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_RmResource(RmResource*);
/*
 * Destructor chain: class destruct hook first, then the RmResource base
 * (reverse of construction order).
 */
void __nvoc_dtor_Event(Event *pThis) {
__nvoc_eventDestruct(pThis);
__nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// No data fields need non-zero initialization for this class.
void __nvoc_init_dataField_Event(Event *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
/*
 * Constructor chain: base (RmResource) first, then data-field defaults,
 * then eventConstruct. The goto ladder unwinds exactly the bases already
 * constructed; label order mirrors construction order.
 */
NV_STATUS __nvoc_ctor_Event(Event *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_Event_fail_RmResource;
__nvoc_init_dataField_Event(pThis);
status = __nvoc_eventConstruct(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_Event_fail__init;
goto __nvoc_ctor_Event_exit; // Success
__nvoc_ctor_Event_fail__init:
__nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_ctor_Event_fail_RmResource:
__nvoc_ctor_Event_exit:
return status;
}
/*
 * Populate Event's vtable. Event overrides nothing itself: every slot is
 * filled with a base-class thunk (RmResource or RsResource behavior).
 */
static void __nvoc_init_funcTable_Event_1(Event *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
pThis->__eventShareCallback__ = &__nvoc_thunk_RmResource_eventShareCallback;
pThis->__eventCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_eventCheckMemInterUnmap;
pThis->__eventControl__ = &__nvoc_thunk_RsResource_eventControl;
pThis->__eventGetMemInterMapParams__ = &__nvoc_thunk_RmResource_eventGetMemInterMapParams;
pThis->__eventGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_eventGetMemoryMappingDescriptor;
pThis->__eventGetRefCount__ = &__nvoc_thunk_RsResource_eventGetRefCount;
pThis->__eventControlFilter__ = &__nvoc_thunk_RsResource_eventControlFilter;
pThis->__eventAddAdditionalDependants__ = &__nvoc_thunk_RsResource_eventAddAdditionalDependants;
pThis->__eventUnmap__ = &__nvoc_thunk_RsResource_eventUnmap;
pThis->__eventControl_Prologue__ = &__nvoc_thunk_RmResource_eventControl_Prologue;
pThis->__eventCanCopy__ = &__nvoc_thunk_RsResource_eventCanCopy;
pThis->__eventMapTo__ = &__nvoc_thunk_RsResource_eventMapTo;
pThis->__eventPreDestruct__ = &__nvoc_thunk_RsResource_eventPreDestruct;
pThis->__eventUnmapFrom__ = &__nvoc_thunk_RsResource_eventUnmapFrom;
pThis->__eventControl_Epilogue__ = &__nvoc_thunk_RmResource_eventControl_Epilogue;
pThis->__eventControlLookup__ = &__nvoc_thunk_RsResource_eventControlLookup;
pThis->__eventMap__ = &__nvoc_thunk_RsResource_eventMap;
pThis->__eventAccessCallback__ = &__nvoc_thunk_RmResource_eventAccessCallback;
}
void __nvoc_init_funcTable_Event(Event *pThis) {
__nvoc_init_funcTable_Event_1(pThis);
}
void __nvoc_init_RmResource(RmResource*);
/*
 * Wire the pbase shortcut pointers (self and all four ancestors),
 * initialize the embedded RmResource base, then populate the vtable.
 */
void __nvoc_init_Event(Event *pThis) {
pThis->__nvoc_pbase_Event = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;
pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;
__nvoc_init_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_init_funcTable_Event(pThis);
}
/*
 * Allocate, wire up, and construct an Event object.
 * Standard NVOC creator sequence: zeroed non-paged alloc, RTTI init,
 * optional parent attachment, base/vtable init, constructor chain.
 * Returns NV_OK and transfers *ppThis to the caller, NV_ERR_NO_MEMORY on
 * allocation failure, or the constructor status (object freed here; the
 * ctor chain already destructed any bases it had constructed).
 */
NV_STATUS __nvoc_objCreate_Event(Event **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status;
Object *pParentObj;
Event *pThis;
pThis = portMemAllocNonPaged(sizeof(Event));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(Event));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Event);
// HALSPEC_ONLY parents configure HAL state but do not own the child.
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_Event(pThis);
status = __nvoc_ctor_Event(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_objCreate_Event_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_Event_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
// Varargs shim for dynamic creation: peels the Event constructor arguments
// off the va_list (in declaration order) and forwards to the typed factory.
NV_STATUS __nvoc_objCreateDynamic_Event(Event **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    struct CALL_CONTEXT *pCallCtx = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
    return __nvoc_objCreate_Event(ppThis, pParent, createFlags, pCallCtx, pAllocParams);
}
// Link-time check: two classes hashing to the same 24-bit class id would
// produce a duplicate-symbol error here.
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xf8f965 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;
void __nvoc_init_INotifier(INotifier*);
void __nvoc_init_funcTable_INotifier(INotifier*);
NV_STATUS __nvoc_ctor_INotifier(INotifier*, struct CALL_CONTEXT * arg_pCallContext);
void __nvoc_init_dataField_INotifier(INotifier*);
void __nvoc_dtor_INotifier(INotifier*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_INotifier;
// RTTI entry for INotifier viewed as itself (offset 0, own destructor).
static const struct NVOC_RTTI __nvoc_rtti_INotifier_INotifier = {
/*pClassDef=*/ &__nvoc_class_def_INotifier,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_INotifier,
/*offset=*/ 0,
};
// Cast table: INotifier has no bases, so the only relative is itself.
static const struct NVOC_CASTINFO __nvoc_castinfo_INotifier = {
/*numRelatives=*/ 1,
/*relatives=*/ {
&__nvoc_rtti_INotifier_INotifier,
},
};
// Not instantiable because it's not derived from class "Object"
// Not instantiable because it's an abstract class with following pure virtual functions:
// inotifyGetNotificationListPtr
// inotifySetNotificationShare
// inotifyGetNotificationShare
// inotifyUnregisterEvent
// inotifyGetOrAllocNotifShare
// Class descriptor: objCreatefn is NULL because the class is abstract (see above).
const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier =
{
/*classInfo=*/ {
/*size=*/ sizeof(INotifier),
/*classId=*/ classId(INotifier),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "INotifier",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL,
/*pCastInfo=*/ &__nvoc_castinfo_INotifier,
/*pExportInfo=*/ &__nvoc_export_info_INotifier
};
// INotifier exports no RM control methods.
const struct NVOC_EXPORT_INFO __nvoc_export_info_INotifier =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
// Generated destructor: runs the hand-written INotifier destruct hook.
// (PORT_UNREFERENCED_VARIABLE is emitted unconditionally by the generator.)
void __nvoc_dtor_INotifier(INotifier *pThis) {
__nvoc_inotifyDestruct(pThis);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// INotifier declares no data fields, so there is nothing to initialize.
void __nvoc_init_dataField_INotifier(INotifier *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Generated constructor: initialize data fields, then run the hand-written
// INotifier construct hook; its status is the overall result.
NV_STATUS __nvoc_ctor_INotifier(INotifier *pThis, struct CALL_CONTEXT * arg_pCallContext) {
    __nvoc_init_dataField_INotifier(pThis);
    return __nvoc_inotifyConstruct(pThis, arg_pCallContext);
}
// INotifier is abstract: every vtable slot is a pure virtual, left NULL here
// and filled in by deriving classes (see the Notifier thunks below).
static void __nvoc_init_funcTable_INotifier_1(INotifier *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
pThis->__inotifyGetNotificationListPtr__ = NULL;
pThis->__inotifySetNotificationShare__ = NULL;
pThis->__inotifyGetNotificationShare__ = NULL;
pThis->__inotifyUnregisterEvent__ = NULL;
pThis->__inotifyGetOrAllocNotifShare__ = NULL;
}
void __nvoc_init_funcTable_INotifier(INotifier *pThis) {
__nvoc_init_funcTable_INotifier_1(pThis);
}
// Wires up the self-pointer and installs the (all-NULL) function table.
void __nvoc_init_INotifier(INotifier *pThis) {
pThis->__nvoc_pbase_INotifier = pThis;
__nvoc_init_funcTable_INotifier(pThis);
}
// Link-time uniqueness check for Notifier's 24-bit class id.
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xa8683b = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;
void __nvoc_init_Notifier(Notifier*);
void __nvoc_init_funcTable_Notifier(Notifier*);
NV_STATUS __nvoc_ctor_Notifier(Notifier*, struct CALL_CONTEXT * arg_pCallContext);
void __nvoc_init_dataField_Notifier(Notifier*);
void __nvoc_dtor_Notifier(Notifier*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Notifier;
// RTTI entry for Notifier viewed as itself.
static const struct NVOC_RTTI __nvoc_rtti_Notifier_Notifier = {
/*pClassDef=*/ &__nvoc_class_def_Notifier,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Notifier,
/*offset=*/ 0,
};
// RTTI entry for Notifier viewed through its embedded INotifier base; the
// recorded offset is what the thunks below subtract to recover the Notifier.
static const struct NVOC_RTTI __nvoc_rtti_Notifier_INotifier = {
/*pClassDef=*/ &__nvoc_class_def_INotifier,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Notifier, __nvoc_base_INotifier),
};
static const struct NVOC_CASTINFO __nvoc_castinfo_Notifier = {
/*numRelatives=*/ 2,
/*relatives=*/ {
&__nvoc_rtti_Notifier_Notifier,
&__nvoc_rtti_Notifier_INotifier,
},
};
// Not instantiable because it's not derived from class "Object"
const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier =
{
/*classInfo=*/ {
/*size=*/ sizeof(Notifier),
/*classId=*/ classId(Notifier),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "Notifier",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL,
/*pCastInfo=*/ &__nvoc_castinfo_Notifier,
/*pExportInfo=*/ &__nvoc_export_info_Notifier
};
// Base-to-derived thunks: each recovers the containing Notifier by subtracting
// the INotifier base offset recorded in __nvoc_rtti_Notifier_INotifier, then
// forwards to the corresponding notify* implementation. These are what the
// INotifier vtable slots point at for Notifier instances.
static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier) {
return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset));
}
static struct NotifShare *__nvoc_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier) {
return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset));
}
static void __nvoc_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare) {
notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset), pNotifShare);
}
static NV_STATUS __nvoc_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent);
}
static NV_STATUS __nvoc_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset), hNotifierClient, hNotifierResource, ppNotifShare);
}
// Notifier exports no RM control methods.
const struct NVOC_EXPORT_INFO __nvoc_export_info_Notifier =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_INotifier(INotifier*);
// Generated destructor: runs Notifier's own destruct hook first, then the
// INotifier base destructor (reverse of construction order).
void __nvoc_dtor_Notifier(Notifier *pThis) {
__nvoc_notifyDestruct(pThis);
__nvoc_dtor_INotifier(&pThis->__nvoc_base_INotifier);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Notifier declares no data fields needing explicit initialization here.
void __nvoc_init_dataField_Notifier(Notifier *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_INotifier(INotifier* , struct CALL_CONTEXT *);
// Generated constructor: construct the INotifier base first, then Notifier's
// data fields and its own construct hook. If the hook fails, the already
// constructed base is torn down before the error is propagated.
NV_STATUS __nvoc_ctor_Notifier(Notifier *pThis, struct CALL_CONTEXT * arg_pCallContext) {
    NV_STATUS status = __nvoc_ctor_INotifier(&pThis->__nvoc_base_INotifier, arg_pCallContext);
    if (status != NV_OK)
        return status;
    __nvoc_init_dataField_Notifier(pThis);
    status = __nvoc_notifyConstruct(pThis, arg_pCallContext);
    if (status != NV_OK)
        __nvoc_dtor_INotifier(&pThis->__nvoc_base_INotifier);
    return status;
}
// Installs Notifier's own vtable entries and plugs the Notifier thunks into
// the embedded INotifier base's (otherwise NULL) pure-virtual slots.
static void __nvoc_init_funcTable_Notifier_1(Notifier *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
pThis->__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL;
pThis->__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL;
pThis->__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL;
pThis->__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL;
pThis->__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL;
pThis->__nvoc_base_INotifier.__inotifyGetNotificationListPtr__ = &__nvoc_thunk_Notifier_inotifyGetNotificationListPtr;
pThis->__nvoc_base_INotifier.__inotifyGetNotificationShare__ = &__nvoc_thunk_Notifier_inotifyGetNotificationShare;
pThis->__nvoc_base_INotifier.__inotifySetNotificationShare__ = &__nvoc_thunk_Notifier_inotifySetNotificationShare;
pThis->__nvoc_base_INotifier.__inotifyUnregisterEvent__ = &__nvoc_thunk_Notifier_inotifyUnregisterEvent;
pThis->__nvoc_base_INotifier.__inotifyGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_inotifyGetOrAllocNotifShare;
}
void __nvoc_init_funcTable_Notifier(Notifier *pThis) {
__nvoc_init_funcTable_Notifier_1(pThis);
}
void __nvoc_init_INotifier(INotifier*);
// Wires up pbase shortcuts, initializes the INotifier base (which NULLs its
// slots), then installs Notifier's table, overriding those slots via thunks.
void __nvoc_init_Notifier(Notifier *pThis) {
pThis->__nvoc_pbase_Notifier = pThis;
pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_INotifier;
__nvoc_init_INotifier(&pThis->__nvoc_base_INotifier);
__nvoc_init_funcTable_Notifier(pThis);
}

/* ---- extraction artifact: diff-viewer chrome ("View File", "@@ -0,0 +1,529 @@") removed; the embedded generated header g_event_nvoc.h begins below ---- */
#ifndef _G_EVENT_NVOC_H_
#define _G_EVENT_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_event_nvoc.h"
#ifndef _EVENT_H_
#define _EVENT_H_
#include "class/cl0000.h" // NV0000_NOTIFIERS_MAXCOUNT
#include "resserv/resserv.h"
#include "nvoc/prelude.h"
#include "resserv/rs_server.h"
#include "rmapi/resource.h"
typedef struct _def_system_event_queue SYSTEM_EVENTS_QUEUE;
// One node in a singly linked list (see Next) describing a single event
// registration against a notifier.
struct EVENTNOTIFICATION
{
NvHandle hEventClient;
NvHandle hEvent;
NvU32 subdeviceInst;
NvU32 NotifyIndex; // NVnnnn_NOTIFIERS_xyz
NvU32 NotifyType; // Event class. NV01_EVENT_OS_EVENT for example.
NvBool bUserOsEventHandle; // Event was allocated from user app.
NvBool bBroadcastEvent; // Wait for all subdevices before sending event.
NvBool bClientRM; // Event was allocated from client RM.
NvBool bSubdeviceSpecificEvent; // SubdeviceSpecificValue is valid.
NvU32 SubdeviceSpecificValue; // NV0005_NOTIFY_INDEX_SUBDEVICE
NvBool bEventDataRequired; // nv_post_event allocates memory for Data.
NvBool bNonStallIntrEvent;
NvU32 NotifyTriggerCount; // Used with bBroadcastEvent.
NvP64 Data;
struct EVENTNOTIFICATION *Next;
};
typedef struct EVENTNOTIFICATION EVENTNOTIFICATION, *PEVENTNOTIFICATION;
struct INotifier;
#ifndef __NVOC_CLASS_INotifier_TYPEDEF__
#define __NVOC_CLASS_INotifier_TYPEDEF__
typedef struct INotifier INotifier;
#endif /* __NVOC_CLASS_INotifier_TYPEDEF__ */
#ifndef __nvoc_class_id_INotifier
#define __nvoc_class_id_INotifier 0xf8f965
#endif /* __nvoc_class_id_INotifier */
#define NV_SYSTEM_EVENT_QUEUE_SIZE 16
// Fixed-capacity ring of pending system events, indexed by Head/Tail.
// NOTE(review): no in-structure synchronization visible here — presumably the
// caller serializes access; confirm against CliAddSystemEvent's usage.
struct _def_system_event_queue
{
NvU32 Head;
NvU32 Tail;
struct event_queue
{
NvU32 event;
NvU32 status;
} EventQueue[NV_SYSTEM_EVENT_QUEUE_SIZE];
};
// Per-client system-event state: the pending queue plus a notify-action
// setting per NV0000 notifier index.
struct _def_client_system_event_info
{
SYSTEM_EVENTS_QUEUE systemEventsQueue;
NvU32 notifyActions[NV0000_NOTIFIERS_MAXCOUNT];
};
/**
* This class represents data that is shared between one notifier and any
* events that are registered with the notifier.
*
* Instances of this class are ref-counted and will be kept alive until
* the notifier and all of its events have been freed.
*/
// PRIVATE_FIELD mangles member names unless the including file opts into
// private access; re-defined (and #undef'd) around each class in this header.
#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// Shared state between one notifier (pNotifier, identified by the
// hNotifierClient/hNotifierResource handle pair) and its registered
// event list (pEventList). Derives from RsShared.
struct NotifShare {
const struct NVOC_RTTI *__nvoc_rtti;
struct RsShared __nvoc_base_RsShared;
struct Object *__nvoc_pbase_Object;
struct RsShared *__nvoc_pbase_RsShared;
struct NotifShare *__nvoc_pbase_NotifShare;
struct INotifier *pNotifier;
NvHandle hNotifierClient;
NvHandle hNotifierResource;
EVENTNOTIFICATION *pEventList;
};
#ifndef __NVOC_CLASS_NotifShare_TYPEDEF__
#define __NVOC_CLASS_NotifShare_TYPEDEF__
typedef struct NotifShare NotifShare;
#endif /* __NVOC_CLASS_NotifShare_TYPEDEF__ */
#ifndef __nvoc_class_id_NotifShare
#define __nvoc_class_id_NotifShare 0xd5f150
#endif /* __nvoc_class_id_NotifShare */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare;
#define __staticCast_NotifShare(pThis) \
((pThis)->__nvoc_pbase_NotifShare)
#ifdef __nvoc_event_h_disabled
#define __dynamicCast_NotifShare(pThis) ((NotifShare*)NULL)
#else //__nvoc_event_h_disabled
#define __dynamicCast_NotifShare(pThis) \
((NotifShare*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NotifShare)))
#endif //__nvoc_event_h_disabled
NV_STATUS __nvoc_objCreateDynamic_NotifShare(NotifShare**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_NotifShare(NotifShare**, Dynamic*, NvU32);
#define __objCreate_NotifShare(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_NotifShare((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
NV_STATUS shrnotifConstruct_IMPL(struct NotifShare *arg_pNotifShare);
#define __nvoc_shrnotifConstruct(arg_pNotifShare) shrnotifConstruct_IMPL(arg_pNotifShare)
void shrnotifDestruct_IMPL(struct NotifShare *pNotifShare);
#define __nvoc_shrnotifDestruct(pNotifShare) shrnotifDestruct_IMPL(pNotifShare)
#undef PRIVATE_FIELD
/**
* This class represents event notification consumers
*/
#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// Event resource: derives from RmResource and carries per-instance vtable
// pointers (filled in by __nvoc_init_funcTable_Event) plus the handles tying
// it back to the notifier it is registered against.
struct Event {
const struct NVOC_RTTI *__nvoc_rtti;
struct RmResource __nvoc_base_RmResource;
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct Event *__nvoc_pbase_Event;
NvBool (*__eventShareCallback__)(struct Event *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__eventCheckMemInterUnmap__)(struct Event *, NvBool);
NV_STATUS (*__eventControl__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__eventGetMemInterMapParams__)(struct Event *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__eventGetMemoryMappingDescriptor__)(struct Event *, struct MEMORY_DESCRIPTOR **);
NvU32 (*__eventGetRefCount__)(struct Event *);
NV_STATUS (*__eventControlFilter__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
void (*__eventAddAdditionalDependants__)(struct RsClient *, struct Event *, RsResourceRef *);
NV_STATUS (*__eventUnmap__)(struct Event *, struct CALL_CONTEXT *, RsCpuMapping *);
NV_STATUS (*__eventControl_Prologue__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NvBool (*__eventCanCopy__)(struct Event *);
NV_STATUS (*__eventMapTo__)(struct Event *, RS_RES_MAP_TO_PARAMS *);
void (*__eventPreDestruct__)(struct Event *);
NV_STATUS (*__eventUnmapFrom__)(struct Event *, RS_RES_UNMAP_FROM_PARAMS *);
void (*__eventControl_Epilogue__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__eventControlLookup__)(struct Event *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NV_STATUS (*__eventMap__)(struct Event *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *);
NvBool (*__eventAccessCallback__)(struct Event *, struct RsClient *, void *, RsAccessRight);
struct NotifShare *pNotifierShare;
NvHandle hNotifierClient;
NvHandle hNotifierResource;
NvHandle hEvent;
};
#ifndef __NVOC_CLASS_Event_TYPEDEF__
#define __NVOC_CLASS_Event_TYPEDEF__
typedef struct Event Event;
#endif /* __NVOC_CLASS_Event_TYPEDEF__ */
#ifndef __nvoc_class_id_Event
#define __nvoc_class_id_Event 0xa4ecfc
#endif /* __nvoc_class_id_Event */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Event;
#define __staticCast_Event(pThis) \
((pThis)->__nvoc_pbase_Event)
#ifdef __nvoc_event_h_disabled
#define __dynamicCast_Event(pThis) ((Event*)NULL)
#else //__nvoc_event_h_disabled
#define __dynamicCast_Event(pThis) \
((Event*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Event)))
#endif //__nvoc_event_h_disabled
NV_STATUS __nvoc_objCreateDynamic_Event(Event**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_Event(Event**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_Event(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_Event((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
// Convenience aliases: each eventXxx(...) macro forwards to the corresponding
// eventXxx_DISPATCH inline, which calls through the per-instance vtable slot.
#define eventShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) eventShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy)
#define eventCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) eventCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define eventControl(pResource, pCallContext, pParams) eventControl_DISPATCH(pResource, pCallContext, pParams)
#define eventGetMemInterMapParams(pRmResource, pParams) eventGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define eventGetMemoryMappingDescriptor(pRmResource, ppMemDesc) eventGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define eventGetRefCount(pResource) eventGetRefCount_DISPATCH(pResource)
#define eventControlFilter(pResource, pCallContext, pParams) eventControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define eventAddAdditionalDependants(pClient, pResource, pReference) eventAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define eventUnmap(pResource, pCallContext, pCpuMapping) eventUnmap_DISPATCH(pResource, pCallContext, pCpuMapping)
#define eventControl_Prologue(pResource, pCallContext, pParams) eventControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define eventCanCopy(pResource) eventCanCopy_DISPATCH(pResource)
#define eventMapTo(pResource, pParams) eventMapTo_DISPATCH(pResource, pParams)
#define eventPreDestruct(pResource) eventPreDestruct_DISPATCH(pResource)
#define eventUnmapFrom(pResource, pParams) eventUnmapFrom_DISPATCH(pResource, pParams)
#define eventControl_Epilogue(pResource, pCallContext, pParams) eventControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define eventControlLookup(pResource, pParams, ppEntry) eventControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define eventMap(pResource, pCallContext, pParams, pCpuMapping) eventMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping)
#define eventAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) eventAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
// Virtual dispatch helpers: each calls the matching function-pointer slot on
// the Event instance. NOTE(review): none of these NULL-check the slot —
// presumably __nvoc_init_funcTable_Event always runs first; confirm before
// calling on a partially initialized object.
static inline NvBool eventShareCallback_DISPATCH(struct Event *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pResource->__eventShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy);
}
static inline NV_STATUS eventCheckMemInterUnmap_DISPATCH(struct Event *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__eventCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NV_STATUS eventControl_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__eventControl__(pResource, pCallContext, pParams);
}
static inline NV_STATUS eventGetMemInterMapParams_DISPATCH(struct Event *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return pRmResource->__eventGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS eventGetMemoryMappingDescriptor_DISPATCH(struct Event *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return pRmResource->__eventGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline NvU32 eventGetRefCount_DISPATCH(struct Event *pResource) {
return pResource->__eventGetRefCount__(pResource);
}
static inline NV_STATUS eventControlFilter_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__eventControlFilter__(pResource, pCallContext, pParams);
}
static inline void eventAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Event *pResource, RsResourceRef *pReference) {
pResource->__eventAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NV_STATUS eventUnmap_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
return pResource->__eventUnmap__(pResource, pCallContext, pCpuMapping);
}
static inline NV_STATUS eventControl_Prologue_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__eventControl_Prologue__(pResource, pCallContext, pParams);
}
static inline NvBool eventCanCopy_DISPATCH(struct Event *pResource) {
return pResource->__eventCanCopy__(pResource);
}
static inline NV_STATUS eventMapTo_DISPATCH(struct Event *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__eventMapTo__(pResource, pParams);
}
static inline void eventPreDestruct_DISPATCH(struct Event *pResource) {
pResource->__eventPreDestruct__(pResource);
}
static inline NV_STATUS eventUnmapFrom_DISPATCH(struct Event *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pResource->__eventUnmapFrom__(pResource, pParams);
}
static inline void eventControl_Epilogue_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__eventControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS eventControlLookup_DISPATCH(struct Event *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__eventControlLookup__(pResource, pParams, ppEntry);
}
static inline NV_STATUS eventMap_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
return pResource->__eventMap__(pResource, pCallContext, pParams, pCpuMapping);
}
static inline NvBool eventAccessCallback_DISPATCH(struct Event *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pResource->__eventAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
// Hand-written Event construct/destruct implementations (bodies elsewhere).
NV_STATUS eventConstruct_IMPL(struct Event *arg_pEvent, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_eventConstruct(arg_pEvent, arg_pCallContext, arg_pParams) eventConstruct_IMPL(arg_pEvent, arg_pCallContext, arg_pParams)
void eventDestruct_IMPL(struct Event *pEvent);
#define __nvoc_eventDestruct(pEvent) eventDestruct_IMPL(pEvent)
NV_STATUS eventInit_IMPL(struct Event *pEvent, struct CALL_CONTEXT *pCallContext, NvHandle hNotifierClient, NvHandle hNotifierResource, PEVENTNOTIFICATION **pppEventNotification);
// When the class is compiled out, eventInit becomes an assert-and-fail stub.
#ifdef __nvoc_event_h_disabled
static inline NV_STATUS eventInit(struct Event *pEvent, struct CALL_CONTEXT *pCallContext, NvHandle hNotifierClient, NvHandle hNotifierResource, PEVENTNOTIFICATION **pppEventNotification) {
NV_ASSERT_FAILED_PRECOMP("Event was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_event_h_disabled
#define eventInit(pEvent, pCallContext, hNotifierClient, hNotifierResource, pppEventNotification) eventInit_IMPL(pEvent, pCallContext, hNotifierClient, hNotifierResource, pppEventNotification)
#endif //__nvoc_event_h_disabled
#undef PRIVATE_FIELD
/**
* Mix-in interface for resources that send notifications to events
*/
#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// Abstract mix-in for resources that send notifications. All five slots are
// pure virtual (left NULL by __nvoc_init_funcTable_INotifier) and must be
// supplied by a deriving class such as Notifier.
struct INotifier {
const struct NVOC_RTTI *__nvoc_rtti;
struct INotifier *__nvoc_pbase_INotifier;
PEVENTNOTIFICATION *(*__inotifyGetNotificationListPtr__)(struct INotifier *);
void (*__inotifySetNotificationShare__)(struct INotifier *, struct NotifShare *);
struct NotifShare *(*__inotifyGetNotificationShare__)(struct INotifier *);
NV_STATUS (*__inotifyUnregisterEvent__)(struct INotifier *, NvHandle, NvHandle, NvHandle, NvHandle);
NV_STATUS (*__inotifyGetOrAllocNotifShare__)(struct INotifier *, NvHandle, NvHandle, struct NotifShare **);
};
#ifndef __NVOC_CLASS_INotifier_TYPEDEF__
#define __NVOC_CLASS_INotifier_TYPEDEF__
typedef struct INotifier INotifier;
#endif /* __NVOC_CLASS_INotifier_TYPEDEF__ */
#ifndef __nvoc_class_id_INotifier
#define __nvoc_class_id_INotifier 0xf8f965
#endif /* __nvoc_class_id_INotifier */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;
#define __staticCast_INotifier(pThis) \
((pThis)->__nvoc_pbase_INotifier)
#ifdef __nvoc_event_h_disabled
#define __dynamicCast_INotifier(pThis) ((INotifier*)NULL)
#else //__nvoc_event_h_disabled
#define __dynamicCast_INotifier(pThis) \
((INotifier*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(INotifier)))
#endif //__nvoc_event_h_disabled
NV_STATUS __nvoc_objCreateDynamic_INotifier(INotifier**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_INotifier(INotifier**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext);
#define __objCreate_INotifier(ppNewObj, pParent, createFlags, arg_pCallContext) \
__nvoc_objCreate_INotifier((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext)
// Aliases and vtable-dispatch inlines for the INotifier interface.
#define inotifyGetNotificationListPtr(pNotifier) inotifyGetNotificationListPtr_DISPATCH(pNotifier)
#define inotifySetNotificationShare(pNotifier, pNotifShare) inotifySetNotificationShare_DISPATCH(pNotifier, pNotifShare)
#define inotifyGetNotificationShare(pNotifier) inotifyGetNotificationShare_DISPATCH(pNotifier)
#define inotifyUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) inotifyUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
#define inotifyGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) inotifyGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
static inline PEVENTNOTIFICATION *inotifyGetNotificationListPtr_DISPATCH(struct INotifier *pNotifier) {
return pNotifier->__inotifyGetNotificationListPtr__(pNotifier);
}
static inline void inotifySetNotificationShare_DISPATCH(struct INotifier *pNotifier, struct NotifShare *pNotifShare) {
pNotifier->__inotifySetNotificationShare__(pNotifier, pNotifShare);
}
static inline struct NotifShare *inotifyGetNotificationShare_DISPATCH(struct INotifier *pNotifier) {
return pNotifier->__inotifyGetNotificationShare__(pNotifier);
}
static inline NV_STATUS inotifyUnregisterEvent_DISPATCH(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
return pNotifier->__inotifyUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
}
static inline NV_STATUS inotifyGetOrAllocNotifShare_DISPATCH(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
return pNotifier->__inotifyGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
}
// Hand-written INotifier construct/destruct implementations (bodies elsewhere).
NV_STATUS inotifyConstruct_IMPL(struct INotifier *arg_pNotifier, struct CALL_CONTEXT *arg_pCallContext);
#define __nvoc_inotifyConstruct(arg_pNotifier, arg_pCallContext) inotifyConstruct_IMPL(arg_pNotifier, arg_pCallContext)
void inotifyDestruct_IMPL(struct INotifier *pNotifier);
#define __nvoc_inotifyDestruct(pNotifier) inotifyDestruct_IMPL(pNotifier)
PEVENTNOTIFICATION inotifyGetNotificationList_IMPL(struct INotifier *pNotifier);
// Assert-and-fail stub when the class is compiled out.
#ifdef __nvoc_event_h_disabled
static inline PEVENTNOTIFICATION inotifyGetNotificationList(struct INotifier *pNotifier) {
NV_ASSERT_FAILED_PRECOMP("INotifier was disabled!");
return NULL;
}
#else //__nvoc_event_h_disabled
#define inotifyGetNotificationList(pNotifier) inotifyGetNotificationList_IMPL(pNotifier)
#endif //__nvoc_event_h_disabled
#undef PRIVATE_FIELD
/**
* Basic implementation for event notification mix-in
*/
#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// Concrete implementation of the INotifier mix-in; holds the shared
// notification state (pNotifierShare) and its own vtable, whose slots are
// filled with the notify*_IMPL functions by __nvoc_init_funcTable_Notifier.
struct Notifier {
const struct NVOC_RTTI *__nvoc_rtti;
struct INotifier __nvoc_base_INotifier;
struct INotifier *__nvoc_pbase_INotifier;
struct Notifier *__nvoc_pbase_Notifier;
PEVENTNOTIFICATION *(*__notifyGetNotificationListPtr__)(struct Notifier *);
struct NotifShare *(*__notifyGetNotificationShare__)(struct Notifier *);
void (*__notifySetNotificationShare__)(struct Notifier *, struct NotifShare *);
NV_STATUS (*__notifyUnregisterEvent__)(struct Notifier *, NvHandle, NvHandle, NvHandle, NvHandle);
NV_STATUS (*__notifyGetOrAllocNotifShare__)(struct Notifier *, NvHandle, NvHandle, struct NotifShare **);
struct NotifShare *pNotifierShare;
};
#ifndef __NVOC_CLASS_Notifier_TYPEDEF__
#define __NVOC_CLASS_Notifier_TYPEDEF__
typedef struct Notifier Notifier;
#endif /* __NVOC_CLASS_Notifier_TYPEDEF__ */
#ifndef __nvoc_class_id_Notifier
#define __nvoc_class_id_Notifier 0xa8683b
#endif /* __nvoc_class_id_Notifier */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier;
#define __staticCast_Notifier(pThis) \
((pThis)->__nvoc_pbase_Notifier)
#ifdef __nvoc_event_h_disabled
#define __dynamicCast_Notifier(pThis) ((Notifier*)NULL)
#else //__nvoc_event_h_disabled
#define __dynamicCast_Notifier(pThis) \
((Notifier*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Notifier)))
#endif //__nvoc_event_h_disabled
NV_STATUS __nvoc_objCreateDynamic_Notifier(Notifier**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_Notifier(Notifier**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext);
#define __objCreate_Notifier(ppNewObj, pParent, createFlags, arg_pCallContext) \
__nvoc_objCreate_Notifier((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext)
// Aliases, IMPL prototypes, and vtable-dispatch inlines for Notifier.
#define notifyGetNotificationListPtr(pNotifier) notifyGetNotificationListPtr_DISPATCH(pNotifier)
#define notifyGetNotificationShare(pNotifier) notifyGetNotificationShare_DISPATCH(pNotifier)
#define notifySetNotificationShare(pNotifier, pNotifShare) notifySetNotificationShare_DISPATCH(pNotifier, pNotifShare)
#define notifyUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) notifyUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
#define notifyGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) notifyGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
PEVENTNOTIFICATION *notifyGetNotificationListPtr_IMPL(struct Notifier *pNotifier);
static inline PEVENTNOTIFICATION *notifyGetNotificationListPtr_DISPATCH(struct Notifier *pNotifier) {
return pNotifier->__notifyGetNotificationListPtr__(pNotifier);
}
struct NotifShare *notifyGetNotificationShare_IMPL(struct Notifier *pNotifier);
static inline struct NotifShare *notifyGetNotificationShare_DISPATCH(struct Notifier *pNotifier) {
return pNotifier->__notifyGetNotificationShare__(pNotifier);
}
void notifySetNotificationShare_IMPL(struct Notifier *pNotifier, struct NotifShare *pNotifShare);
static inline void notifySetNotificationShare_DISPATCH(struct Notifier *pNotifier, struct NotifShare *pNotifShare) {
pNotifier->__notifySetNotificationShare__(pNotifier, pNotifShare);
}
NV_STATUS notifyUnregisterEvent_IMPL(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent);
static inline NV_STATUS notifyUnregisterEvent_DISPATCH(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
return pNotifier->__notifyUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
}
NV_STATUS notifyGetOrAllocNotifShare_IMPL(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare);
static inline NV_STATUS notifyGetOrAllocNotifShare_DISPATCH(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
return pNotifier->__notifyGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
}
NV_STATUS notifyConstruct_IMPL(struct Notifier *arg_pNotifier, struct CALL_CONTEXT *arg_pCallContext);
#define __nvoc_notifyConstruct(arg_pNotifier, arg_pCallContext) notifyConstruct_IMPL(arg_pNotifier, arg_pCallContext)
void notifyDestruct_IMPL(struct Notifier *pNotifier);
#define __nvoc_notifyDestruct(pNotifier) notifyDestruct_IMPL(pNotifier)
#undef PRIVATE_FIELD
void CliAddSystemEvent(NvU32, NvU32);
NvBool CliDelObjectEvents(NvHandle hClient, NvHandle hObject);
NvBool CliGetEventInfo(NvHandle hClient, NvHandle hEvent, struct Event **ppEvent);
NV_STATUS CliGetEventNotificationList(NvHandle hClient, NvHandle hObject,
struct INotifier **ppNotifier,
PEVENTNOTIFICATION **pppEventNotification);
NV_STATUS registerEventNotification(PEVENTNOTIFICATION*, NvHandle, NvHandle, NvHandle, NvU32, NvU32, NvP64, NvBool);
NV_STATUS unregisterEventNotification(PEVENTNOTIFICATION*, NvHandle, NvHandle, NvHandle);
NV_STATUS unregisterEventNotificationWithData(PEVENTNOTIFICATION *, NvHandle, NvHandle, NvHandle, NvBool, NvP64);
NV_STATUS bindEventNotificationToSubdevice(PEVENTNOTIFICATION, NvHandle, NvU32);
NV_STATUS engineNonStallIntrNotify(OBJGPU *, NvU32);
NV_STATUS notifyEvents(OBJGPU*, EVENTNOTIFICATION*, NvU32, NvU32, NvU32, NV_STATUS, NvU32);
NV_STATUS engineNonStallIntrNotifyEvent(OBJGPU *, NvU32, NvHandle);
#endif // _EVENT_H_
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_EVENT_NVOC_H_

View File

@@ -0,0 +1,334 @@
#define NVOC_GENERIC_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_generic_engine_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x4bc329 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
void __nvoc_init_GenericEngineApi(GenericEngineApi*);
void __nvoc_init_funcTable_GenericEngineApi(GenericEngineApi*);
NV_STATUS __nvoc_ctor_GenericEngineApi(GenericEngineApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_GenericEngineApi(GenericEngineApi*);
void __nvoc_dtor_GenericEngineApi(GenericEngineApi*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GenericEngineApi;
static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_GenericEngineApi = {
/*pClassDef=*/ &__nvoc_class_def_GenericEngineApi,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GenericEngineApi,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_RsResource = {
/*pClassDef=*/ &__nvoc_class_def_RsResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
};
static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_RmResourceCommon = {
/*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};
static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_RmResource = {
/*pClassDef=*/ &__nvoc_class_def_RmResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource),
};
static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_GpuResource = {
/*pClassDef=*/ &__nvoc_class_def_GpuResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource),
};
static const struct NVOC_CASTINFO __nvoc_castinfo_GenericEngineApi = {
/*numRelatives=*/ 6,
/*relatives=*/ {
&__nvoc_rtti_GenericEngineApi_GenericEngineApi,
&__nvoc_rtti_GenericEngineApi_GpuResource,
&__nvoc_rtti_GenericEngineApi_RmResource,
&__nvoc_rtti_GenericEngineApi_RmResourceCommon,
&__nvoc_rtti_GenericEngineApi_RsResource,
&__nvoc_rtti_GenericEngineApi_Object,
},
};
const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi =
{
/*classInfo=*/ {
/*size=*/ sizeof(GenericEngineApi),
/*classId=*/ classId(GenericEngineApi),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "GenericEngineApi",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GenericEngineApi,
/*pCastInfo=*/ &__nvoc_castinfo_GenericEngineApi,
/*pExportInfo=*/ &__nvoc_export_info_GenericEngineApi
};
static NV_STATUS __nvoc_thunk_GenericEngineApi_gpuresMap(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return genapiMap((struct GenericEngineApi *)(((unsigned char *)pGenericEngineApi) - __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, pParams, pCpuMapping);
}
static NV_STATUS __nvoc_thunk_GenericEngineApi_gpuresGetMapAddrSpace(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return genapiGetMapAddrSpace((struct GenericEngineApi *)(((unsigned char *)pGenericEngineApi) - __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
}
static NV_STATUS __nvoc_thunk_GenericEngineApi_gpuresControl(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return genapiControl((struct GenericEngineApi *)(((unsigned char *)pGenericEngineApi) - __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, pParams);
}
static NvBool __nvoc_thunk_GpuResource_genapiShareCallback(struct GenericEngineApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}
static NV_STATUS __nvoc_thunk_GpuResource_genapiUnmap(struct GenericEngineApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, pCpuMapping);
}
static NV_STATUS __nvoc_thunk_RmResource_genapiGetMemInterMapParams(struct GenericEngineApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_genapiGetMemoryMappingDescriptor(struct GenericEngineApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), ppMemDesc);
}
static NvHandle __nvoc_thunk_GpuResource_genapiGetInternalObjectHandle(struct GenericEngineApi *pGpuResource) {
return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_genapiControlFilter(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pCallContext, pParams);
}
static void __nvoc_thunk_RsResource_genapiAddAdditionalDependants(struct RsClient *pClient, struct GenericEngineApi *pResource, RsResourceRef *pReference) {
resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pReference);
}
static NvU32 __nvoc_thunk_RsResource_genapiGetRefCount(struct GenericEngineApi *pResource) {
return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RmResource_genapiCheckMemInterUnmap(struct GenericEngineApi *pRmResource, NvBool bSubdeviceHandleProvided) {
return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), bSubdeviceHandleProvided);
}
static NV_STATUS __nvoc_thunk_RsResource_genapiMapTo(struct GenericEngineApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_genapiControl_Prologue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_GpuResource_genapiGetRegBaseOffsetAndSize(struct GenericEngineApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), pGpu, pOffset, pSize);
}
static NvBool __nvoc_thunk_RsResource_genapiCanCopy(struct GenericEngineApi *pResource) {
return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_GpuResource_genapiInternalControlForward(struct GenericEngineApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), command, pParams, size);
}
static void __nvoc_thunk_RsResource_genapiPreDestruct(struct GenericEngineApi *pResource) {
resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_genapiUnmapFrom(struct GenericEngineApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pParams);
}
static void __nvoc_thunk_RmResource_genapiControl_Epilogue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RsResource_genapiControlLookup(struct GenericEngineApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pParams, ppEntry);
}
static NvBool __nvoc_thunk_RmResource_genapiAccessCallback(struct GenericEngineApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
const struct NVOC_EXPORT_INFO __nvoc_export_info_GenericEngineApi =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_GpuResource(GpuResource*);
void __nvoc_dtor_GenericEngineApi(GenericEngineApi *pThis) {
__nvoc_genapiDestruct(pThis);
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
PORT_UNREFERENCED_VARIABLE(pThis);
}
void __nvoc_init_dataField_GenericEngineApi(GenericEngineApi *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_GenericEngineApi(GenericEngineApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_GenericEngineApi_fail_GpuResource;
__nvoc_init_dataField_GenericEngineApi(pThis);
status = __nvoc_genapiConstruct(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_GenericEngineApi_fail__init;
goto __nvoc_ctor_GenericEngineApi_exit; // Success
__nvoc_ctor_GenericEngineApi_fail__init:
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_ctor_GenericEngineApi_fail_GpuResource:
__nvoc_ctor_GenericEngineApi_exit:
return status;
}
static void __nvoc_init_funcTable_GenericEngineApi_1(GenericEngineApi *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
pThis->__genapiMap__ = &genapiMap_IMPL;
pThis->__genapiGetMapAddrSpace__ = &genapiGetMapAddrSpace_IMPL;
pThis->__genapiControl__ = &genapiControl_IMPL;
pThis->__nvoc_base_GpuResource.__gpuresMap__ = &__nvoc_thunk_GenericEngineApi_gpuresMap;
pThis->__nvoc_base_GpuResource.__gpuresGetMapAddrSpace__ = &__nvoc_thunk_GenericEngineApi_gpuresGetMapAddrSpace;
pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_GenericEngineApi_gpuresControl;
pThis->__genapiShareCallback__ = &__nvoc_thunk_GpuResource_genapiShareCallback;
pThis->__genapiUnmap__ = &__nvoc_thunk_GpuResource_genapiUnmap;
pThis->__genapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_genapiGetMemInterMapParams;
pThis->__genapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_genapiGetMemoryMappingDescriptor;
pThis->__genapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_genapiGetInternalObjectHandle;
pThis->__genapiControlFilter__ = &__nvoc_thunk_RsResource_genapiControlFilter;
pThis->__genapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_genapiAddAdditionalDependants;
pThis->__genapiGetRefCount__ = &__nvoc_thunk_RsResource_genapiGetRefCount;
pThis->__genapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_genapiCheckMemInterUnmap;
pThis->__genapiMapTo__ = &__nvoc_thunk_RsResource_genapiMapTo;
pThis->__genapiControl_Prologue__ = &__nvoc_thunk_RmResource_genapiControl_Prologue;
pThis->__genapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_genapiGetRegBaseOffsetAndSize;
pThis->__genapiCanCopy__ = &__nvoc_thunk_RsResource_genapiCanCopy;
pThis->__genapiInternalControlForward__ = &__nvoc_thunk_GpuResource_genapiInternalControlForward;
pThis->__genapiPreDestruct__ = &__nvoc_thunk_RsResource_genapiPreDestruct;
pThis->__genapiUnmapFrom__ = &__nvoc_thunk_RsResource_genapiUnmapFrom;
pThis->__genapiControl_Epilogue__ = &__nvoc_thunk_RmResource_genapiControl_Epilogue;
pThis->__genapiControlLookup__ = &__nvoc_thunk_RsResource_genapiControlLookup;
pThis->__genapiAccessCallback__ = &__nvoc_thunk_RmResource_genapiAccessCallback;
}
void __nvoc_init_funcTable_GenericEngineApi(GenericEngineApi *pThis) {
__nvoc_init_funcTable_GenericEngineApi_1(pThis);
}
void __nvoc_init_GpuResource(GpuResource*);
void __nvoc_init_GenericEngineApi(GenericEngineApi *pThis) {
pThis->__nvoc_pbase_GenericEngineApi = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
__nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_init_funcTable_GenericEngineApi(pThis);
}
NV_STATUS __nvoc_objCreate_GenericEngineApi(GenericEngineApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status;
Object *pParentObj;
GenericEngineApi *pThis;
pThis = portMemAllocNonPaged(sizeof(GenericEngineApi));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(GenericEngineApi));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GenericEngineApi);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_GenericEngineApi(pThis);
status = __nvoc_ctor_GenericEngineApi(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_objCreate_GenericEngineApi_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_GenericEngineApi_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
NV_STATUS __nvoc_objCreateDynamic_GenericEngineApi(GenericEngineApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
NV_STATUS status;
struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
status = __nvoc_objCreate_GenericEngineApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
return status;
}

View File

@@ -0,0 +1,237 @@
#ifndef _G_GENERIC_ENGINE_NVOC_H_
#define _G_GENERIC_ENGINE_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_generic_engine_nvoc.h"
#ifndef _GENERICENGINEAPI_H_
#define _GENERICENGINEAPI_H_
#include "gpu/gpu_resource.h"
/*!
* RM internal class providing a generic engine API to RM clients (e.g.:
* GF100_SUBDEVICE_GRAPHICS and GF100_SUBDEVICE_FB). Classes are primarily used
* for exposing BAR0 mappings and controls.
*/
#ifdef NVOC_GENERIC_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct GenericEngineApi {
const struct NVOC_RTTI *__nvoc_rtti;
struct GpuResource __nvoc_base_GpuResource;
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct GpuResource *__nvoc_pbase_GpuResource;
struct GenericEngineApi *__nvoc_pbase_GenericEngineApi;
NV_STATUS (*__genapiMap__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
NV_STATUS (*__genapiGetMapAddrSpace__)(struct GenericEngineApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
NV_STATUS (*__genapiControl__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NvBool (*__genapiShareCallback__)(struct GenericEngineApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__genapiUnmap__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RsCpuMapping *);
NV_STATUS (*__genapiGetMemInterMapParams__)(struct GenericEngineApi *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__genapiGetMemoryMappingDescriptor__)(struct GenericEngineApi *, struct MEMORY_DESCRIPTOR **);
NvHandle (*__genapiGetInternalObjectHandle__)(struct GenericEngineApi *);
NV_STATUS (*__genapiControlFilter__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
void (*__genapiAddAdditionalDependants__)(struct RsClient *, struct GenericEngineApi *, RsResourceRef *);
NvU32 (*__genapiGetRefCount__)(struct GenericEngineApi *);
NV_STATUS (*__genapiCheckMemInterUnmap__)(struct GenericEngineApi *, NvBool);
NV_STATUS (*__genapiMapTo__)(struct GenericEngineApi *, RS_RES_MAP_TO_PARAMS *);
NV_STATUS (*__genapiControl_Prologue__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__genapiGetRegBaseOffsetAndSize__)(struct GenericEngineApi *, struct OBJGPU *, NvU32 *, NvU32 *);
NvBool (*__genapiCanCopy__)(struct GenericEngineApi *);
NV_STATUS (*__genapiInternalControlForward__)(struct GenericEngineApi *, NvU32, void *, NvU32);
void (*__genapiPreDestruct__)(struct GenericEngineApi *);
NV_STATUS (*__genapiUnmapFrom__)(struct GenericEngineApi *, RS_RES_UNMAP_FROM_PARAMS *);
void (*__genapiControl_Epilogue__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__genapiControlLookup__)(struct GenericEngineApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NvBool (*__genapiAccessCallback__)(struct GenericEngineApi *, struct RsClient *, void *, RsAccessRight);
};
#ifndef __NVOC_CLASS_GenericEngineApi_TYPEDEF__
#define __NVOC_CLASS_GenericEngineApi_TYPEDEF__
typedef struct GenericEngineApi GenericEngineApi;
#endif /* __NVOC_CLASS_GenericEngineApi_TYPEDEF__ */
#ifndef __nvoc_class_id_GenericEngineApi
#define __nvoc_class_id_GenericEngineApi 0x4bc329
#endif /* __nvoc_class_id_GenericEngineApi */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi;
#define __staticCast_GenericEngineApi(pThis) \
((pThis)->__nvoc_pbase_GenericEngineApi)
#ifdef __nvoc_generic_engine_h_disabled
#define __dynamicCast_GenericEngineApi(pThis) ((GenericEngineApi*)NULL)
#else //__nvoc_generic_engine_h_disabled
#define __dynamicCast_GenericEngineApi(pThis) \
((GenericEngineApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GenericEngineApi)))
#endif //__nvoc_generic_engine_h_disabled
NV_STATUS __nvoc_objCreateDynamic_GenericEngineApi(GenericEngineApi**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_GenericEngineApi(GenericEngineApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_GenericEngineApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_GenericEngineApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
#define genapiMap(pGenericEngineApi, pCallContext, pParams, pCpuMapping) genapiMap_DISPATCH(pGenericEngineApi, pCallContext, pParams, pCpuMapping)
#define genapiGetMapAddrSpace(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace) genapiGetMapAddrSpace_DISPATCH(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace)
#define genapiControl(pGenericEngineApi, pCallContext, pParams) genapiControl_DISPATCH(pGenericEngineApi, pCallContext, pParams)
#define genapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) genapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define genapiUnmap(pGpuResource, pCallContext, pCpuMapping) genapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define genapiGetMemInterMapParams(pRmResource, pParams) genapiGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define genapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) genapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define genapiGetInternalObjectHandle(pGpuResource) genapiGetInternalObjectHandle_DISPATCH(pGpuResource)
#define genapiControlFilter(pResource, pCallContext, pParams) genapiControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define genapiAddAdditionalDependants(pClient, pResource, pReference) genapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define genapiGetRefCount(pResource) genapiGetRefCount_DISPATCH(pResource)
#define genapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) genapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define genapiMapTo(pResource, pParams) genapiMapTo_DISPATCH(pResource, pParams)
#define genapiControl_Prologue(pResource, pCallContext, pParams) genapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define genapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) genapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
#define genapiCanCopy(pResource) genapiCanCopy_DISPATCH(pResource)
#define genapiInternalControlForward(pGpuResource, command, pParams, size) genapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define genapiPreDestruct(pResource) genapiPreDestruct_DISPATCH(pResource)
#define genapiUnmapFrom(pResource, pParams) genapiUnmapFrom_DISPATCH(pResource, pParams)
#define genapiControl_Epilogue(pResource, pCallContext, pParams) genapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define genapiControlLookup(pResource, pParams, ppEntry) genapiControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define genapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) genapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
NV_STATUS genapiMap_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping);
static inline NV_STATUS genapiMap_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return pGenericEngineApi->__genapiMap__(pGenericEngineApi, pCallContext, pParams, pCpuMapping);
}
NV_STATUS genapiGetMapAddrSpace_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace);
static inline NV_STATUS genapiGetMapAddrSpace_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return pGenericEngineApi->__genapiGetMapAddrSpace__(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace);
}
NV_STATUS genapiControl_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
static inline NV_STATUS genapiControl_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pGenericEngineApi->__genapiControl__(pGenericEngineApi, pCallContext, pParams);
}
static inline NvBool genapiShareCallback_DISPATCH(struct GenericEngineApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pGpuResource->__genapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}
static inline NV_STATUS genapiUnmap_DISPATCH(struct GenericEngineApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__genapiUnmap__(pGpuResource, pCallContext, pCpuMapping);
}
static inline NV_STATUS genapiGetMemInterMapParams_DISPATCH(struct GenericEngineApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return pRmResource->__genapiGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS genapiGetMemoryMappingDescriptor_DISPATCH(struct GenericEngineApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return pRmResource->__genapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline NvHandle genapiGetInternalObjectHandle_DISPATCH(struct GenericEngineApi *pGpuResource) {
return pGpuResource->__genapiGetInternalObjectHandle__(pGpuResource);
}
static inline NV_STATUS genapiControlFilter_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__genapiControlFilter__(pResource, pCallContext, pParams);
}
// ---------------------------------------------------------------------------
// NVOC-generated virtual-dispatch thunks for GenericEngineApi ("genapi").
// Each *_DISPATCH helper forwards its arguments to the per-object function
// pointer (__genapiXxx__) installed by the NVOC func-table initializer,
// giving C callers virtual-call semantics. Generated code: the forwarding
// must stay in exact lock-step with the func-table layout.
// ---------------------------------------------------------------------------
static inline void genapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GenericEngineApi *pResource, RsResourceRef *pReference) {
pResource->__genapiAddAdditionalDependants__(pClient, pResource, pReference);
}
// Returns the current reference count of the resource.
static inline NvU32 genapiGetRefCount_DISPATCH(struct GenericEngineApi *pResource) {
return pResource->__genapiGetRefCount__(pResource);
}
static inline NV_STATUS genapiCheckMemInterUnmap_DISPATCH(struct GenericEngineApi *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__genapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NV_STATUS genapiMapTo_DISPATCH(struct GenericEngineApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__genapiMapTo__(pResource, pParams);
}
// Control-call prologue hook; runs before the control handler is invoked.
static inline NV_STATUS genapiControl_Prologue_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__genapiControl_Prologue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS genapiGetRegBaseOffsetAndSize_DISPATCH(struct GenericEngineApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return pGpuResource->__genapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize);
}
static inline NvBool genapiCanCopy_DISPATCH(struct GenericEngineApi *pResource) {
return pResource->__genapiCanCopy__(pResource);
}
static inline NV_STATUS genapiInternalControlForward_DISPATCH(struct GenericEngineApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return pGpuResource->__genapiInternalControlForward__(pGpuResource, command, pParams, size);
}
// Pre-destruction hook; no return value.
static inline void genapiPreDestruct_DISPATCH(struct GenericEngineApi *pResource) {
pResource->__genapiPreDestruct__(pResource);
}
static inline NV_STATUS genapiUnmapFrom_DISPATCH(struct GenericEngineApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pResource->__genapiUnmapFrom__(pResource, pParams);
}
// Control-call epilogue hook; runs after the control handler returns.
static inline void genapiControl_Epilogue_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__genapiControl_Epilogue__(pResource, pCallContext, pParams);
}
// Looks up the exported-method table entry for a control command.
static inline NV_STATUS genapiControlLookup_DISPATCH(struct GenericEngineApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__genapiControlLookup__(pResource, pParams, ppEntry);
}
// Access-rights check callback; NV_TRUE grants the requested access right.
static inline NvBool genapiAccessCallback_DISPATCH(struct GenericEngineApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pResource->__genapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
NV_STATUS genapiConstruct_IMPL(struct GenericEngineApi *arg_pGenericEngineApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_genapiConstruct(arg_pGenericEngineApi, arg_pCallContext, arg_pParams) genapiConstruct_IMPL(arg_pGenericEngineApi, arg_pCallContext, arg_pParams)
void genapiDestruct_IMPL(struct GenericEngineApi *pGenericEngineApi);
#define __nvoc_genapiDestruct(pGenericEngineApi) genapiDestruct_IMPL(pGenericEngineApi)
#undef PRIVATE_FIELD
#endif // _GENERICENGINEAPI_H_
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_GENERIC_ENGINE_NVOC_H_

View File

@@ -0,0 +1,59 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <core/core.h>
#include <gpu/gpu.h>
#include <gpu/eng_desc.h>
#include <g_allclasses.h>
/*!
 * Returns the HAL class-descriptor table for the T234D (Orin display) chip.
 *
 * Each entry pairs an exported class ID with the engine descriptor that
 * implements it; all display classes here route to ENG_KERNEL_DISPLAY.
 * The table is function-local static const, so returning its address is safe.
 *
 * @param[in]  pGpu                  GPU object (unused; HAL selection only)
 * @param[out] pNumClassDescriptors  Number of entries in the returned table
 *
 * @return Pointer to the static class-descriptor array.
 */
const CLASSDESCRIPTOR *
gpuGetClassDescriptorList_T234D(POBJGPU pGpu, NvU32 *pNumClassDescriptors)
{
static const CLASSDESCRIPTOR halT234DClassDescriptorList[] = {
{ GF100_HDACODEC, ENG_HDACODEC },
{ NV01_MEMORY_SYNCPOINT, ENG_DMA },
{ NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY },
{ NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY },
{ NVC670_DISPLAY, ENG_KERNEL_DISPLAY },
{ NVC671_DISP_SF_USER, ENG_KERNEL_DISPLAY },
{ NVC673_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY },
{ NVC67A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY },
{ NVC67B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY },
{ NVC67D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY },
{ NVC67E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY },
{ NVC77F_ANY_CHANNEL_DMA, ENG_KERNEL_DISPLAY },
};
// Descriptor count derived from the array itself so it cannot drift.
#define HALT234D_NUM_CLASS_DESCS (sizeof(halT234DClassDescriptorList) / sizeof(CLASSDESCRIPTOR))
// NOTE(review): 16 exceeds the 12 descriptor entries above — presumably this
// counts exported classes rather than descriptors; confirm against classlist.
#define HALT234D_NUM_CLASSES 16
ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= HALT234D_NUM_CLASSES);
*pNumClassDescriptors = HALT234D_NUM_CLASS_DESCS;
return halT234DClassDescriptorList;
}

View File

@@ -0,0 +1,154 @@
#define NVOC_GPU_DB_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_gpu_db_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xcdd250 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuDb;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
void __nvoc_init_GpuDb(GpuDb*);
void __nvoc_init_funcTable_GpuDb(GpuDb*);
NV_STATUS __nvoc_ctor_GpuDb(GpuDb*);
void __nvoc_init_dataField_GpuDb(GpuDb*);
void __nvoc_dtor_GpuDb(GpuDb*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuDb;
// NVOC RTTI record for GpuDb itself (offset 0: identity cast).
static const struct NVOC_RTTI __nvoc_rtti_GpuDb_GpuDb = {
/*pClassDef=*/ &__nvoc_class_def_GpuDb,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuDb,
/*offset=*/ 0,
};
// RTTI record for the embedded Object base; offset locates the base within GpuDb.
static const struct NVOC_RTTI __nvoc_rtti_GpuDb_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(GpuDb, __nvoc_base_Object),
};
// Cast table used by dynamicCast: GpuDb and its single base class, Object.
static const struct NVOC_CASTINFO __nvoc_castinfo_GpuDb = {
/*numRelatives=*/ 2,
/*relatives=*/ {
&__nvoc_rtti_GpuDb_GpuDb,
&__nvoc_rtti_GpuDb_Object,
},
};
// Class definition: size, class id, creation entry point, cast/export tables.
const struct NVOC_CLASS_DEF __nvoc_class_def_GpuDb =
{
/*classInfo=*/ {
/*size=*/ sizeof(GpuDb),
/*classId=*/ classId(GpuDb),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "GpuDb",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuDb,
/*pCastInfo=*/ &__nvoc_castinfo_GpuDb,
/*pExportInfo=*/ &__nvoc_export_info_GpuDb
};
// GpuDb exports no RM control methods.
const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuDb =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_Object(Object*);
// Destructor: runs GpuDb's own destruct hook, then the Object base destructor.
void __nvoc_dtor_GpuDb(GpuDb *pThis) {
__nvoc_gpudbDestruct(pThis);
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Data-field initializer: GpuDb has no NVOC-managed default field values.
void __nvoc_init_dataField_GpuDb(GpuDb *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_Object(Object* );
// Constructor: base Object first, then data fields, then gpudbConstruct_IMPL.
// On failure the already-constructed base is torn down via the goto chain.
NV_STATUS __nvoc_ctor_GpuDb(GpuDb *pThis) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
if (status != NV_OK) goto __nvoc_ctor_GpuDb_fail_Object;
__nvoc_init_dataField_GpuDb(pThis);
status = __nvoc_gpudbConstruct(pThis);
if (status != NV_OK) goto __nvoc_ctor_GpuDb_fail__init;
goto __nvoc_ctor_GpuDb_exit; // Success
__nvoc_ctor_GpuDb_fail__init:
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_GpuDb_fail_Object:
__nvoc_ctor_GpuDb_exit:
return status;
}
// Func-table initializer: GpuDb declares no virtual methods, so nothing to wire.
static void __nvoc_init_funcTable_GpuDb_1(GpuDb *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
void __nvoc_init_funcTable_GpuDb(GpuDb *pThis) {
__nvoc_init_funcTable_GpuDb_1(pThis);
}
void __nvoc_init_Object(Object*);
// Sets up base-class pointers, initializes the Object base, then the func table.
void __nvoc_init_GpuDb(GpuDb *pThis) {
pThis->__nvoc_pbase_GpuDb = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
__nvoc_init_Object(&pThis->__nvoc_base_Object);
__nvoc_init_funcTable_GpuDb(pThis);
}
/*!
 * Allocates, initializes and constructs a GpuDb object.
 *
 * @param[out] ppThis       Receives the new object on NV_OK (unmodified on failure)
 * @param[in]  pParent      Optional parent; the new object is added as its child
 * @param[in]  createFlags  NVOC_OBJ_CREATE_FLAGS_* creation flags
 *
 * @return NV_OK on success; NV_ERR_NO_MEMORY if allocation fails;
 *         NV_ERR_INVALID_OBJECT_PARENT if pParent is not Object-derived;
 *         otherwise the constructor's error code.
 */
NV_STATUS __nvoc_objCreate_GpuDb(GpuDb **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    GpuDb *pThis;

    pThis = portMemAllocNonPaged(sizeof(GpuDb));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(GpuDb));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GpuDb);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        // Guard against a parent that is not derived from Object: the original
        // code passed a possibly-NULL pointer straight to objAddChild.
        if (pParentObj == NULL)
        {
            status = NV_ERR_INVALID_OBJECT_PARENT;
            goto __nvoc_objCreate_GpuDb_cleanup;
        }
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_GpuDb(pThis);
    status = __nvoc_ctor_GpuDb(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_GpuDb_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_GpuDb_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}
// Dynamic-creation entry point for GpuDb. GpuDb takes no variadic
// construction arguments, so the va_list is intentionally ignored and the
// call forwards directly to the typed creation routine.
NV_STATUS __nvoc_objCreateDynamic_GpuDb(GpuDb **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    return __nvoc_objCreate_GpuDb(ppThis, pParent, createFlags);
}

View File

@@ -0,0 +1,154 @@
#ifndef _G_GPU_DB_NVOC_H_
#define _G_GPU_DB_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_gpu_db_nvoc.h"
#ifndef GPU_DB_H
#define GPU_DB_H
#include "core/core.h"
#include "containers/list.h"
#include "gpu/gpu_uuid.h"
typedef struct NBADDR NBADDR;
// ****************************************************************************
// Type Definitions
// ****************************************************************************
//
// The GPU database object is used to encapsulate the GPUINFO
//
/*!
* @brief Compute policy data for a GPU
* Saved policy information for a GPU that can be retrieved later
*/
typedef struct GPU_COMPUTE_POLICY_INFO
{
//
// Timeslice config for channels/TSG's on a runlist. The timeslice configs
// are restricted to four levels : default, short, medium and long.
//
NvU32 timeslice;
// Future policies to be added here
} GPU_COMPUTE_POLICY_INFO;
// PCI location of a GPU (or its upstream port); bValid marks whether the
// remaining fields were populated.
typedef struct
{
NvU32 domain;
NvU8 bus;
NvU8 device;
NvU8 function;
NvBool bValid;
} PCI_PORT_INFO;
#define GPUDB_CLK_PROP_TOP_POLS_COUNT 1
/*!
* @brief Clock Propagation Topology Policies control data
*/
typedef struct
{
// Chosen index per clock-propagation topology policy.
NvU8 chosenIdx[GPUDB_CLK_PROP_TOP_POLS_COUNT];
} GPU_CLK_PROP_TOP_POLS_CONTROL;
// One GPU database record: identified by SHA1 UUID, carrying the GPU's PCI
// location, saved compute policy, shutdown state and clock-policy controls.
typedef struct
{
NvU8 uuid[RM_SHA1_GID_SIZE];
PCI_PORT_INFO pciPortInfo;
PCI_PORT_INFO upstreamPciPortInfo;
GPU_COMPUTE_POLICY_INFO policyInfo;
NvBool bShutdownState;
GPU_CLK_PROP_TOP_POLS_CONTROL clkPropTopPolsControl;
} GPU_INFO_LIST_NODE, *PGPU_INFO_LIST_NODE;
// Intrusive-list type GpuInfoList holding GPU_INFO_LIST_NODE records.
MAKE_LIST(GpuInfoList, GPU_INFO_LIST_NODE);
#ifdef NVOC_GPU_DB_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// GpuDb: process-lifetime database of GPUs that have been registered with RM,
// keyed by UUID. Derives from Object via NVOC single inheritance.
struct GpuDb {
const struct NVOC_RTTI *__nvoc_rtti;
struct Object __nvoc_base_Object;
struct Object *__nvoc_pbase_Object;
struct GpuDb *__nvoc_pbase_GpuDb;
// List of per-GPU records (GPU_INFO_LIST_NODE).
GpuInfoList gpuList;
// Mutex guarding gpuList; allocated by the constructor.
PORT_MUTEX *pLock;
};
#ifndef __NVOC_CLASS_GpuDb_TYPEDEF__
#define __NVOC_CLASS_GpuDb_TYPEDEF__
typedef struct GpuDb GpuDb;
#endif /* __NVOC_CLASS_GpuDb_TYPEDEF__ */
#ifndef __nvoc_class_id_GpuDb
#define __nvoc_class_id_GpuDb 0xcdd250
#endif /* __nvoc_class_id_GpuDb */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuDb;
#define __staticCast_GpuDb(pThis) \
((pThis)->__nvoc_pbase_GpuDb)
#ifdef __nvoc_gpu_db_h_disabled
#define __dynamicCast_GpuDb(pThis) ((GpuDb*)NULL)
#else //__nvoc_gpu_db_h_disabled
#define __dynamicCast_GpuDb(pThis) \
((GpuDb*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuDb)))
#endif //__nvoc_gpu_db_h_disabled
NV_STATUS __nvoc_objCreateDynamic_GpuDb(GpuDb**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_GpuDb(GpuDb**, Dynamic*, NvU32);
#define __objCreate_GpuDb(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_GpuDb((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
NV_STATUS gpudbConstruct_IMPL(struct GpuDb *arg_pGpuDb);
#define __nvoc_gpudbConstruct(arg_pGpuDb) gpudbConstruct_IMPL(arg_pGpuDb)
void gpudbDestruct_IMPL(struct GpuDb *pGpuDb);
#define __nvoc_gpudbDestruct(pGpuDb) gpudbDestruct_IMPL(pGpuDb)
#undef PRIVATE_FIELD
NV_STATUS gpudbRegisterGpu(const NvU8 *pUuid, const NBADDR *pUpstreamPortPciInfo, NvU64 pciInfo);
NV_STATUS gpudbSetGpuComputePolicyConfig(const NvU8 *uuid, NvU32 policyType, GPU_COMPUTE_POLICY_INFO *policyInfo);
NV_STATUS gpudbGetGpuComputePolicyConfigs(const NvU8 *uuid, GPU_COMPUTE_POLICY_INFO *policyInfo);
NV_STATUS gpudbSetClockPoliciesControl(const NvU8 *uuid, GPU_CLK_PROP_TOP_POLS_CONTROL *pControl);
NV_STATUS gpudbGetClockPoliciesControl(const NvU8 *uuid, GPU_CLK_PROP_TOP_POLS_CONTROL *pControl);
NV_STATUS gpudbSetShutdownState(const NvU8 *pUuid);
#endif // GPU_DB_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_GPU_DB_NVOC_H_

View File

@@ -0,0 +1,148 @@
#define NVOC_GPU_GROUP_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_gpu_group_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xe40531 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
void __nvoc_init_OBJGPUGRP(OBJGPUGRP*);
void __nvoc_init_funcTable_OBJGPUGRP(OBJGPUGRP*);
NV_STATUS __nvoc_ctor_OBJGPUGRP(OBJGPUGRP*);
void __nvoc_init_dataField_OBJGPUGRP(OBJGPUGRP*);
void __nvoc_dtor_OBJGPUGRP(OBJGPUGRP*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUGRP;
// NVOC RTTI record for OBJGPUGRP itself (offset 0: identity cast).
static const struct NVOC_RTTI __nvoc_rtti_OBJGPUGRP_OBJGPUGRP = {
/*pClassDef=*/ &__nvoc_class_def_OBJGPUGRP,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPUGRP,
/*offset=*/ 0,
};
// RTTI record for the embedded Object base class.
static const struct NVOC_RTTI __nvoc_rtti_OBJGPUGRP_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(OBJGPUGRP, __nvoc_base_Object),
};
// Cast table used by dynamicCast: OBJGPUGRP and its single base, Object.
static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGPUGRP = {
/*numRelatives=*/ 2,
/*relatives=*/ {
&__nvoc_rtti_OBJGPUGRP_OBJGPUGRP,
&__nvoc_rtti_OBJGPUGRP_Object,
},
};
// Class definition: size, class id, creation entry point, cast/export tables.
const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP =
{
/*classInfo=*/ {
/*size=*/ sizeof(OBJGPUGRP),
/*classId=*/ classId(OBJGPUGRP),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "OBJGPUGRP",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPUGRP,
/*pCastInfo=*/ &__nvoc_castinfo_OBJGPUGRP,
/*pExportInfo=*/ &__nvoc_export_info_OBJGPUGRP
};
// OBJGPUGRP exports no RM control methods.
const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUGRP =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_Object(Object*);
// Destructor: OBJGPUGRP has no destruct hook of its own; only the Object
// base destructor runs.
void __nvoc_dtor_OBJGPUGRP(OBJGPUGRP *pThis) {
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Data-field initializer: no NVOC-managed default field values.
void __nvoc_init_dataField_OBJGPUGRP(OBJGPUGRP *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_Object(Object* );
// Constructor: base Object first, then data fields. OBJGPUGRP has no
// construct hook, so nothing else can fail after the base constructor.
NV_STATUS __nvoc_ctor_OBJGPUGRP(OBJGPUGRP *pThis) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
if (status != NV_OK) goto __nvoc_ctor_OBJGPUGRP_fail_Object;
__nvoc_init_dataField_OBJGPUGRP(pThis);
goto __nvoc_ctor_OBJGPUGRP_exit; // Success
__nvoc_ctor_OBJGPUGRP_fail_Object:
__nvoc_ctor_OBJGPUGRP_exit:
return status;
}
// Func-table initializer: no virtual methods declared, nothing to wire.
static void __nvoc_init_funcTable_OBJGPUGRP_1(OBJGPUGRP *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
void __nvoc_init_funcTable_OBJGPUGRP(OBJGPUGRP *pThis) {
__nvoc_init_funcTable_OBJGPUGRP_1(pThis);
}
void __nvoc_init_Object(Object*);
// Sets up base-class pointers, initializes the Object base, then the func table.
void __nvoc_init_OBJGPUGRP(OBJGPUGRP *pThis) {
pThis->__nvoc_pbase_OBJGPUGRP = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
__nvoc_init_Object(&pThis->__nvoc_base_Object);
__nvoc_init_funcTable_OBJGPUGRP(pThis);
}
/*!
 * Allocates, initializes and constructs an OBJGPUGRP object.
 *
 * @param[out] ppThis       Receives the new object on NV_OK (unmodified on failure)
 * @param[in]  pParent      Optional parent; the new object is added as its child
 * @param[in]  createFlags  NVOC_OBJ_CREATE_FLAGS_* creation flags
 *
 * @return NV_OK on success; NV_ERR_NO_MEMORY if allocation fails;
 *         NV_ERR_INVALID_OBJECT_PARENT if pParent is not Object-derived;
 *         otherwise the constructor's error code.
 */
NV_STATUS __nvoc_objCreate_OBJGPUGRP(OBJGPUGRP **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    OBJGPUGRP *pThis;

    pThis = portMemAllocNonPaged(sizeof(OBJGPUGRP));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(OBJGPUGRP));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGPUGRP);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        // Guard against a parent that is not derived from Object: the original
        // code passed a possibly-NULL pointer straight to objAddChild.
        if (pParentObj == NULL)
        {
            status = NV_ERR_INVALID_OBJECT_PARENT;
            goto __nvoc_objCreate_OBJGPUGRP_cleanup;
        }
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_OBJGPUGRP(pThis);
    status = __nvoc_ctor_OBJGPUGRP(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_OBJGPUGRP_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_OBJGPUGRP_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}
// Dynamic-creation entry point for OBJGPUGRP. The class takes no variadic
// construction arguments, so the va_list is intentionally ignored and the
// call forwards directly to the typed creation routine.
NV_STATUS __nvoc_objCreateDynamic_OBJGPUGRP(OBJGPUGRP **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    return __nvoc_objCreate_OBJGPUGRP(ppThis, pParent, createFlags);
}

View File

@@ -0,0 +1,308 @@
#ifndef _G_GPU_GROUP_NVOC_H_
#define _G_GPU_GROUP_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_gpu_group_nvoc.h"
#ifndef GPU_GROUP_H
#define GPU_GROUP_H
/**************** Resource Manager Defines and Structures ******************\
* *
* Defines and structures used for GPUGRP Object. *
* *
\***************************************************************************/
#include "core/core.h"
#include "nvoc/object.h"
#include "nvlimits.h"
struct OBJVASPACE;
struct OBJGPU;
/*!
* @brief Specialization of @ref FOR_EACH_INDEX_IN_MASK for looping
* over each GPU in an instance bitmask and processing the GPU in
* unicast mode.
*
* @note This macro is constructed to handle 'continue' and 'break'
* statements but not 'return.' Do NOT return directly from the loop -
* use status variable and 'break' to safely abort.
*
* @param[in] maskWidth bit-width of the mask (allowed: 8, 16, 32, 64)
* @param[in,out] pGpu Local GPU variable to use.
* @param[in] mask GPU instance bitmask.
*/
// NOTE: FOR_EACH_GPU_IN_MASK_UC deliberately opens a scope/loop that the
// companion FOR_EACH_GPU_IN_MASK_UC_END macro closes; the two must always be
// used as a pair. The final statement of the first macro must NOT end with a
// line-continuation backslash — as written it spliced the following #define
// into this macro's replacement list, which is a preprocessing error.
#define FOR_EACH_GPU_IN_MASK_UC(maskWidth, pSys, pGpu, mask)        \
{                                                                   \
    NvU32 gpuInstance;                                              \
    NvBool bOrigBcState = NV_FALSE;                                 \
    NvBool bEntryBcState = NV_FALSE;                                \
    OBJGPU *pEntryGpu = pGpu;                                       \
    pGpu = NULL;                                                    \
    if (pEntryGpu != NULL)                                          \
    {                                                               \
        /* Remember the broadcast state at loop entry */            \
        bEntryBcState = gpumgrGetBcEnabledStatus(pEntryGpu);        \
    }                                                               \
    FOR_EACH_INDEX_IN_MASK(maskWidth, gpuInstance, mask)            \
    {                                                               \
        if (NULL != pGpu) /* continue */                            \
        {                                                           \
            /* Restore BC state of the previous iteration's GPU */  \
            gpumgrSetBcEnabledStatus(pGpu, bOrigBcState);           \
        }                                                           \
        pGpu = gpumgrGetGpu(gpuInstance);                           \
        if (pGpu == NULL)                                           \
        { /* We should never hit this assert */                     \
            NV_ASSERT(0); /* But it occurs very rarely */           \
            continue; /* It needs to be debugged */                 \
        }                                                           \
        bOrigBcState = gpumgrGetBcEnabledStatus(pGpu);              \
        /* Force unicast mode for the loop body */                  \
        gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);

// Closes the loop opened by FOR_EACH_GPU_IN_MASK_UC, restoring the BC state
// of the last visited GPU and the entry GPU pointer/state.
#define FOR_EACH_GPU_IN_MASK_UC_END                                 \
    }                                                               \
    FOR_EACH_INDEX_IN_MASK_END                                      \
    if (NULL != pGpu) /* break */                                   \
    {                                                               \
        gpumgrSetBcEnabledStatus(pGpu, bOrigBcState);               \
        pGpu = NULL;                                                \
    }                                                               \
    if (pEntryGpu != NULL)                                          \
    {                                                               \
        NV_ASSERT(bEntryBcState == gpumgrGetBcEnabledStatus(pEntryGpu));\
        pGpu = pEntryGpu;                                           \
    }                                                               \
}
// One node in the SLI video-link ordering for a GPU group.
typedef struct _def_vid_link_node
{
/*!
* GPU instance for this node
*/
NvU32 gpuInstance;
/*!
* DrPort that receives data from Child GPU
*/
NvU32 ParentDrPort;
/*!
* DrPort that sources data to a Parent GPU
*/
NvU32 ChildDrPort;
} SLILINKNODE;
typedef struct OBJGPUGRP *POBJGPUGRP;
#ifndef __NVOC_CLASS_OBJGPUGRP_TYPEDEF__
#define __NVOC_CLASS_OBJGPUGRP_TYPEDEF__
typedef struct OBJGPUGRP OBJGPUGRP;
#endif /* __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJGPUGRP
#define __nvoc_class_id_OBJGPUGRP 0xe40531
#endif /* __nvoc_class_id_OBJGPUGRP */
#ifdef NVOC_GPU_GROUP_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// OBJGPUGRP: a group of GPUs treated as one logical device (e.g. SLI),
// tracking the member mask, SLI link topology, broadcast state and the
// group-wide (global) virtual address space.
struct OBJGPUGRP {
const struct NVOC_RTTI *__nvoc_rtti;
struct Object __nvoc_base_Object;
struct Object *__nvoc_pbase_Object;
struct OBJGPUGRP *__nvoc_pbase_OBJGPUGRP;
// Bitmask of GPU instances in this group.
NvU32 gpuMask;
NvU32 gpuSliLinkMask;
NvU32 linkingGpuMask;
NvU32 attachedGpuMaskAtLinking;
// SLI link ordering; NOTE(review): size 8 is hard-coded — presumably the
// max subdevices per group; confirm against nvlimits.h.
SLILINKNODE SliLinkOrder[8];
NvU32 ConnectionCount;
NvU32 flags;
NvU32 displayFlags;
// Current broadcast-enabled state for the group.
NvBool bcEnabled;
// GPU that owns/represents the group.
struct OBJGPU *parentGpu;
// Group-wide virtual address space, if one was created.
struct OBJVASPACE *pGlobalVASpace;
};
#ifndef __NVOC_CLASS_OBJGPUGRP_TYPEDEF__
#define __NVOC_CLASS_OBJGPUGRP_TYPEDEF__
typedef struct OBJGPUGRP OBJGPUGRP;
#endif /* __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJGPUGRP
#define __nvoc_class_id_OBJGPUGRP 0xe40531
#endif /* __nvoc_class_id_OBJGPUGRP */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP;
#define __staticCast_OBJGPUGRP(pThis) \
((pThis)->__nvoc_pbase_OBJGPUGRP)
#ifdef __nvoc_gpu_group_h_disabled
#define __dynamicCast_OBJGPUGRP(pThis) ((OBJGPUGRP*)NULL)
#else //__nvoc_gpu_group_h_disabled
#define __dynamicCast_OBJGPUGRP(pThis) \
((OBJGPUGRP*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPUGRP)))
#endif //__nvoc_gpu_group_h_disabled
NV_STATUS __nvoc_objCreateDynamic_OBJGPUGRP(OBJGPUGRP**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_OBJGPUGRP(OBJGPUGRP**, Dynamic*, NvU32);
#define __objCreate_OBJGPUGRP(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_OBJGPUGRP((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
// ---------------------------------------------------------------------------
// NVOC-generated public API for OBJGPUGRP. For each method there is an _IMPL
// prototype plus either (a) a forwarding #define when the class is enabled,
// or (b) an inline stub that asserts and returns a failure value when the
// header is compiled with __nvoc_gpu_group_h_disabled. Generated scaffolding:
// the stub/define pairs must stay in lock-step with the _IMPL prototypes.
// ---------------------------------------------------------------------------
NV_STATUS gpugrpCreate_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask);
#ifdef __nvoc_gpu_group_h_disabled
static inline NV_STATUS gpugrpCreate(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask) {
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpCreate(pGpuGrp, gpuMask) gpugrpCreate_IMPL(pGpuGrp, gpuMask)
#endif //__nvoc_gpu_group_h_disabled
NV_STATUS gpugrpDestroy_IMPL(struct OBJGPUGRP *pGpuGrp);
#ifdef __nvoc_gpu_group_h_disabled
static inline NV_STATUS gpugrpDestroy(struct OBJGPUGRP *pGpuGrp) {
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpDestroy(pGpuGrp) gpugrpDestroy_IMPL(pGpuGrp)
#endif //__nvoc_gpu_group_h_disabled
// Accessors for the group's GPU membership mask.
NvU32 gpugrpGetGpuMask_IMPL(struct OBJGPUGRP *pGpuGrp);
#ifdef __nvoc_gpu_group_h_disabled
static inline NvU32 gpugrpGetGpuMask(struct OBJGPUGRP *pGpuGrp) {
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
return 0;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpGetGpuMask(pGpuGrp) gpugrpGetGpuMask_IMPL(pGpuGrp)
#endif //__nvoc_gpu_group_h_disabled
void gpugrpSetGpuMask_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask);
#ifdef __nvoc_gpu_group_h_disabled
static inline void gpugrpSetGpuMask(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask) {
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpSetGpuMask(pGpuGrp, gpuMask) gpugrpSetGpuMask_IMPL(pGpuGrp, gpuMask)
#endif //__nvoc_gpu_group_h_disabled
// Accessors for the group's broadcast-enabled state.
NvBool gpugrpGetBcEnabledState_IMPL(struct OBJGPUGRP *pGpuGrp);
#ifdef __nvoc_gpu_group_h_disabled
static inline NvBool gpugrpGetBcEnabledState(struct OBJGPUGRP *pGpuGrp) {
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
return NV_FALSE;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpGetBcEnabledState(pGpuGrp) gpugrpGetBcEnabledState_IMPL(pGpuGrp)
#endif //__nvoc_gpu_group_h_disabled
void gpugrpSetBcEnabledState_IMPL(struct OBJGPUGRP *pGpuGrp, NvBool bcState);
#ifdef __nvoc_gpu_group_h_disabled
static inline void gpugrpSetBcEnabledState(struct OBJGPUGRP *pGpuGrp, NvBool bcState) {
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpSetBcEnabledState(pGpuGrp, bcState) gpugrpSetBcEnabledState_IMPL(pGpuGrp, bcState)
#endif //__nvoc_gpu_group_h_disabled
// Accessors for the GPU that owns/represents the group.
void gpugrpSetParentGpu_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pParentGpu);
#ifdef __nvoc_gpu_group_h_disabled
static inline void gpugrpSetParentGpu(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pParentGpu) {
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpSetParentGpu(pGpuGrp, pParentGpu) gpugrpSetParentGpu_IMPL(pGpuGrp, pParentGpu)
#endif //__nvoc_gpu_group_h_disabled
struct OBJGPU *gpugrpGetParentGpu_IMPL(struct OBJGPUGRP *pGpuGrp);
#ifdef __nvoc_gpu_group_h_disabled
static inline struct OBJGPU *gpugrpGetParentGpu(struct OBJGPUGRP *pGpuGrp) {
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
return NULL;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpGetParentGpu(pGpuGrp) gpugrpGetParentGpu_IMPL(pGpuGrp)
#endif //__nvoc_gpu_group_h_disabled
// Creation/teardown/lookup of the group-wide (global) virtual address space.
NV_STATUS gpugrpCreateGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu, NvU32 vaspaceClass, NvU64 vaStart, NvU64 vaEnd, NvU32 vaspaceFlags, struct OBJVASPACE **ppGlobalVAS);
#ifdef __nvoc_gpu_group_h_disabled
static inline NV_STATUS gpugrpCreateGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu, NvU32 vaspaceClass, NvU64 vaStart, NvU64 vaEnd, NvU32 vaspaceFlags, struct OBJVASPACE **ppGlobalVAS) {
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpCreateGlobalVASpace(pGpuGrp, pGpu, vaspaceClass, vaStart, vaEnd, vaspaceFlags, ppGlobalVAS) gpugrpCreateGlobalVASpace_IMPL(pGpuGrp, pGpu, vaspaceClass, vaStart, vaEnd, vaspaceFlags, ppGlobalVAS)
#endif //__nvoc_gpu_group_h_disabled
NV_STATUS gpugrpDestroyGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu);
#ifdef __nvoc_gpu_group_h_disabled
static inline NV_STATUS gpugrpDestroyGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu) {
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpDestroyGlobalVASpace(pGpuGrp, pGpu) gpugrpDestroyGlobalVASpace_IMPL(pGpuGrp, pGpu)
#endif //__nvoc_gpu_group_h_disabled
NV_STATUS gpugrpGetGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJVASPACE **ppGlobalVAS);
#ifdef __nvoc_gpu_group_h_disabled
static inline NV_STATUS gpugrpGetGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJVASPACE **ppGlobalVAS) {
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpGetGlobalVASpace(pGpuGrp, ppGlobalVAS) gpugrpGetGlobalVASpace_IMPL(pGpuGrp, ppGlobalVAS)
#endif //__nvoc_gpu_group_h_disabled
NV_STATUS gpugrpGetGpuFromSubDeviceInstance_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 subDeviceInst, struct OBJGPU **ppGpu);
#ifdef __nvoc_gpu_group_h_disabled
static inline NV_STATUS gpugrpGetGpuFromSubDeviceInstance(struct OBJGPUGRP *pGpuGrp, NvU32 subDeviceInst, struct OBJGPU **ppGpu) {
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_group_h_disabled
#define gpugrpGetGpuFromSubDeviceInstance(pGpuGrp, subDeviceInst, ppGpu) gpugrpGetGpuFromSubDeviceInstance_IMPL(pGpuGrp, subDeviceInst, ppGpu)
#endif //__nvoc_gpu_group_h_disabled
#undef PRIVATE_FIELD
#endif // GPU_GROUP_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_GPU_GROUP_NVOC_H_

View File

@@ -0,0 +1,97 @@
#define NVOC_GPU_HALSPEC_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_gpu_halspec_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x34a6d6 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner;
void __nvoc_init_RmHalspecOwner(RmHalspecOwner*,
NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
NvU32 DispIpHal_ipver);
void __nvoc_init_funcTable_RmHalspecOwner(RmHalspecOwner*);
NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner*);
void __nvoc_init_dataField_RmHalspecOwner(RmHalspecOwner*);
void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmHalspecOwner;
// NVOC RTTI record for RmHalspecOwner (no base classes, offset 0).
static const struct NVOC_RTTI __nvoc_rtti_RmHalspecOwner_RmHalspecOwner = {
/*pClassDef=*/ &__nvoc_class_def_RmHalspecOwner,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmHalspecOwner,
/*offset=*/ 0,
};
// Cast table: only the class itself.
static const struct NVOC_CASTINFO __nvoc_castinfo_RmHalspecOwner = {
/*numRelatives=*/ 1,
/*relatives=*/ {
&__nvoc_rtti_RmHalspecOwner_RmHalspecOwner,
},
};
// Not instantiable because it's not derived from class "Object"
const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner =
{
/*classInfo=*/ {
/*size=*/ sizeof(RmHalspecOwner),
/*classId=*/ classId(RmHalspecOwner),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "RmHalspecOwner",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL,
/*pCastInfo=*/ &__nvoc_castinfo_RmHalspecOwner,
/*pExportInfo=*/ &__nvoc_export_info_RmHalspecOwner
};
// RmHalspecOwner exports no RM control methods.
const struct NVOC_EXPORT_INFO __nvoc_export_info_RmHalspecOwner =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
// Destructor: RmHalspecOwner owns no resources; nothing to tear down.
void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Data-field initializer: no NVOC-managed default field values.
void __nvoc_init_dataField_RmHalspecOwner(RmHalspecOwner *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Constructor: only runs the (empty) data-field initializer; cannot fail.
NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner *pThis) {
NV_STATUS status = NV_OK;
__nvoc_init_dataField_RmHalspecOwner(pThis);
goto __nvoc_ctor_RmHalspecOwner_exit; // Success
__nvoc_ctor_RmHalspecOwner_exit:
return status;
}
// Func-table initializer: no virtual methods declared, nothing to wire.
static void __nvoc_init_funcTable_RmHalspecOwner_1(RmHalspecOwner *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
void __nvoc_init_funcTable_RmHalspecOwner(RmHalspecOwner *pThis) {
__nvoc_init_funcTable_RmHalspecOwner_1(pThis);
}
// Initializes the three halspecs (chip, RM variant, display IP) from the
// caller-supplied identification parameters, then the (empty) func table.
void __nvoc_init_RmHalspecOwner(RmHalspecOwner *pThis,
NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
NvU32 DispIpHal_ipver) {
pThis->__nvoc_pbase_RmHalspecOwner = pThis;
__nvoc_init_halspec_ChipHal(&pThis->chipHal, ChipHal_arch, ChipHal_impl, ChipHal_hidrev);
__nvoc_init_halspec_RmVariantHal(&pThis->rmVariantHal, RmVariantHal_rmVariant);
__nvoc_init_halspec_DispIpHal(&pThis->dispIpHal, DispIpHal_ipver);
__nvoc_init_funcTable_RmHalspecOwner(pThis);
}

View File

@@ -0,0 +1,91 @@
#ifndef _G_GPU_HALSPEC_NVOC_H_
#define _G_GPU_HALSPEC_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_gpu_halspec_nvoc.h"
#ifndef GPU_HALSPEC_H
#define GPU_HALSPEC_H
#include "g_chips2halspec.h" // NVOC halspec, generated by rmconfig.pl
#ifdef NVOC_GPU_HALSPEC_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// Mix-in class that owns the per-GPU HAL specifiers. Classes that need
// HAL-dependent dispatch embed this and initialize it via
// __nvoc_init_RmHalspecOwner with the chip/variant/display identifiers.
struct RmHalspecOwner {
const struct NVOC_RTTI *__nvoc_rtti;                 // run-time type info (NVOC runtime)
struct RmHalspecOwner *__nvoc_pbase_RmHalspecOwner;  // self pointer for staticCast
struct ChipHal chipHal;             // seeded from ChipHal_arch/impl/hidrev
struct RmVariantHal rmVariantHal;   // seeded from RmVariantHal_rmVariant
struct DispIpHal dispIpHal;         // seeded from DispIpHal_ipver
};
#ifndef __NVOC_CLASS_RmHalspecOwner_TYPEDEF__
#define __NVOC_CLASS_RmHalspecOwner_TYPEDEF__
typedef struct RmHalspecOwner RmHalspecOwner;
#endif /* __NVOC_CLASS_RmHalspecOwner_TYPEDEF__ */
#ifndef __nvoc_class_id_RmHalspecOwner
#define __nvoc_class_id_RmHalspecOwner 0x34a6d6
#endif /* __nvoc_class_id_RmHalspecOwner */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner;
#define __staticCast_RmHalspecOwner(pThis) \
((pThis)->__nvoc_pbase_RmHalspecOwner)
#ifdef __nvoc_gpu_halspec_h_disabled
#define __dynamicCast_RmHalspecOwner(pThis) ((RmHalspecOwner*)NULL)
#else //__nvoc_gpu_halspec_h_disabled
#define __dynamicCast_RmHalspecOwner(pThis) \
((RmHalspecOwner*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmHalspecOwner)))
#endif //__nvoc_gpu_halspec_h_disabled
NV_STATUS __nvoc_objCreateDynamic_RmHalspecOwner(RmHalspecOwner**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_RmHalspecOwner(RmHalspecOwner**, Dynamic*, NvU32,
NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
NvU32 DispIpHal_ipver);
#define __objCreate_RmHalspecOwner(ppNewObj, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver) \
__nvoc_objCreate_RmHalspecOwner((ppNewObj), staticCast((pParent), Dynamic), (createFlags), ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver)
#undef PRIVATE_FIELD
#endif // GPU_HALSPEC_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_GPU_HALSPEC_NVOC_H_

View File

@@ -0,0 +1,322 @@
#define NVOC_GPU_MGMT_API_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_gpu_mgmt_api_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x376305 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
void __nvoc_init_GpuManagementApi(GpuManagementApi*);
void __nvoc_init_funcTable_GpuManagementApi(GpuManagementApi*);
NV_STATUS __nvoc_ctor_GpuManagementApi(GpuManagementApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_GpuManagementApi(GpuManagementApi*);
void __nvoc_dtor_GpuManagementApi(GpuManagementApi*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuManagementApi;
// Per-ancestor RTTI records for GpuManagementApi. Each record names the
// ancestor's class definition, the destructor to run when destructing
// from that base, and the byte offset of that base subobject within
// GpuManagementApi (0 for the most-derived class itself).
static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_GpuManagementApi = {
/*pClassDef=*/ &__nvoc_class_def_GpuManagementApi,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuManagementApi,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_RsResource = {
/*pClassDef=*/ &__nvoc_class_def_RsResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource),
};
static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_RmResourceCommon = {
/*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};
static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_RmResource = {
/*pClassDef=*/ &__nvoc_class_def_RmResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource),
};
// Cast table consumed by __nvoc_dynamicCast: the derived class first,
// then ancestors (GpuManagementApi -> RmResource -> RmResourceCommon
// -> RsResource -> Object).
static const struct NVOC_CASTINFO __nvoc_castinfo_GpuManagementApi = {
/*numRelatives=*/ 5,
/*relatives=*/ {
&__nvoc_rtti_GpuManagementApi_GpuManagementApi,
&__nvoc_rtti_GpuManagementApi_RmResource,
&__nvoc_rtti_GpuManagementApi_RmResourceCommon,
&__nvoc_rtti_GpuManagementApi_RsResource,
&__nvoc_rtti_GpuManagementApi_Object,
},
};
// NVOC class definition for GpuManagementApi: size/id/name metadata plus
// the dynamic-creation entry point, cast table, and exported-method table.
const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi =
{
/*classInfo=*/ {
/*size=*/ sizeof(GpuManagementApi),
/*classId=*/ classId(GpuManagementApi),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "GpuManagementApi",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuManagementApi,
/*pCastInfo=*/ &__nvoc_castinfo_GpuManagementApi,
/*pExportInfo=*/ &__nvoc_export_info_GpuManagementApi
};
// Base-class thunks. Each thunk adapts a GpuManagementApi* receiver to the
// ancestor (RmResource or RsResource) implementation by adding the RTTI
// byte offset of that base subobject, then forwarding all arguments.
// These are the default virtual-method bodies installed by
// __nvoc_init_funcTable_GpuManagementApi_1 for slots the class does not
// override.
static NvBool __nvoc_thunk_RmResource_gpumgmtapiShareCallback(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}
static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiCheckMemInterUnmap(struct GpuManagementApi *pRmResource, NvBool bSubdeviceHandleProvided) {
return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), bSubdeviceHandleProvided);
}
static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiControl(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiGetMemInterMapParams(struct GpuManagementApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiGetMemoryMappingDescriptor(struct GpuManagementApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), ppMemDesc);
}
static NvU32 __nvoc_thunk_RsResource_gpumgmtapiGetRefCount(struct GpuManagementApi *pResource) {
return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiControlFilter(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pParams);
}
// Note: this thunk's receiver is the second parameter, matching the
// resAddAdditionalDependants(pClient, pResource, pReference) signature.
static void __nvoc_thunk_RsResource_gpumgmtapiAddAdditionalDependants(struct RsClient *pClient, struct GpuManagementApi *pResource, RsResourceRef *pReference) {
resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pReference);
}
static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiUnmap(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pCpuMapping);
}
static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiControl_Prologue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pCallContext, pParams);
}
static NvBool __nvoc_thunk_RsResource_gpumgmtapiCanCopy(struct GpuManagementApi *pResource) {
return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiMapTo(struct GpuManagementApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pParams);
}
static void __nvoc_thunk_RsResource_gpumgmtapiPreDestruct(struct GpuManagementApi *pResource) {
resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiUnmapFrom(struct GpuManagementApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pParams);
}
static void __nvoc_thunk_RmResource_gpumgmtapiControl_Epilogue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiControlLookup(struct GpuManagementApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pParams, ppEntry);
}
static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiMap(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pParams, pCpuMapping);
}
static NvBool __nvoc_thunk_RmResource_gpumgmtapiAccessCallback(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
// Default: no exported method is disabled unless the build defines its own
// NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG predicate.
#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG)
#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0)
#endif
// RM-control export table for GpuManagementApi. One entry: control command
// 0x200101 (NV0020_CTRL_CMD_GPU_MGMT_SET_SHUTDOWN_STATE) dispatched to
// gpumgmtapiCtrlCmdSetShutdownState_IMPL unless disabled by flags 0x7.
static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_GpuManagementApi[] =
{
{ /* [0] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) gpumgmtapiCtrlCmdSetShutdownState_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
/*flags=*/ 0x7u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x200101u,
/*paramSize=*/ sizeof(NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_GpuManagementApi.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "gpumgmtapiCtrlCmdSetShutdownState"
#endif
},
};
const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuManagementApi =
{
/*numEntries=*/ 1,
/*pExportEntries=*/ __nvoc_exported_method_def_GpuManagementApi
};
void __nvoc_dtor_RmResource(RmResource*);
// Destructor: run the class's own destruct hook first, then destroy the
// RmResource base (reverse of construction order).
void __nvoc_dtor_GpuManagementApi(GpuManagementApi *pThis) {
__nvoc_gpumgmtapiDestruct(pThis);
__nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Data-field initializer: GpuManagementApi declares no NVOC data-field
// defaults, so this is intentionally empty.
void __nvoc_init_dataField_GpuManagementApi(GpuManagementApi *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
// Constructor chain: build the RmResource base, initialize data fields,
// then run the class construct hook. On failure, already-constructed
// parts are unwound in reverse order via the goto ladder.
NV_STATUS __nvoc_ctor_GpuManagementApi(GpuManagementApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_GpuManagementApi_fail_RmResource;
__nvoc_init_dataField_GpuManagementApi(pThis);
status = __nvoc_gpumgmtapiConstruct(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_GpuManagementApi_fail__init;
goto __nvoc_ctor_GpuManagementApi_exit; // Success
__nvoc_ctor_GpuManagementApi_fail__init:
__nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_ctor_GpuManagementApi_fail_RmResource:
__nvoc_ctor_GpuManagementApi_exit:
return status;
}
// Populate the per-object virtual function table: the one class-specific
// control method (unless disabled by export flags 0x7), then the
// inherited RmResource/RsResource slots wired to the base-class thunks.
static void __nvoc_init_funcTable_GpuManagementApi_1(GpuManagementApi *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
pThis->__gpumgmtapiCtrlCmdSetShutdownState__ = &gpumgmtapiCtrlCmdSetShutdownState_IMPL;
#endif
pThis->__gpumgmtapiShareCallback__ = &__nvoc_thunk_RmResource_gpumgmtapiShareCallback;
pThis->__gpumgmtapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_gpumgmtapiCheckMemInterUnmap;
pThis->__gpumgmtapiControl__ = &__nvoc_thunk_RsResource_gpumgmtapiControl;
pThis->__gpumgmtapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_gpumgmtapiGetMemInterMapParams;
pThis->__gpumgmtapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_gpumgmtapiGetMemoryMappingDescriptor;
pThis->__gpumgmtapiGetRefCount__ = &__nvoc_thunk_RsResource_gpumgmtapiGetRefCount;
pThis->__gpumgmtapiControlFilter__ = &__nvoc_thunk_RsResource_gpumgmtapiControlFilter;
pThis->__gpumgmtapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_gpumgmtapiAddAdditionalDependants;
pThis->__gpumgmtapiUnmap__ = &__nvoc_thunk_RsResource_gpumgmtapiUnmap;
pThis->__gpumgmtapiControl_Prologue__ = &__nvoc_thunk_RmResource_gpumgmtapiControl_Prologue;
pThis->__gpumgmtapiCanCopy__ = &__nvoc_thunk_RsResource_gpumgmtapiCanCopy;
pThis->__gpumgmtapiMapTo__ = &__nvoc_thunk_RsResource_gpumgmtapiMapTo;
pThis->__gpumgmtapiPreDestruct__ = &__nvoc_thunk_RsResource_gpumgmtapiPreDestruct;
pThis->__gpumgmtapiUnmapFrom__ = &__nvoc_thunk_RsResource_gpumgmtapiUnmapFrom;
pThis->__gpumgmtapiControl_Epilogue__ = &__nvoc_thunk_RmResource_gpumgmtapiControl_Epilogue;
pThis->__gpumgmtapiControlLookup__ = &__nvoc_thunk_RsResource_gpumgmtapiControlLookup;
pThis->__gpumgmtapiMap__ = &__nvoc_thunk_RsResource_gpumgmtapiMap;
pThis->__gpumgmtapiAccessCallback__ = &__nvoc_thunk_RmResource_gpumgmtapiAccessCallback;
}
// Public function-table installer; delegates to the generated _1 helper.
void __nvoc_init_funcTable_GpuManagementApi(GpuManagementApi *pThis) {
__nvoc_init_funcTable_GpuManagementApi_1(pThis);
}
void __nvoc_init_RmResource(RmResource*);
// Initialize the object: record pointers to every base subobject (used by
// staticCast), initialize the RmResource base chain, then install the
// function table.
void __nvoc_init_GpuManagementApi(GpuManagementApi *pThis) {
pThis->__nvoc_pbase_GpuManagementApi = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;
pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;
__nvoc_init_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_init_funcTable_GpuManagementApi(pThis);
}
// Allocate, zero, initialize, and construct a GpuManagementApi instance.
// On success *ppThis receives the new object; on failure the allocation is
// freed and *ppThis is left unmodified, so callers must check the status.
NV_STATUS __nvoc_objCreate_GpuManagementApi(GpuManagementApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status;
Object *pParentObj;
GpuManagementApi *pThis;
pThis = portMemAllocNonPaged(sizeof(GpuManagementApi));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(GpuManagementApi));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GpuManagementApi);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
// NOTE(review): the dynamicCast result is not NULL-checked; the NVOC
// runtime presumably guarantees pParent is an Object here — confirm.
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_GpuManagementApi(pThis);
status = __nvoc_ctor_GpuManagementApi(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_objCreate_GpuManagementApi_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_GpuManagementApi_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
// va_list adapter used by the class definition's objCreatefn: unpacks the
// (CALL_CONTEXT*, RS_RES_ALLOC_PARAMS_INTERNAL*) argument pair — in that
// order — and forwards to the typed creator.
NV_STATUS __nvoc_objCreateDynamic_GpuManagementApi(GpuManagementApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
NV_STATUS status;
struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
status = __nvoc_objCreate_GpuManagementApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
return status;
}

View File

@@ -0,0 +1,221 @@
#ifndef _G_GPU_MGMT_API_NVOC_H_
#define _G_GPU_MGMT_API_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_gpu_mgmt_api_nvoc.h"
#ifndef GPU_MGMT_API_H
#define GPU_MGMT_API_H
#include "rmapi/resource.h"
#include "ctrl/ctrl0020.h"
// ****************************************************************************
// Type Definitions
// ****************************************************************************
//
// GpuManagementApi class information
//
// This is a global GPU class will help us to route IOCTLs to probed
// and persistent GPU state
//
#ifdef NVOC_GPU_MGMT_API_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// GpuManagementApi: NVOC class deriving from RmResource that routes global
// GPU-management IOCTLs (class 0x0020 controls). Layout: RTTI pointer,
// embedded base, cached base-subobject pointers for staticCast, then the
// per-object virtual function table (slots filled by
// __nvoc_init_funcTable_GpuManagementApi).
struct GpuManagementApi {
const struct NVOC_RTTI *__nvoc_rtti;          // run-time type info
struct RmResource __nvoc_base_RmResource;     // embedded base subobject
struct Object *__nvoc_pbase_Object;           // cached ancestor pointers...
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct GpuManagementApi *__nvoc_pbase_GpuManagementApi;  // ...and self
// Virtual method slots; invoked through the *_DISPATCH inline wrappers.
NV_STATUS (*__gpumgmtapiCtrlCmdSetShutdownState__)(struct GpuManagementApi *, NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *);
NvBool (*__gpumgmtapiShareCallback__)(struct GpuManagementApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__gpumgmtapiCheckMemInterUnmap__)(struct GpuManagementApi *, NvBool);
NV_STATUS (*__gpumgmtapiControl__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__gpumgmtapiGetMemInterMapParams__)(struct GpuManagementApi *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__gpumgmtapiGetMemoryMappingDescriptor__)(struct GpuManagementApi *, struct MEMORY_DESCRIPTOR **);
NvU32 (*__gpumgmtapiGetRefCount__)(struct GpuManagementApi *);
NV_STATUS (*__gpumgmtapiControlFilter__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
void (*__gpumgmtapiAddAdditionalDependants__)(struct RsClient *, struct GpuManagementApi *, RsResourceRef *);
NV_STATUS (*__gpumgmtapiUnmap__)(struct GpuManagementApi *, struct CALL_CONTEXT *, RsCpuMapping *);
NV_STATUS (*__gpumgmtapiControl_Prologue__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NvBool (*__gpumgmtapiCanCopy__)(struct GpuManagementApi *);
NV_STATUS (*__gpumgmtapiMapTo__)(struct GpuManagementApi *, RS_RES_MAP_TO_PARAMS *);
void (*__gpumgmtapiPreDestruct__)(struct GpuManagementApi *);
NV_STATUS (*__gpumgmtapiUnmapFrom__)(struct GpuManagementApi *, RS_RES_UNMAP_FROM_PARAMS *);
void (*__gpumgmtapiControl_Epilogue__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__gpumgmtapiControlLookup__)(struct GpuManagementApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NV_STATUS (*__gpumgmtapiMap__)(struct GpuManagementApi *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *);
NvBool (*__gpumgmtapiAccessCallback__)(struct GpuManagementApi *, struct RsClient *, void *, RsAccessRight);
};
#ifndef __NVOC_CLASS_GpuManagementApi_TYPEDEF__
#define __NVOC_CLASS_GpuManagementApi_TYPEDEF__
typedef struct GpuManagementApi GpuManagementApi;
#endif /* __NVOC_CLASS_GpuManagementApi_TYPEDEF__ */
#ifndef __nvoc_class_id_GpuManagementApi
#define __nvoc_class_id_GpuManagementApi 0x376305
#endif /* __nvoc_class_id_GpuManagementApi */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi;
#define __staticCast_GpuManagementApi(pThis) \
((pThis)->__nvoc_pbase_GpuManagementApi)
#ifdef __nvoc_gpu_mgmt_api_h_disabled
#define __dynamicCast_GpuManagementApi(pThis) ((GpuManagementApi*)NULL)
#else //__nvoc_gpu_mgmt_api_h_disabled
#define __dynamicCast_GpuManagementApi(pThis) \
((GpuManagementApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuManagementApi)))
#endif //__nvoc_gpu_mgmt_api_h_disabled
NV_STATUS __nvoc_objCreateDynamic_GpuManagementApi(GpuManagementApi**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_GpuManagementApi(GpuManagementApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_GpuManagementApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_GpuManagementApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
#define gpumgmtapiCtrlCmdSetShutdownState(pGpuMgmt, pParams) gpumgmtapiCtrlCmdSetShutdownState_DISPATCH(pGpuMgmt, pParams)
#define gpumgmtapiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) gpumgmtapiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy)
#define gpumgmtapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) gpumgmtapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define gpumgmtapiControl(pResource, pCallContext, pParams) gpumgmtapiControl_DISPATCH(pResource, pCallContext, pParams)
#define gpumgmtapiGetMemInterMapParams(pRmResource, pParams) gpumgmtapiGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define gpumgmtapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) gpumgmtapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define gpumgmtapiGetRefCount(pResource) gpumgmtapiGetRefCount_DISPATCH(pResource)
#define gpumgmtapiControlFilter(pResource, pCallContext, pParams) gpumgmtapiControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define gpumgmtapiAddAdditionalDependants(pClient, pResource, pReference) gpumgmtapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define gpumgmtapiUnmap(pResource, pCallContext, pCpuMapping) gpumgmtapiUnmap_DISPATCH(pResource, pCallContext, pCpuMapping)
#define gpumgmtapiControl_Prologue(pResource, pCallContext, pParams) gpumgmtapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define gpumgmtapiCanCopy(pResource) gpumgmtapiCanCopy_DISPATCH(pResource)
#define gpumgmtapiMapTo(pResource, pParams) gpumgmtapiMapTo_DISPATCH(pResource, pParams)
#define gpumgmtapiPreDestruct(pResource) gpumgmtapiPreDestruct_DISPATCH(pResource)
#define gpumgmtapiUnmapFrom(pResource, pParams) gpumgmtapiUnmapFrom_DISPATCH(pResource, pParams)
#define gpumgmtapiControl_Epilogue(pResource, pCallContext, pParams) gpumgmtapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define gpumgmtapiControlLookup(pResource, pParams, ppEntry) gpumgmtapiControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define gpumgmtapiMap(pResource, pCallContext, pParams, pCpuMapping) gpumgmtapiMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping)
#define gpumgmtapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) gpumgmtapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
NV_STATUS gpumgmtapiCtrlCmdSetShutdownState_IMPL(struct GpuManagementApi *pGpuMgmt, NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *pParams);
// *_DISPATCH wrappers: each calls through the object's corresponding
// function-table slot; no additional logic. These are the bodies behind
// the gpumgmtapi* method macros above.
static inline NV_STATUS gpumgmtapiCtrlCmdSetShutdownState_DISPATCH(struct GpuManagementApi *pGpuMgmt, NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *pParams) {
return pGpuMgmt->__gpumgmtapiCtrlCmdSetShutdownState__(pGpuMgmt, pParams);
}
static inline NvBool gpumgmtapiShareCallback_DISPATCH(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pResource->__gpumgmtapiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy);
}
static inline NV_STATUS gpumgmtapiCheckMemInterUnmap_DISPATCH(struct GpuManagementApi *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__gpumgmtapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NV_STATUS gpumgmtapiControl_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__gpumgmtapiControl__(pResource, pCallContext, pParams);
}
static inline NV_STATUS gpumgmtapiGetMemInterMapParams_DISPATCH(struct GpuManagementApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return pRmResource->__gpumgmtapiGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS gpumgmtapiGetMemoryMappingDescriptor_DISPATCH(struct GpuManagementApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return pRmResource->__gpumgmtapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline NvU32 gpumgmtapiGetRefCount_DISPATCH(struct GpuManagementApi *pResource) {
return pResource->__gpumgmtapiGetRefCount__(pResource);
}
static inline NV_STATUS gpumgmtapiControlFilter_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__gpumgmtapiControlFilter__(pResource, pCallContext, pParams);
}
static inline void gpumgmtapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GpuManagementApi *pResource, RsResourceRef *pReference) {
pResource->__gpumgmtapiAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NV_STATUS gpumgmtapiUnmap_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
return pResource->__gpumgmtapiUnmap__(pResource, pCallContext, pCpuMapping);
}
static inline NV_STATUS gpumgmtapiControl_Prologue_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__gpumgmtapiControl_Prologue__(pResource, pCallContext, pParams);
}
static inline NvBool gpumgmtapiCanCopy_DISPATCH(struct GpuManagementApi *pResource) {
return pResource->__gpumgmtapiCanCopy__(pResource);
}
static inline NV_STATUS gpumgmtapiMapTo_DISPATCH(struct GpuManagementApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__gpumgmtapiMapTo__(pResource, pParams);
}
static inline void gpumgmtapiPreDestruct_DISPATCH(struct GpuManagementApi *pResource) {
pResource->__gpumgmtapiPreDestruct__(pResource);
}
static inline NV_STATUS gpumgmtapiUnmapFrom_DISPATCH(struct GpuManagementApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pResource->__gpumgmtapiUnmapFrom__(pResource, pParams);
}
static inline void gpumgmtapiControl_Epilogue_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__gpumgmtapiControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS gpumgmtapiControlLookup_DISPATCH(struct GpuManagementApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__gpumgmtapiControlLookup__(pResource, pParams, ppEntry);
}
static inline NV_STATUS gpumgmtapiMap_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
return pResource->__gpumgmtapiMap__(pResource, pCallContext, pParams, pCpuMapping);
}
static inline NvBool gpumgmtapiAccessCallback_DISPATCH(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pResource->__gpumgmtapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
NV_STATUS gpumgmtapiConstruct_IMPL(struct GpuManagementApi *arg_pGpuMgmt, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_gpumgmtapiConstruct(arg_pGpuMgmt, arg_pCallContext, arg_pParams) gpumgmtapiConstruct_IMPL(arg_pGpuMgmt, arg_pCallContext, arg_pParams)
void gpumgmtapiDestruct_IMPL(struct GpuManagementApi *pGpuMgmt);
#define __nvoc_gpumgmtapiDestruct(pGpuMgmt) gpumgmtapiDestruct_IMPL(pGpuMgmt)
#undef PRIVATE_FIELD
#endif // GPU_MGMT_API_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_GPU_MGMT_API_NVOC_H_

View File

@@ -0,0 +1,154 @@
#define NVOC_GPU_MGR_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_gpu_mgr_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xcf1b25 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMGR;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
void __nvoc_init_OBJGPUMGR(OBJGPUMGR*);
void __nvoc_init_funcTable_OBJGPUMGR(OBJGPUMGR*);
NV_STATUS __nvoc_ctor_OBJGPUMGR(OBJGPUMGR*);
void __nvoc_init_dataField_OBJGPUMGR(OBJGPUMGR*);
void __nvoc_dtor_OBJGPUMGR(OBJGPUMGR*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUMGR;
// RTTI records for OBJGPUMGR (derives directly from Object): class
// definition, destructor entry point, and base-subobject byte offset.
static const struct NVOC_RTTI __nvoc_rtti_OBJGPUMGR_OBJGPUMGR = {
/*pClassDef=*/ &__nvoc_class_def_OBJGPUMGR,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPUMGR,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_OBJGPUMGR_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(OBJGPUMGR, __nvoc_base_Object),
};
// Cast table for __nvoc_dynamicCast: OBJGPUMGR itself, then Object.
static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGPUMGR = {
/*numRelatives=*/ 2,
/*relatives=*/ {
&__nvoc_rtti_OBJGPUMGR_OBJGPUMGR,
&__nvoc_rtti_OBJGPUMGR_Object,
},
};
// NVOC class definition for OBJGPUMGR. The export table is empty: this
// class exposes no RM-control methods.
const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMGR =
{
/*classInfo=*/ {
/*size=*/ sizeof(OBJGPUMGR),
/*classId=*/ classId(OBJGPUMGR),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "OBJGPUMGR",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPUMGR,
/*pCastInfo=*/ &__nvoc_castinfo_OBJGPUMGR,
/*pExportInfo=*/ &__nvoc_export_info_OBJGPUMGR
};
const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUMGR =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_Object(Object*);
// Destructor: class destruct hook first, then the Object base.
void __nvoc_dtor_OBJGPUMGR(OBJGPUMGR *pThis) {
__nvoc_gpumgrDestruct(pThis);
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// NVOC-generated field initializer: OBJGPUMGR declares no NVOC-managed
// defaults, so this is a no-op kept for generator symmetry.
void __nvoc_init_dataField_OBJGPUMGR(OBJGPUMGR *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_Object(Object* );
// NVOC-generated constructor: Object base first, then data-field defaults,
// then the user-supplied gpumgrConstruct. On failure the labeled fallthrough
// chain unwinds already-constructed pieces in reverse order.
NV_STATUS __nvoc_ctor_OBJGPUMGR(OBJGPUMGR *pThis) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
    if (status != NV_OK) goto __nvoc_ctor_OBJGPUMGR_fail_Object;
    __nvoc_init_dataField_OBJGPUMGR(pThis);
    status = __nvoc_gpumgrConstruct(pThis);
    if (status != NV_OK) goto __nvoc_ctor_OBJGPUMGR_fail__init;
    goto __nvoc_ctor_OBJGPUMGR_exit; // Success
__nvoc_ctor_OBJGPUMGR_fail__init:
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_OBJGPUMGR_fail_Object:
__nvoc_ctor_OBJGPUMGR_exit:
    return status;
}
// NVOC-generated vtable setup, section 1: OBJGPUMGR defines no virtual
// methods, so there is nothing to install.
static void __nvoc_init_funcTable_OBJGPUMGR_1(OBJGPUMGR *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}
// Top-level vtable initializer; delegates to the generated section helpers.
void __nvoc_init_funcTable_OBJGPUMGR(OBJGPUMGR *pThis) {
    __nvoc_init_funcTable_OBJGPUMGR_1(pThis);
}
void __nvoc_init_Object(Object*);
// NVOC-generated init: wires the per-base convenience pointers, initializes
// the Object base, then installs the (empty) function table.
void __nvoc_init_OBJGPUMGR(OBJGPUMGR *pThis) {
    pThis->__nvoc_pbase_OBJGPUMGR = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_funcTable_OBJGPUMGR(pThis);
}
// NVOC-generated factory: allocates zeroed non-paged memory, stamps RTTI,
// links the new object under pParent (unless the caller asked for the
// parent's halspec only), then runs init and the constructor chain.
// On constructor failure the object is freed without re-running destructors,
// because the constructor's own unwind chain already ran them.
NV_STATUS __nvoc_objCreate_OBJGPUMGR(OBJGPUMGR **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    OBJGPUMGR *pThis;
    pThis = portMemAllocNonPaged(sizeof(OBJGPUMGR));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;
    portMemSet(pThis, 0, sizeof(OBJGPUMGR));
    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGPUMGR);
    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }
    __nvoc_init_OBJGPUMGR(pThis);
    status = __nvoc_ctor_OBJGPUMGR(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_OBJGPUMGR_cleanup;
    *ppThis = pThis;
    return NV_OK;
__nvoc_objCreate_OBJGPUMGR_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}
// Varargs factory entry point for OBJGPUMGR. This class takes no
// construction arguments, so the va_list is ignored and creation is
// forwarded straight to the typed factory.
NV_STATUS __nvoc_objCreateDynamic_OBJGPUMGR(OBJGPUMGR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    return __nvoc_objCreate_OBJGPUMGR(ppThis, pParent, createFlags);
}

View File

@@ -0,0 +1,425 @@
#ifndef _G_GPU_MGR_NVOC_H_
#define _G_GPU_MGR_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_gpu_mgr_nvoc.h"
#ifndef _GPUMGR_H_
#define _GPUMGR_H_
//
// GPU Manager Defines and Structures
//
struct OBJGPU;
#include "core/core.h"
#include "core/system.h"
#include "nvlimits.h"
#include "gpu_mgr/gpu_group.h"
#include "gpu/gpu_uuid.h"
#include "gpu/gpu_device_mapping.h"
#include "gpu/gpu_access.h"
#include "ctrl/ctrl0000/ctrl0000gpu.h"
#include "ctrl/ctrl2080/ctrl2080ce.h"
#include "ctrl/ctrl2080/ctrl2080internal.h"
#include "nvoc/utility.h"
#include "nv_firmware_types.h"
#include "class/cl2080.h" // NV2080_ENGINE_TYPE_*
#include "utils/nvbitvector.h"
// Bitvector types: MC_ENGINE_BITVECTOR is forward-typedef'd here (used by
// gpumgrServiceInterrupts below); ENGTYPE_BIT_VECTOR holds one bit per
// NV2080_ENGINE_TYPE_* value.
TYPEDEF_BITVECTOR(MC_ENGINE_BITVECTOR);
#define GPUMGR_MAX_GPU_INSTANCES 8
#define GPUMGR_MAX_COMPUTE_INSTANCES 8
MAKE_BITVECTOR(ENGTYPE_BIT_VECTOR, NV2080_ENGINE_TYPE_LAST);
typedef ENGTYPE_BIT_VECTOR *PENGTYPE_BIT_VECTOR;
//
// Terminology:
// GPU -> entity sitting on the bus
// Device -> broadcast semantics; maps to one or more GPUs
// Subdevice -> unicast semantics; maps to a single GPU
//
////////////////////////////////////////////////////////////////////////////////
// DO NOT ADD NEW STUBS HERE //
////////////////////////////////////////////////////////////////////////////////
// SLI is not supported in this build: the query macros expand to constant
// 0 / NV_FALSE / NV_ERR_NOT_SUPPORTED, and the statement-like macros expand
// to do-while(0) blocks that still evaluate (and discard) their arguments so
// call sites keep compiling and side effects in arguments are preserved.
#define gpumgrGetGpuLinkCount(deviceInstance) ((NvU32) 0)
#define gpumgrGetSliLinkOutputMaskFromGpu(pGpu) ((NvU32) 0)
#define gpumgrGetVidLinkOutputMaskFromGpu(pGpu) ((NvU32) 0)
#define gpumgrGetSliLinkOrderCount(pGpu) ((NvU32) 0)
#define gpumgrGetSliLinkConnectionCount(pGpu) ((NvU32) 0)
#define gpumgrGetSLIConfig(gpuInstance, onlyWithSliLink) ((NvU32) 0)
#define gpumgrDisableVidLink(pGpu, head, max_dr_port)
#define gpumgrGetGpuVidLinkMaxPixelClock(pGpu, pMaxPclkMhz) (NV_ERR_NOT_SUPPORTED)
#define gpumgrPinsetToPinsetTableIndex(pinset, pPinsetIndex) (NV_ERR_NOT_SUPPORTED)
#define gpumgrGetBcEnabledStatus(g) (NV_FALSE)
#define gpumgrGetBcEnabledStatusEx(g, t) (NV_FALSE)
#define gpumgrSetBcEnabledStatus(g, b) do { NvBool b2 = b; (void)b2; } while (0)
#define gpumgrSLILoopReentrancy(pGpu, l, r, i, pFuncStr)
#define gpumgrSLILoopReentrancyPop(pGpu) ((NvU32)0)
#define gpumgrSLILoopReentrancyPush(pGpu, sliLoopReentrancy) do { NvU32 x = sliLoopReentrancy; (void)x; } while(0)
// Bookkeeping record for one probed GPU slot (see OBJGPUMGR::probedGpus),
// identified by gpuId and its PCI domain/bus/device encoding.
typedef struct
{
    NvU32 gpuId;
    NvU64 gpuDomainBusDevice;
    NvBool bInitAttempted;        // RM initialization was attempted -- see initStatus
    NvBool bDrainState;           // no new client connections to this GPU
    NvBool bRemoveIdle;           // remove this GPU once it's idle (detached)
    NvBool bExcluded;             // this gpu is marked as excluded; do not use
    NvBool bUuidValid;            // cached uuid is valid
    NvBool bSkipHwNvlinkDisable;  // skip HW registers configuration for disabled links
    NvU32 initDisabledNvlinksMask;
    NV_STATUS initStatus;         // presumably the result of the last init attempt -- verify against setter
    NvU8 uuid[RM_SHA1_GID_SIZE];
    OS_RM_CAPS *pOsRmCaps;        // "Opaque" pointer to os-specific capabilities
} PROBEDGPU;
// Display AFR flag fields in NVIDIA DRF "high:low" bit-range notation
// (consumed by the DRF_* accessor macros).
#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_FLIPS 11:4
#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME 12:12
#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME_INVALID 0x0000000
#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME_VALID 0x0000001
/*!
 * Structure for tracking resources allocated for saving the primary GPU's
 * VBIOS state. This is used for TDR/fullchip reset recovery. The GPU object
 * gets destroyed across reset, so the data lives here in the GPU manager
 * (see OBJGPUMGR::primaryVbiosState).
 */
typedef struct _def_gpumgr_save_vbios_state
{
    RmPhysAddr vgaWorkspaceVidMemBase;        //<! Base address of the VGA workspace
    struct MEMORY_DESCRIPTOR *pSaveToMemDesc; //<! Where VGA workspace is saved to
    void *pSaveRegsOpaque;                    //<! Saved values of VGA registers
} GPUMGRSAVEVBIOSSTATE, *PGPUMGRSAVEVBIOSSTATE;
//
// types of bridges supported.
// These defines are indices for the types of bridges supported.
// Preference for a given bridge type is determined by the lower value index;
// i.e., Video Link has the lower value index, so in the event that both NvLink
// and video link are detected, the video link will be used.
//
#define SLI_MAX_BRIDGE_TYPES 2
#define SLI_BT_VIDLINK 0
#define SLI_BT_NVLINK 1
// Cached NVLink topology attributes for one GPU. Later GPU generations
// populate progressively more fields, as marked below.
typedef struct NVLINK_TOPOLOGY_PARAMS
{
    NvU32 sysmemLinks;
    NvU32 maxLinksPerPeer;
    NvBool bSymmetric;
    // Pascal only
    NvU32 numLinks;
    // Volta +
    NvU32 numPeers;
    NvBool bSwitchConfig;
    // Ampere +
    NvU32 pceAvailableMaskPerHshub[NV2080_CTRL_CE_MAX_HSHUBS];
    NvU32 fbhubPceMask;
    NvU32 maxPceLceMap[NV2080_CTRL_MAX_PCES];
    NvU32 maxGrceConfig[NV2080_CTRL_MAX_GRCES];
    NvU32 maxExposeCeMask;
    NvU32 maxTopoIdx;   // For table configs only; not applicable for algorithm
} NVLINK_TOPOLOGY_PARAMS, *PNVLINK_TOPOLOGY_PARAMS;
// One NVLink topology cache entry; presumably keyed by the GPU's
// domain/bus/device encoding, with `valid` marking a populated slot --
// verify against the gpumgr*SystemNvlinkTopo accessors.
typedef struct _def_gpu_nvlink_topology_info
{
    NvBool valid;
    NvU64 DomainBusDevice;
    NVLINK_TOPOLOGY_PARAMS params;
} NVLINK_TOPOLOGY_INFO, *PNVLINK_TOPOLOGY_INFO;
// Pairs a GPU object pointer with its instance number (element type of
// OBJGPUMGR::gpuHandleIDList).
typedef struct
{
    OBJGPU *pGpu;
    NvU32 gpuInstance;
} GPU_HANDLE_ID;
#ifdef NVOC_GPU_MGR_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct GPUMGR_SAVE_MIG_INSTANCE_TOPOLOGY;
// GPU Manager object: system-wide tracking of probed/attached GPUs, device
// (broadcast) groupings, SLI bridge type, and saved primary-GPU VBIOS state.
struct OBJGPUMGR {
    const struct NVOC_RTTI *__nvoc_rtti;       // NVOC runtime type info
    struct Object __nvoc_base_Object;          // base-class instance
    struct Object *__nvoc_pbase_Object;        // convenience pointer to base
    struct OBJGPUMGR *__nvoc_pbase_OBJGPUMGR;  // convenience pointer to self
    PROBEDGPU probedGpus[32];                  // per-slot probed-GPU records
    void *probedGpusLock;                      // presumably guards probedGpus -- type is OS-opaque
    NvU32 gpuAttachCount;
    NvU32 gpuAttachMask;                       // bitmask of attached GPU instances
    NvU32 persistentSwStateGpuMask;            // assumed: GPUs with persistent SW state -- TODO confirm
    NvU32 deviceCount;
    struct OBJGPUGRP *pGpuGrpTable[32];        // per-device GPU group objects
    NvU32 gpuInstMaskTable[32];                // per-device GPU instance masks
    NvU8 gpuBridgeType;                        // one of the SLI_BT_* values above
    GPUMGRSAVEVBIOSSTATE primaryVbiosState;    // saved VBIOS state for TDR/reset recovery
    NvU8 powerDisconnectedGpuCount;            // valid entries in powerDisconnectedGpuBus
    NvU8 powerDisconnectedGpuBus[32];
    GPU_HANDLE_ID gpuHandleIDList[32];         // pGpu <-> gpuInstance pairs
    NvU32 numGpuHandles;                       // valid entries in gpuHandleIDList
};
#ifndef __NVOC_CLASS_OBJGPUMGR_TYPEDEF__
#define __NVOC_CLASS_OBJGPUMGR_TYPEDEF__
typedef struct OBJGPUMGR OBJGPUMGR;
#endif /* __NVOC_CLASS_OBJGPUMGR_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJGPUMGR
#define __nvoc_class_id_OBJGPUMGR 0xcf1b25
#endif /* __nvoc_class_id_OBJGPUMGR */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMGR;
#define __staticCast_OBJGPUMGR(pThis) \
((pThis)->__nvoc_pbase_OBJGPUMGR)
#ifdef __nvoc_gpu_mgr_h_disabled
#define __dynamicCast_OBJGPUMGR(pThis) ((OBJGPUMGR*)NULL)
#else //__nvoc_gpu_mgr_h_disabled
#define __dynamicCast_OBJGPUMGR(pThis) \
((OBJGPUMGR*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPUMGR)))
#endif //__nvoc_gpu_mgr_h_disabled
NV_STATUS __nvoc_objCreateDynamic_OBJGPUMGR(OBJGPUMGR**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_OBJGPUMGR(OBJGPUMGR**, Dynamic*, NvU32);
#define __objCreate_OBJGPUMGR(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_OBJGPUMGR((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
static inline void gpumgrAddSystemNvlinkTopo(NvU64 DomainBusDevice) {
return;
}
static inline NvBool gpumgrGetSystemNvlinkTopo(NvU64 DomainBusDevice, struct NVLINK_TOPOLOGY_PARAMS *pTopoParams) {
return ((NvBool)(0 != 0));
}
static inline void gpumgrUpdateSystemNvlinkTopo(NvU64 DomainBusDevice, struct NVLINK_TOPOLOGY_PARAMS *pTopoParams) {
return;
}
static inline NV_STATUS gpumgrSetGpuInitDisabledNvlinks(NvU32 gpuId, NvU32 mask, NvBool bSkipHwNvlinkDisable) {
return NV_ERR_NOT_SUPPORTED;
}
static inline NV_STATUS gpumgrGetGpuInitDisabledNvlinks(NvU32 gpuId, NvU32 *pMask, NvBool *pbSkipHwNvlinkDisable) {
return NV_ERR_NOT_SUPPORTED;
}
static inline NvBool gpumgrCheckIndirectPeer(struct OBJGPU *pGpu, struct OBJGPU *pRemoteGpu) {
return ((NvBool)(0 != 0));
}
static inline void gpumgrAddSystemMIGInstanceTopo(NvU64 domainBusDevice) {
return;
}
static inline NvBool gpumgrGetSystemMIGInstanceTopo(NvU64 domainBusDevice, struct GPUMGR_SAVE_MIG_INSTANCE_TOPOLOGY **ppTopoParams) {
return ((NvBool)(0 != 0));
}
static inline NvBool gpumgrIsSystemMIGEnabled(NvU64 domainBusDevice) {
return ((NvBool)(0 != 0));
}
static inline void gpumgrSetSystemMIGEnabled(NvU64 domainBusDevice, NvBool bMIGEnabled) {
return;
}
static inline void gpumgrUnregisterRmCapsForMIGGI(NvU64 gpuDomainBusDevice) {
return;
}
static inline void gpumgrUpdateBoardId(struct OBJGPU *arg0) {
return;
}
static inline void gpumgrServiceInterrupts(NvU32 arg0, MC_ENGINE_BITVECTOR *arg1, NvBool arg2) {
return;
}
NV_STATUS gpumgrConstruct_IMPL(struct OBJGPUMGR *arg_);
#define __nvoc_gpumgrConstruct(arg_) gpumgrConstruct_IMPL(arg_)
void gpumgrDestruct_IMPL(struct OBJGPUMGR *arg0);
#define __nvoc_gpumgrDestruct(arg0) gpumgrDestruct_IMPL(arg0)
#undef PRIVATE_FIELD
// Attach arguments for a single SOC device; per the note in GPUATTACHARG,
// this is the T234D+ replacement for the legacy SOC fields there.
typedef struct {
    NvBool specified;                               // Set this flag when using this struct
    NvBool bIsIGPU;                                 // Set this flag for iGPU
    DEVICE_MAPPING deviceMapping[DEVICE_INDEX_MAX]; // Register Aperture mapping
    NvU32 socChipId0;                               // Chip ID used for HAL binding
    NvU32 iovaspaceId;                              // SMMU client ID
} SOCGPUATTACHARG;
//
// Packages up system/bus state for the attach process
// (consumed by gpumgrAttachGpu).
//
typedef struct GPUATTACHARG
{
    GPUHWREG *regBaseAddr;   // mapped register aperture -- TODO confirm mapping lifetime
    GPUHWREG *fbBaseAddr;
    GPUHWREG *instBaseAddr;
    RmPhysAddr devPhysAddr;
    RmPhysAddr fbPhysAddr;
    RmPhysAddr instPhysAddr;
    RmPhysAddr ioPhysAddr;
    NvU64 nvDomainBusDeviceFunc; // packed PCI domain/bus/device/function
    NvU32 regLength;
    NvU64 fbLength;
    NvU32 instLength;
    NvU32 intLine;               // interrupt line assigned by the OS -- TODO confirm
    void *pOsAttachArg;          // opaque OS-layer attach context
    NvBool bIsSOC;
    NvU32 socDeviceCount;
    DEVICE_MAPPING socDeviceMappings[GPU_MAX_DEVICE_MAPPINGS];
    NvU32 socId;
    NvU32 socSubId;
    NvU32 socChipId0;
    NvU32 iovaspaceId;
    NvBool bRequestFwClientRm;
    //
    // The SOC-specific fields above are legacy fields that were added for
    // ARCH MODS iGPU verification. There is a plan to deprecate these fields as
    // part of an effort to clean up the existing iGPU code in RM.
    //
    // Starting with T234D+, the SOCGPUATTACHARG field below will be used to
    // pass the required attach info for a single SOC device from the RM OS
    // layer to core RM.
    //
    SOCGPUATTACHARG socDeviceArgs;
} GPUATTACHARG;
NV_STATUS gpumgrGetGpuAttachInfo(NvU32 *pGpuCnt, NvU32 *pGpuMask);
NV_STATUS gpumgrGetProbedGpuIds(NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *);
NV_STATUS gpumgrGetProbedGpuDomainBusDevice(NvU32 gpuId, NvU64 *gpuDomainBusDevice);
NV_STATUS gpumgrGetAttachedGpuIds(NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *);
NV_STATUS gpumgrGetGpuIdInfo(NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *);
NV_STATUS gpumgrGetGpuIdInfoV2(NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *);
void gpumgrSetGpuId(OBJGPU*, NvU32 gpuId);
NV_STATUS gpumgrGetGpuInitStatus(NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *);
void gpumgrSetGpuInitStatus(NvU32 gpuId, NV_STATUS status);
OBJGPU* gpumgrGetGpuFromId(NvU32 gpuId);
OBJGPU* gpumgrGetGpuFromUuid(const NvU8 *pGpuUuid, NvU32 flags);
OBJGPU* gpumgrGetGpuFromBusInfo(NvU32 domain, NvU8 bus, NvU8 device);
NvU32 gpumgrGetDefaultPrimaryGpu(NvU32 gpuMask);
NV_STATUS gpumgrAllocGpuInstance(NvU32 *pDeviceInstance);
NV_STATUS gpumgrRegisterGpuId(NvU32 gpuId, NvU64 gpuDomainBusDevice);
NV_STATUS gpumgrUnregisterGpuId(NvU32 gpuId);
NV_STATUS gpumgrExcludeGpuId(NvU32 gpuId);
NV_STATUS gpumgrSetUuid(NvU32 gpuId, NvU8 *uuid);
NV_STATUS gpumgrGetGpuUuidInfo(NvU32 gpuId, NvU8 **ppUuidStr, NvU32 *pUuidStrLen, NvU32 uuidFlags);
NV_STATUS gpumgrAttachGpu(NvU32 deviceInstance, GPUATTACHARG *);
NV_STATUS gpumgrDetachGpu(NvU32 deviceInstance);
OBJGPU* gpumgrGetNextGpu(NvU32 gpuMask, NvU32 *pStartIndex);
NV_STATUS gpumgrStatePreInitGpu(OBJGPU*);
NV_STATUS gpumgrStateInitGpu(OBJGPU*);
NV_STATUS gpumgrStateLoadGpu(OBJGPU*, NvU32);
NV_STATUS gpumgrAllocDeviceInstance(NvU32 *pDeviceInstance);
NV_STATUS gpumgrCreateDevice(NvU32 *pDeviceInstance, NvU32 gpuMask, NvU32 *pGpuIdsOrdinal);
NV_STATUS gpumgrDestroyDevice(NvU32 deviceInstance);
NvU32 gpumgrGetDeviceInstanceMask(void);
NvU32 gpumgrGetDeviceGpuMask(NvU32 deviceInstance);
NV_STATUS gpumgrIsDeviceInstanceValid(NvU32 deviceInstance);
NvU32 gpumgrGetPrimaryForDevice(NvU32 deviceInstance);
NvBool gpumgrIsSubDeviceInstanceValid(NvU32 subDeviceInstance);
NvBool gpumgrIsDeviceEnabled(NvU32 deviceInstance);
NvU32 gpumgrGetGpuMask(OBJGPU *pGpu);
OBJGPU* gpumgrGetGpu(NvU32 deviceInstance);
OBJGPU* gpumgrGetSomeGpu(void);
NvU32 gpumgrGetSubDeviceCount(NvU32 gpuMask);
NvU32 gpumgrGetSubDeviceCountFromGpu(OBJGPU *pGpu);
NvU32 gpumgrGetSubDeviceMaxValuePlus1(OBJGPU *pGpu);
NvU32 gpumgrGetSubDeviceInstanceFromGpu(OBJGPU *pGpu);
OBJGPU* gpumgrGetParentGPU(OBJGPU *pGpu);
void gpumgrSetParentGPU(OBJGPU *pGpu, OBJGPU *pParentGpu);
NvBool gpumgrIsGpuDisplayParent(OBJGPU*);
OBJGPU* gpumgrGetDisplayParent(OBJGPU*);
NV_STATUS gpumgrGetGpuLockAndDrPorts(OBJGPU*, OBJGPU*, NvU32 *, NvU32 *);
NV_STATUS gpumgrGetBootPrimary(OBJGPU **ppGpu);
OBJGPU* gpumgrGetMGpu(void);
RmPhysAddr gpumgrGetGpuPhysFbAddr(OBJGPU*);
OBJGPU* gpumgrGetGpuFromSubDeviceInst(NvU32, NvU32);
NV_STATUS gpumgrAddDeviceInstanceToGpus(NvU32 gpuMask);
NV_STATUS gpumgrRemoveDeviceInstanceFromGpus(NvU32 gpuMask);
NV_STATUS gpumgrConstructGpuGrpObject(struct OBJGPUMGR *pGpuMgr, NvU32 gpuMask, struct OBJGPUGRP **ppGpuGrp);
struct OBJGPUGRP* gpumgrGetGpuGrpFromGpu(OBJGPU *pGpu);
struct OBJGPUGRP* gpumgrGetGpuGrpFromInstance(NvU32 gpugrpInstance);
NV_STATUS gpumgrModifyGpuDrainState(NvU32 gpuId, NvBool bEnable, NvBool bRemove, NvBool bLinkDisable);
NV_STATUS gpumgrQueryGpuDrainState(NvU32 gpuId, NvBool *pBEnable, NvBool *pBRemove);
NvBool gpumgrIsGpuPointerValid(OBJGPU *pGpu);
NvU32 gpumgrGetGrpMaskFromGpuInst(NvU32 gpuInst);
void gpumgrAddDeviceMaskToGpuInstTable(NvU32 gpuMask);
void gpumgrClearDeviceMaskFromGpuInstTable(NvU32 gpuMask);
NvBool gpumgrSetGpuAcquire(OBJGPU *pGpu);
void gpumgrSetGpuRelease(void);
NvU8 gpumgrGetGpuBridgeType(void);
//
// gpumgrIsSubDeviceCountOne
//
static NV_INLINE NvBool
gpumgrIsSubDeviceCountOne(NvU32 gpuMask)
{
    //
    // Fast equivalent of gpumgrGetSubDeviceCount(gpuMask) == 1: a nonzero
    // mask has exactly one bit set iff clearing its lowest set bit leaves 0.
    // Returns NV_FALSE for gpuMask == 0, just like gpumgrGetSubDeviceCount(0).
    //
    if (gpuMask == 0)
    {
        return NV_FALSE;
    }
    return (gpuMask & (gpuMask - 1)) == 0;
}
//
// gpumgrIsParentGPU
//
static NV_INLINE NvBool
gpumgrIsParentGPU(OBJGPU *pGpu)
{
    // A GPU is its group's parent exactly when the parent lookup maps it
    // back onto itself.
    OBJGPU *pParentGpu = gpumgrGetParentGPU(pGpu);
    return pParentGpu == pGpu;
}
#endif // _GPUMGR_H_
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_GPU_MGR_NVOC_H_

View File

@@ -0,0 +1,433 @@
#define NVOC_GPU_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_gpu_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x7ef3cb = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE;
void __nvoc_init_OBJGPU(OBJGPU*,
NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
NvU32 DispIpHal_ipver);
void __nvoc_init_funcTable_OBJGPU(OBJGPU*);
NV_STATUS __nvoc_ctor_OBJGPU(OBJGPU*, NvU32 arg_gpuInstance);
void __nvoc_init_dataField_OBJGPU(OBJGPU*);
void __nvoc_dtor_OBJGPU(OBJGPU*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPU;
// Per-ancestor RTTI records: the concrete class at offset 0 with the real
// destructor, and one record per base (Object, RmHalspecOwner, OBJTRACEABLE)
// carrying its offset within OBJGPU and the destruct-from-base thunk.
static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_OBJGPU = {
    /*pClassDef=*/ &__nvoc_class_def_OBJGPU,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPU,
    /*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJGPU, __nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_RmHalspecOwner = {
    /*pClassDef=*/ &__nvoc_class_def_RmHalspecOwner,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJGPU, __nvoc_base_RmHalspecOwner),
};
static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_OBJTRACEABLE = {
    /*pClassDef=*/ &__nvoc_class_def_OBJTRACEABLE,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJGPU, __nvoc_base_OBJTRACEABLE),
};
// Cast table consumed by dynamicCast(): OBJGPU plus its three bases.
static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGPU = {
    /*numRelatives=*/ 4,
    /*relatives=*/ {
        &__nvoc_rtti_OBJGPU_OBJGPU,
        &__nvoc_rtti_OBJGPU_OBJTRACEABLE,
        &__nvoc_rtti_OBJGPU_RmHalspecOwner,
        &__nvoc_rtti_OBJGPU_Object,
    },
};
// Class descriptor consumed by the NVOC runtime: size, class id, cast table,
// and the varargs factory used for dynamic creation.
const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(OBJGPU),
        /*classId=*/ classId(OBJGPU),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "OBJGPU",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPU,
    /*pCastInfo=*/ &__nvoc_castinfo_OBJGPU,
    /*pExportInfo=*/ &__nvoc_export_info_OBJGPU
};
// OBJGPU exports no RM control entries.
const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPU =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};
void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner*);
void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE*);
// NVOC-generated destructor: runs the user-supplied gpuDestruct hook first,
// then tears down the Object, RmHalspecOwner, and OBJTRACEABLE bases.
void __nvoc_dtor_OBJGPU(OBJGPU *pThis) {
    __nvoc_gpuDestruct(pThis);
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
    __nvoc_dtor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner);
    __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE);
    PORT_UNREFERENCED_VARIABLE(pThis);
}
// NVOC-generated data-field initializer: sets per-chip/per-variant defaults
// before the user constructor runs. Each halspec test below is a set-
// membership check: (idx >> 5) selects a 32-entry group and
// (1 << (idx & 0x1f)) tests the variant's bit in that group's mask; the
// trailing comment names the matching chip set (e.g. T234D). The literals
// ((NvBool)(0 == 0)) and ((NvBool)(0 != 0)) are generated spellings of
// NV_TRUE and NV_FALSE. `if (0)` arms are branches pruned for this build.
void __nvoc_init_dataField_OBJGPU(OBJGPU *pThis) {
    ChipHal *chipHal = &staticCast(pThis, RmHalspecOwner)->chipHal;
    const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
    RmVariantHal *rmVariantHal = &staticCast(pThis, RmHalspecOwner)->rmVariantHal;
    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(chipHal);
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
    pThis->setProperty(pThis, PDB_PROP_GPU_IS_CONNECTED, ((NvBool)(0 == 0)));
    // NVOC Property Hal field -- PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY
    if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
    {
        pThis->setProperty(pThis, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY, ((NvBool)(0 == 0)));
    }
    // default
    else
    {
        pThis->setProperty(pThis, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY, ((NvBool)(0 != 0)));
    }
    // NVOC Property Hal field -- PDB_PROP_GPU_TEGRA_SOC_IGPU
    if (0)
    {
    }
    // default
    else
    {
        pThis->setProperty(pThis, PDB_PROP_GPU_TEGRA_SOC_IGPU, ((NvBool)(0 != 0)));
    }
    // NVOC Property Hal field -- PDB_PROP_GPU_ATS_SUPPORTED
    if (0)
    {
    }
    // default
    else
    {
        pThis->setProperty(pThis, PDB_PROP_GPU_ATS_SUPPORTED, ((NvBool)(0 != 0)));
    }
    // NVOC Property Hal field -- PDB_PROP_GPU_ZERO_FB
    if (0)
    {
    }
    // default
    else
    {
        pThis->setProperty(pThis, PDB_PROP_GPU_ZERO_FB, ((NvBool)(0 != 0)));
    }
    // NVOC Property Hal field -- PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE
    if (0)
    {
    }
    // default
    else
    {
        pThis->setProperty(pThis, PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE, ((NvBool)(0 != 0)));
    }
    // NVOC Property Hal field -- PDB_PROP_GPU_MIG_SUPPORTED
    if (0)
    {
    }
    // default
    else
    {
        pThis->setProperty(pThis, PDB_PROP_GPU_MIG_SUPPORTED, ((NvBool)(0 != 0)));
    }
    // NVOC Property Hal field -- PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED
    if (0)
    {
    }
    // default
    else
    {
        pThis->setProperty(pThis, PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED, ((NvBool)(0 != 0)));
    }
    // NVOC Property Hal field -- PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED
    if (0)
    {
    }
    // default
    else
    {
        pThis->setProperty(pThis, PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED, ((NvBool)(0 != 0)));
    }
    // NVOC Property Hal field -- PDB_PROP_GPU_IS_COT_ENABLED
    if (0)
    {
    }
    // default
    else
    {
        pThis->setProperty(pThis, PDB_PROP_GPU_IS_COT_ENABLED, ((NvBool)(0 != 0)));
    }
    pThis->boardId = ~0;          // ~0 marks "board id not yet assigned"
    pThis->deviceInstance = 32;   // out-of-range sentinel (valid instances are < 32)
    // Hal field -- isVirtual
    if (0)
    {
    }
    else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
    {
        pThis->isVirtual = ((NvBool)(0 != 0));
    }
    // Hal field -- isGspClient
    if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
    {
        pThis->isGspClient = ((NvBool)(0 == 0));
    }
    else if (0)
    {
    }
    pThis->bIsDebugModeEnabled = ((NvBool)(0 != 0));
    pThis->numOfMclkLockRequests = 0U;
    pThis->bUseRegisterAccessMap = !(0);
    pThis->boardInfo = ((void *)0);
    // Hal field -- bUnifiedMemorySpaceEnabled
    if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
    {
        pThis->bUnifiedMemorySpaceEnabled = ((NvBool)(0 == 0));
    }
    // default
    else
    {
        pThis->bUnifiedMemorySpaceEnabled = ((NvBool)(0 != 0));
    }
    // Hal field -- bWarBug200577889SriovHeavyEnabled
    pThis->bWarBug200577889SriovHeavyEnabled = ((NvBool)(0 != 0));
    // Hal field -- bNeed4kPageIsolation
    if (0)
    {
    }
    // default
    else
    {
        pThis->bNeed4kPageIsolation = ((NvBool)(0 != 0));
    }
    // Hal field -- bInstLoc47bitPaWar
    if (0)
    {
    }
    // default
    else
    {
        pThis->bInstLoc47bitPaWar = ((NvBool)(0 != 0));
    }
    // Hal field -- bIsBarPteInSysmemSupported
    if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
    {
        pThis->bIsBarPteInSysmemSupported = ((NvBool)(0 == 0));
    }
    // default
    else
    {
        pThis->bIsBarPteInSysmemSupported = ((NvBool)(0 != 0));
    }
    // Hal field -- bClientRmAllocatedCtxBuffer
    if (0)
    {
    }
    // default
    else
    {
        pThis->bClientRmAllocatedCtxBuffer = ((NvBool)(0 != 0));
    }
    // Hal field -- bVidmemPreservationBrokenBug3172217
    if (0)
    {
    }
    // default
    else
    {
        pThis->bVidmemPreservationBrokenBug3172217 = ((NvBool)(0 != 0));
    }
    // Hal field -- bInstanceMemoryAlwaysCached
    if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
    {
        pThis->bInstanceMemoryAlwaysCached = ((NvBool)(0 == 0));
    }
    // default
    else
    {
        pThis->bInstanceMemoryAlwaysCached = ((NvBool)(0 != 0));
    }
    pThis->bIsGeforce = ((NvBool)(0 == 0));
    // Hal field -- bComputePolicyTimesliceSupported
    if (0)
    {
    }
    // default
    else
    {
        pThis->bComputePolicyTimesliceSupported = ((NvBool)(0 != 0));
    }
}
NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner* );
NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE* );
// NVOC-generated constructor: bases in declaration order (Object,
// RmHalspecOwner, OBJTRACEABLE), then data-field defaults, then the
// user-supplied gpuConstruct. On failure the labeled fallthrough chain
// unwinds already-constructed bases in reverse order.
NV_STATUS __nvoc_ctor_OBJGPU(OBJGPU *pThis, NvU32 arg_gpuInstance) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
    if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_Object;
    status = __nvoc_ctor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner);
    if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_RmHalspecOwner;
    status = __nvoc_ctor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE);
    if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_OBJTRACEABLE;
    __nvoc_init_dataField_OBJGPU(pThis);
    status = __nvoc_gpuConstruct(pThis, arg_gpuInstance);
    if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail__init;
    goto __nvoc_ctor_OBJGPU_exit; // Success
__nvoc_ctor_OBJGPU_fail__init:
    __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE);
__nvoc_ctor_OBJGPU_fail_OBJTRACEABLE:
    __nvoc_dtor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner);
__nvoc_ctor_OBJGPU_fail_RmHalspecOwner:
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_OBJGPU_fail_Object:
__nvoc_ctor_OBJGPU_exit:
    return status;
}
// NVOC-generated vtable setup, section 1: the halspec locals are computed for
// generator symmetry, but OBJGPU installs no virtual methods in this build.
static void __nvoc_init_funcTable_OBJGPU_1(OBJGPU *pThis) {
    ChipHal *chipHal = &staticCast(pThis, RmHalspecOwner)->chipHal;
    const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
    RmVariantHal *rmVariantHal = &staticCast(pThis, RmHalspecOwner)->rmVariantHal;
    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(chipHal);
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
}
// Top-level vtable initializer; delegates to the generated section helpers.
void __nvoc_init_funcTable_OBJGPU(OBJGPU *pThis) {
    __nvoc_init_funcTable_OBJGPU_1(pThis);
}
void __nvoc_init_Object(Object*);
void __nvoc_init_RmHalspecOwner(RmHalspecOwner*, NvU32, NvU32, NvU32, RM_RUNTIME_VARIANT, NvU32);
void __nvoc_init_OBJTRACEABLE(OBJTRACEABLE*);
// NVOC-generated init: wires per-base convenience pointers, initializes the
// bases (forwarding the halspec selection arguments to RmHalspecOwner), then
// installs the function table.
void __nvoc_init_OBJGPU(OBJGPU *pThis,
        NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
        RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
        NvU32 DispIpHal_ipver) {
    pThis->__nvoc_pbase_OBJGPU = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    pThis->__nvoc_pbase_RmHalspecOwner = &pThis->__nvoc_base_RmHalspecOwner;
    pThis->__nvoc_pbase_OBJTRACEABLE = &pThis->__nvoc_base_OBJTRACEABLE;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver);
    __nvoc_init_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE);
    __nvoc_init_funcTable_OBJGPU(pThis);
}
// NVOC-generated factory: allocates zeroed non-paged memory, stamps RTTI,
// links the new object under pParent (unless the caller asked for the
// parent's halspec only), then runs init and the constructor chain.
// On constructor failure the object is freed without re-running destructors,
// because the constructor's own unwind chain already ran them.
NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU **ppThis, Dynamic *pParent, NvU32 createFlags,
        NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
        RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
        NvU32 DispIpHal_ipver, NvU32 arg_gpuInstance) {
    NV_STATUS status;
    Object *pParentObj;
    OBJGPU *pThis;
    pThis = portMemAllocNonPaged(sizeof(OBJGPU));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;
    portMemSet(pThis, 0, sizeof(OBJGPU));
    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGPU);
    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }
    __nvoc_init_OBJGPU(pThis, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver);
    status = __nvoc_ctor_OBJGPU(pThis, arg_gpuInstance);
    if (status != NV_OK) goto __nvoc_objCreate_OBJGPU_cleanup;
    *ppThis = pThis;
    return NV_OK;
__nvoc_objCreate_OBJGPU_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}
// Varargs factory entry point for OBJGPU: pulls the constructor arguments
// off the va_list in exactly the order the NVOC runtime packed them, then
// forwards to the typed factory.
NV_STATUS __nvoc_objCreateDynamic_OBJGPU(OBJGPU **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NvU32 ChipHal_arch = va_arg(args, NvU32);
    NvU32 ChipHal_impl = va_arg(args, NvU32);
    NvU32 ChipHal_hidrev = va_arg(args, NvU32);
    RM_RUNTIME_VARIANT RmVariantHal_rmVariant = va_arg(args, RM_RUNTIME_VARIANT);
    NvU32 DispIpHal_ipver = va_arg(args, NvU32);
    NvU32 arg_gpuInstance = va_arg(args, NvU32);
    return __nvoc_objCreate_OBJGPU(ppThis, pParent, createFlags,
                                   ChipHal_arch, ChipHal_impl, ChipHal_hidrev,
                                   RmVariantHal_rmVariant, DispIpHal_ipver,
                                   arg_gpuInstance);
}

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,309 @@
#define NVOC_GPU_RESOURCE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_gpu_resource_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x5d5d9f = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
void __nvoc_init_GpuResource(GpuResource*);
void __nvoc_init_funcTable_GpuResource(GpuResource*);
NV_STATUS __nvoc_ctor_GpuResource(GpuResource*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_GpuResource(GpuResource*);
void __nvoc_dtor_GpuResource(GpuResource*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuResource;
// Per-ancestor RTTI records for GpuResource: the concrete class at offset 0
// with the real destructor, plus one record per ancestor in the chain
// GpuResource -> RmResource -> RsResource -> Object (and the mixin
// RmResourceCommon), each carrying its offset within GpuResource.
static const struct NVOC_RTTI __nvoc_rtti_GpuResource_GpuResource = {
    /*pClassDef=*/ &__nvoc_class_def_GpuResource,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuResource,
    /*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_GpuResource_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_GpuResource_RsResource = {
    /*pClassDef=*/ &__nvoc_class_def_RsResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource),
};
static const struct NVOC_RTTI __nvoc_rtti_GpuResource_RmResourceCommon = {
    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};
static const struct NVOC_RTTI __nvoc_rtti_GpuResource_RmResource = {
    /*pClassDef=*/ &__nvoc_class_def_RmResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource),
};
// Cast table consumed by dynamicCast(): GpuResource plus its four relatives.
static const struct NVOC_CASTINFO __nvoc_castinfo_GpuResource = {
    /*numRelatives=*/ 5,
    /*relatives=*/ {
        &__nvoc_rtti_GpuResource_GpuResource,
        &__nvoc_rtti_GpuResource_RmResource,
        &__nvoc_rtti_GpuResource_RmResourceCommon,
        &__nvoc_rtti_GpuResource_RsResource,
        &__nvoc_rtti_GpuResource_Object,
    },
};
// Class descriptor consumed by the NVOC runtime: size, class id, cast table,
// and the varargs factory used for dynamic creation.
const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(GpuResource),
        /*classId=*/ classId(GpuResource),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "GpuResource",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuResource,
    /*pCastInfo=*/ &__nvoc_castinfo_GpuResource,
    /*pExportInfo=*/ &__nvoc_export_info_GpuResource
};
// Down-cast thunks installed into the base-class vtables for the virtuals
// that GpuResource overrides: each recovers the derived GpuResource pointer
// by subtracting the recorded subobject offset, then forwards to the
// gpures* implementation.
static NV_STATUS __nvoc_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pParams, pCpuMapping);
}
static NV_STATUS __nvoc_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pCpuMapping);
}
static NvBool __nvoc_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}
// Up-cast thunks for the virtuals GpuResource inherits unchanged: each
// gpures* entry point converts the derived pointer to the relevant base
// subobject (adding the recorded offset) and forwards to the base-class
// implementation (rmres* / res*).  The pattern is identical throughout.
static NV_STATUS __nvoc_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided) {
return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuResource_RmResource.offset), bSubdeviceHandleProvided);
}
static NV_STATUS __nvoc_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuResource_RmResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuResource_RmResource.offset), ppMemDesc);
}
static NvU32 __nvoc_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource) {
return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pParams);
}
static void __nvoc_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference) {
resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pReference);
}
static NV_STATUS __nvoc_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RmResource.offset), pCallContext, pParams);
}
static NvBool __nvoc_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource) {
return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pParams);
}
static void __nvoc_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource) {
resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pParams);
}
static void __nvoc_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RsResource_gpuresControlLookup(struct GpuResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pParams, ppEntry);
}
static NvBool __nvoc_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
// GpuResource exports no RM control methods of its own (empty export table).
const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuResource =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_RmResource(RmResource*);
// Destructor: GpuResource declares no destructor body of its own here, so
// teardown consists solely of destroying the RmResource base subobject.
void __nvoc_dtor_GpuResource(GpuResource *pThis) {
__nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// No GpuResource data fields need non-zero defaults; the whole object is
// zeroed at allocation time in __nvoc_objCreate_GpuResource.
void __nvoc_init_dataField_GpuResource(GpuResource *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
// Constructor: build the RmResource base first, then initialize this class's
// data fields, then run the user-provided gpuresConstruct.  Uses the standard
// goto-based unwind so a failing step destroys only what was already built.
NV_STATUS __nvoc_ctor_GpuResource(GpuResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_GpuResource_fail_RmResource;
__nvoc_init_dataField_GpuResource(pThis);
status = __nvoc_gpuresConstruct(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_GpuResource_fail__init;
goto __nvoc_ctor_GpuResource_exit; // Success
__nvoc_ctor_GpuResource_fail__init:
// gpuresConstruct failed: undo the base-class construction.
__nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_ctor_GpuResource_fail_RmResource:
__nvoc_ctor_GpuResource_exit:
return status;
}
// Populate the per-instance virtual function table.
static void __nvoc_init_funcTable_GpuResource_1(GpuResource *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
// Virtuals GpuResource implements itself.
pThis->__gpuresControl__ = &gpuresControl_IMPL;
pThis->__gpuresMap__ = &gpuresMap_IMPL;
pThis->__gpuresUnmap__ = &gpuresUnmap_IMPL;
pThis->__gpuresShareCallback__ = &gpuresShareCallback_IMPL;
pThis->__gpuresGetRegBaseOffsetAndSize__ = &gpuresGetRegBaseOffsetAndSize_IMPL;
pThis->__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL;
pThis->__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL;
pThis->__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL;
// Override the base-class vtable slots with down-cast thunks so calls
// through a base pointer reach the GpuResource implementations above.
pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resControl__ = &__nvoc_thunk_GpuResource_resControl;
pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resMap__ = &__nvoc_thunk_GpuResource_resMap;
pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resUnmap__ = &__nvoc_thunk_GpuResource_resUnmap;
pThis->__nvoc_base_RmResource.__rmresShareCallback__ = &__nvoc_thunk_GpuResource_rmresShareCallback;
// Inherited virtuals: route gpures* calls to the base classes via the
// up-cast thunks.
pThis->__gpuresCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_gpuresCheckMemInterUnmap;
pThis->__gpuresGetMemInterMapParams__ = &__nvoc_thunk_RmResource_gpuresGetMemInterMapParams;
pThis->__gpuresGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_gpuresGetMemoryMappingDescriptor;
pThis->__gpuresGetRefCount__ = &__nvoc_thunk_RsResource_gpuresGetRefCount;
pThis->__gpuresControlFilter__ = &__nvoc_thunk_RsResource_gpuresControlFilter;
pThis->__gpuresAddAdditionalDependants__ = &__nvoc_thunk_RsResource_gpuresAddAdditionalDependants;
pThis->__gpuresControl_Prologue__ = &__nvoc_thunk_RmResource_gpuresControl_Prologue;
pThis->__gpuresCanCopy__ = &__nvoc_thunk_RsResource_gpuresCanCopy;
pThis->__gpuresMapTo__ = &__nvoc_thunk_RsResource_gpuresMapTo;
pThis->__gpuresPreDestruct__ = &__nvoc_thunk_RsResource_gpuresPreDestruct;
pThis->__gpuresUnmapFrom__ = &__nvoc_thunk_RsResource_gpuresUnmapFrom;
pThis->__gpuresControl_Epilogue__ = &__nvoc_thunk_RmResource_gpuresControl_Epilogue;
pThis->__gpuresControlLookup__ = &__nvoc_thunk_RsResource_gpuresControlLookup;
pThis->__gpuresAccessCallback__ = &__nvoc_thunk_RmResource_gpuresAccessCallback;
}
// Single-part func-table initializer (NVOC splits large tables into _1, _2,
// ...; GpuResource needs only one part).
void __nvoc_init_funcTable_GpuResource(GpuResource *pThis) {
__nvoc_init_funcTable_GpuResource_1(pThis);
}
void __nvoc_init_RmResource(RmResource*);
// Wire up the pbase shortcut pointers to each ancestor subobject, initialize
// the base class, then install this class's virtual function table.
void __nvoc_init_GpuResource(GpuResource *pThis) {
pThis->__nvoc_pbase_GpuResource = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;
pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;
__nvoc_init_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_init_funcTable_GpuResource(pThis);
}
/*
 * Allocate, parent, initialize, and construct a GpuResource.
 *
 * On success *ppThis receives the new object.  On constructor failure the
 * memory is freed and the error status returned; *ppThis is left untouched.
 */
NV_STATUS __nvoc_objCreate_GpuResource(GpuResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status;
Object *pParentObj;
GpuResource *pThis;
pThis = portMemAllocNonPaged(sizeof(GpuResource));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
// Zero-fill so __nvoc_init_dataField_* only needs to set non-zero defaults.
portMemSet(pThis, 0, sizeof(GpuResource));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GpuResource);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
// NOTE(review): dynamicCast result is not NULL-checked before use —
// presumably every valid parent derives from Object; confirm against
// the NVOC object model.
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_GpuResource(pThis);
status = __nvoc_ctor_GpuResource(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_objCreate_GpuResource_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_GpuResource_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
// Varargs creation shim used by the generic NVOC factory (objCreatefn slot):
// unpacks the (CALL_CONTEXT *, RS_RES_ALLOC_PARAMS_INTERNAL *) pair from
// `args` and forwards to the typed creator above.
NV_STATUS __nvoc_objCreateDynamic_GpuResource(GpuResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
NV_STATUS status;
struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
status = __nvoc_objCreate_GpuResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
return status;
}

/* ---- file boundary (extraction artifact removed): start of the generated
 *      GpuResource NVOC header (g_gpu_resource_nvoc.h) ---- */
#ifndef _G_GPU_RESOURCE_NVOC_H_
#define _G_GPU_RESOURCE_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_gpu_resource_nvoc.h"
#ifndef _GPURESOURCE_H_
#define _GPURESOURCE_H_
#include "core/core.h"
#include "gpu/mem_mgr/mem_desc.h"
#include "rmapi/resource.h"
// Forward declarations plus NVOC typedef/class-id boilerplate for classes
// referenced here by pointer only (OBJGPU, Device, Subdevice); this avoids
// including their full headers.
struct OBJGPU;
#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__
#define __NVOC_CLASS_OBJGPU_TYPEDEF__
typedef struct OBJGPU OBJGPU;
#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJGPU
#define __nvoc_class_id_OBJGPU 0x7ef3cb
#endif /* __nvoc_class_id_OBJGPU */
struct Device;
#ifndef __NVOC_CLASS_Device_TYPEDEF__
#define __NVOC_CLASS_Device_TYPEDEF__
typedef struct Device Device;
#endif /* __NVOC_CLASS_Device_TYPEDEF__ */
#ifndef __nvoc_class_id_Device
#define __nvoc_class_id_Device 0xe0ac20
#endif /* __nvoc_class_id_Device */
struct Subdevice;
#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__
#define __NVOC_CLASS_Subdevice_TYPEDEF__
typedef struct Subdevice Subdevice;
#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */
#ifndef __nvoc_class_id_Subdevice
#define __nvoc_class_id_Subdevice 0x4b01b3
#endif /* __nvoc_class_id_Subdevice */
// Convenience accessors: treat any resource deriving from GpuResource as a
// GpuResource (unchecked static cast) and read its cached back-pointers.
#define GPU_RES_GET_GPU(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pGpu
#define GPU_RES_GET_GPUGRP(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pGpuGrp
#define GPU_RES_GET_DEVICE(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pDevice
#define GPU_RES_GET_SUBDEVICE(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pSubdevice
// Apply the resource's broadcast/unicast flag to the current thread state
// via gpuSetThreadBcState (multi-statement macro, hence do/while(0)).
#define GPU_RES_SET_THREAD_BC_STATE(pRes) do { \
gpuSetThreadBcState(staticCastNoPtrCheck((pRes), GpuResource)->pGpu, \
staticCastNoPtrCheck((pRes), GpuResource)->bBcResource); \
} while(0)
/*!
 * Abstract base class for common CPU mapping operations
 */
// PRIVATE_FIELD gates direct access to private members: translation units
// that define NVOC_GPU_RESOURCE_H_PRIVATE_ACCESS_ALLOWED see the field name
// unchanged; all others get the NVOC_PRIVATE_FIELD-mangled form.
#ifdef NVOC_GPU_RESOURCE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// NVOC-generated layout for GpuResource.  Field order is part of the ABI:
// RTTI pointer first, then the single base-class subobject, then the pbase
// shortcut pointers, then the per-instance vtable, then data members.
struct GpuResource {
const struct NVOC_RTTI *__nvoc_rtti;
struct RmResource __nvoc_base_RmResource;
// Cached pointers to each ancestor subobject (set in __nvoc_init_GpuResource).
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct GpuResource *__nvoc_pbase_GpuResource;
// Per-instance virtual function table (filled by __nvoc_init_funcTable_*).
NV_STATUS (*__gpuresControl__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__gpuresMap__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
NV_STATUS (*__gpuresUnmap__)(struct GpuResource *, struct CALL_CONTEXT *, struct RsCpuMapping *);
NvBool (*__gpuresShareCallback__)(struct GpuResource *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__gpuresGetRegBaseOffsetAndSize__)(struct GpuResource *, struct OBJGPU *, NvU32 *, NvU32 *);
NV_STATUS (*__gpuresGetMapAddrSpace__)(struct GpuResource *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
NV_STATUS (*__gpuresInternalControlForward__)(struct GpuResource *, NvU32, void *, NvU32);
NvHandle (*__gpuresGetInternalObjectHandle__)(struct GpuResource *);
NV_STATUS (*__gpuresCheckMemInterUnmap__)(struct GpuResource *, NvBool);
NV_STATUS (*__gpuresGetMemInterMapParams__)(struct GpuResource *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__gpuresGetMemoryMappingDescriptor__)(struct GpuResource *, struct MEMORY_DESCRIPTOR **);
NvU32 (*__gpuresGetRefCount__)(struct GpuResource *);
NV_STATUS (*__gpuresControlFilter__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
void (*__gpuresAddAdditionalDependants__)(struct RsClient *, struct GpuResource *, RsResourceRef *);
NV_STATUS (*__gpuresControl_Prologue__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NvBool (*__gpuresCanCopy__)(struct GpuResource *);
NV_STATUS (*__gpuresMapTo__)(struct GpuResource *, RS_RES_MAP_TO_PARAMS *);
void (*__gpuresPreDestruct__)(struct GpuResource *);
NV_STATUS (*__gpuresUnmapFrom__)(struct GpuResource *, RS_RES_UNMAP_FROM_PARAMS *);
void (*__gpuresControl_Epilogue__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__gpuresControlLookup__)(struct GpuResource *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NvBool (*__gpuresAccessCallback__)(struct GpuResource *, struct RsClient *, void *, RsAccessRight);
// Data members: back-pointers to the owning GPU group / GPU and the
// device/subdevice context.  bBcResource is fed to gpuSetThreadBcState by
// GPU_RES_SET_THREAD_BC_STATE above.
struct OBJGPUGRP *pGpuGrp;
struct OBJGPU *pGpu;
struct Device *pDevice;
struct Subdevice *pSubdevice;
NvBool bBcResource;
};
// Typedef / class-id boilerplate plus the cast and creation helpers for
// GpuResource.
#ifndef __NVOC_CLASS_GpuResource_TYPEDEF__
#define __NVOC_CLASS_GpuResource_TYPEDEF__
typedef struct GpuResource GpuResource;
#endif /* __NVOC_CLASS_GpuResource_TYPEDEF__ */
#ifndef __nvoc_class_id_GpuResource
#define __nvoc_class_id_GpuResource 0x5d5d9f
#endif /* __nvoc_class_id_GpuResource */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
// Static cast: just read the cached pbase pointer (no run-time check).
#define __staticCast_GpuResource(pThis) \
((pThis)->__nvoc_pbase_GpuResource)
// Dynamic cast: RTTI-checked; yields NULL when the class is compiled out.
#ifdef __nvoc_gpu_resource_h_disabled
#define __dynamicCast_GpuResource(pThis) ((GpuResource*)NULL)
#else //__nvoc_gpu_resource_h_disabled
#define __dynamicCast_GpuResource(pThis) \
((GpuResource*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuResource)))
#endif //__nvoc_gpu_resource_h_disabled
NV_STATUS __nvoc_objCreateDynamic_GpuResource(GpuResource**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_GpuResource(GpuResource**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_GpuResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_GpuResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
// Public method names map to the _DISPATCH inline wrappers below, which call
// through the per-instance vtable.
#define gpuresControl(pGpuResource, pCallContext, pParams) gpuresControl_DISPATCH(pGpuResource, pCallContext, pParams)
#define gpuresMap(pGpuResource, pCallContext, pParams, pCpuMapping) gpuresMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define gpuresUnmap(pGpuResource, pCallContext, pCpuMapping) gpuresUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define gpuresShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) gpuresShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define gpuresGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) gpuresGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
#define gpuresGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) gpuresGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define gpuresInternalControlForward(pGpuResource, command, pParams, size) gpuresInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define gpuresGetInternalObjectHandle(pGpuResource) gpuresGetInternalObjectHandle_DISPATCH(pGpuResource)
#define gpuresCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) gpuresCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define gpuresGetMemInterMapParams(pRmResource, pParams) gpuresGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define gpuresGetMemoryMappingDescriptor(pRmResource, ppMemDesc) gpuresGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define gpuresGetRefCount(pResource) gpuresGetRefCount_DISPATCH(pResource)
#define gpuresControlFilter(pResource, pCallContext, pParams) gpuresControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define gpuresAddAdditionalDependants(pClient, pResource, pReference) gpuresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define gpuresControl_Prologue(pResource, pCallContext, pParams) gpuresControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define gpuresCanCopy(pResource) gpuresCanCopy_DISPATCH(pResource)
#define gpuresMapTo(pResource, pParams) gpuresMapTo_DISPATCH(pResource, pParams)
#define gpuresPreDestruct(pResource) gpuresPreDestruct_DISPATCH(pResource)
#define gpuresUnmapFrom(pResource, pParams) gpuresUnmapFrom_DISPATCH(pResource, pParams)
#define gpuresControl_Epilogue(pResource, pCallContext, pParams) gpuresControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define gpuresControlLookup(pResource, pParams, ppEntry) gpuresControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define gpuresAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) gpuresAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
// _IMPL prototypes (implemented in gpu_resource.c) and the _DISPATCH inline
// wrappers for the virtuals GpuResource implements itself.  Each wrapper
// simply indirects through the matching vtable slot.
NV_STATUS gpuresControl_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
static inline NV_STATUS gpuresControl_DISPATCH(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pGpuResource->__gpuresControl__(pGpuResource, pCallContext, pParams);
}
NV_STATUS gpuresMap_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping);
static inline NV_STATUS gpuresMap_DISPATCH(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__gpuresMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}
NV_STATUS gpuresUnmap_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping);
static inline NV_STATUS gpuresUnmap_DISPATCH(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__gpuresUnmap__(pGpuResource, pCallContext, pCpuMapping);
}
NvBool gpuresShareCallback_IMPL(struct GpuResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy);
static inline NvBool gpuresShareCallback_DISPATCH(struct GpuResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pGpuResource->__gpuresShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}
NV_STATUS gpuresGetRegBaseOffsetAndSize_IMPL(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize);
static inline NV_STATUS gpuresGetRegBaseOffsetAndSize_DISPATCH(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return pGpuResource->__gpuresGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize);
}
NV_STATUS gpuresGetMapAddrSpace_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace);
static inline NV_STATUS gpuresGetMapAddrSpace_DISPATCH(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return pGpuResource->__gpuresGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}
NV_STATUS gpuresInternalControlForward_IMPL(struct GpuResource *pGpuResource, NvU32 command, void *pParams, NvU32 size);
static inline NV_STATUS gpuresInternalControlForward_DISPATCH(struct GpuResource *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return pGpuResource->__gpuresInternalControlForward__(pGpuResource, command, pParams, size);
}
NvHandle gpuresGetInternalObjectHandle_IMPL(struct GpuResource *pGpuResource);
static inline NvHandle gpuresGetInternalObjectHandle_DISPATCH(struct GpuResource *pGpuResource) {
return pGpuResource->__gpuresGetInternalObjectHandle__(pGpuResource);
}
// _DISPATCH inline wrappers for virtuals inherited from RmResource /
// RsResource (no _IMPL prototypes here: the vtable slots point at base-class
// thunks installed by the generated .c file).
static inline NV_STATUS gpuresCheckMemInterUnmap_DISPATCH(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__gpuresCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NV_STATUS gpuresGetMemInterMapParams_DISPATCH(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return pRmResource->__gpuresGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS gpuresGetMemoryMappingDescriptor_DISPATCH(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return pRmResource->__gpuresGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline NvU32 gpuresGetRefCount_DISPATCH(struct GpuResource *pResource) {
return pResource->__gpuresGetRefCount__(pResource);
}
static inline NV_STATUS gpuresControlFilter_DISPATCH(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__gpuresControlFilter__(pResource, pCallContext, pParams);
}
static inline void gpuresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference) {
pResource->__gpuresAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NV_STATUS gpuresControl_Prologue_DISPATCH(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__gpuresControl_Prologue__(pResource, pCallContext, pParams);
}
static inline NvBool gpuresCanCopy_DISPATCH(struct GpuResource *pResource) {
return pResource->__gpuresCanCopy__(pResource);
}
static inline NV_STATUS gpuresMapTo_DISPATCH(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__gpuresMapTo__(pResource, pParams);
}
static inline void gpuresPreDestruct_DISPATCH(struct GpuResource *pResource) {
pResource->__gpuresPreDestruct__(pResource);
}
static inline NV_STATUS gpuresUnmapFrom_DISPATCH(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pResource->__gpuresUnmapFrom__(pResource, pParams);
}
static inline void gpuresControl_Epilogue_DISPATCH(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__gpuresControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS gpuresControlLookup_DISPATCH(struct GpuResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__gpuresControlLookup__(pResource, pParams, ppEntry);
}
static inline NvBool gpuresAccessCallback_DISPATCH(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pResource->__gpuresAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
// Non-virtual methods.  Each has an _IMPL prototype and a wrapper macro;
// when the class is compiled out (__nvoc_gpu_resource_h_disabled) the
// wrappers become stubs that assert and, where applicable, return
// NV_ERR_NOT_SUPPORTED.
NV_STATUS gpuresConstruct_IMPL(struct GpuResource *arg_pGpuResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_gpuresConstruct(arg_pGpuResource, arg_pCallContext, arg_pParams) gpuresConstruct_IMPL(arg_pGpuResource, arg_pCallContext, arg_pParams)
NV_STATUS gpuresCopyConstruct_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams);
#ifdef __nvoc_gpu_resource_h_disabled
static inline NV_STATUS gpuresCopyConstruct(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) {
NV_ASSERT_FAILED_PRECOMP("GpuResource was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_resource_h_disabled
#define gpuresCopyConstruct(pGpuResource, pCallContext, pParams) gpuresCopyConstruct_IMPL(pGpuResource, pCallContext, pParams)
#endif //__nvoc_gpu_resource_h_disabled
void gpuresSetGpu_IMPL(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvBool bBcResource);
#ifdef __nvoc_gpu_resource_h_disabled
static inline void gpuresSetGpu(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvBool bBcResource) {
NV_ASSERT_FAILED_PRECOMP("GpuResource was disabled!");
}
#else //__nvoc_gpu_resource_h_disabled
#define gpuresSetGpu(pGpuResource, pGpu, bBcResource) gpuresSetGpu_IMPL(pGpuResource, pGpu, bBcResource)
#endif //__nvoc_gpu_resource_h_disabled
void gpuresControlSetup_IMPL(struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, struct GpuResource *pGpuResource);
#ifdef __nvoc_gpu_resource_h_disabled
static inline void gpuresControlSetup(struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, struct GpuResource *pGpuResource) {
NV_ASSERT_FAILED_PRECOMP("GpuResource was disabled!");
}
#else //__nvoc_gpu_resource_h_disabled
#define gpuresControlSetup(pParams, pGpuResource) gpuresControlSetup_IMPL(pParams, pGpuResource)
#endif //__nvoc_gpu_resource_h_disabled
// Handle-lookup helpers (no disabled-stub variants are generated for these).
NV_STATUS gpuresGetByHandle_IMPL(struct RsClient *pClient, NvHandle hResource, struct GpuResource **ppGpuResource);
#define gpuresGetByHandle(pClient, hResource, ppGpuResource) gpuresGetByHandle_IMPL(pClient, hResource, ppGpuResource)
NV_STATUS gpuresGetByDeviceOrSubdeviceHandle_IMPL(struct RsClient *pClient, NvHandle hResource, struct GpuResource **ppGpuResource);
#define gpuresGetByDeviceOrSubdeviceHandle(pClient, hResource, ppGpuResource) gpuresGetByDeviceOrSubdeviceHandle_IMPL(pClient, hResource, ppGpuResource)
#undef PRIVATE_FIELD
#endif // _GPURESOURCE_H_
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_GPU_RESOURCE_NVOC_H_

/* ---- file boundary (extraction artifact removed): start of the rmconfig-
 *      generated HAL header (g_hal.h, profile devel-soc-disp-dce-client) ---- */
// This file is automatically generated by rmconfig - DO NOT EDIT!
//
// HAL support for use in HAL setup
//
// Profile: devel-soc-disp-dce-client
// Template: templates/gt_hal.h
//
#ifndef _G_RMCFG_HAL_H_
#define _G_RMCFG_HAL_H_
// Pointer typedefs for the per-engine HAL interface tables; the struct
// definitions live in the corresponding engine headers.
typedef struct DISP_HAL_IFACES *PDISP_HAL_IFACES;
typedef struct DPU_HAL_IFACES *PDPU_HAL_IFACES;
typedef struct GPIO_HAL_IFACES *PGPIO_HAL_IFACES;
typedef struct RPC_HAL_IFACES *PRPC_HAL_IFACES;
typedef struct RPCSTRUCTURECOPY_HAL_IFACES *PRPCSTRUCTURECOPY_HAL_IFACES;
//
// per-GPU list of function ptrs to setup iface for each engine
//
// NOTE(review): the member list is empty in this profile (no engines emit a
// setup fn); an empty struct is a GCC/Clang extension, not strict ISO C.
typedef struct {
} HAL_IFACE_SETUP, *PHAL_IFACE_SETUP;
//
// IP_VERSIONS support
//
typedef struct IGRP_IP_VERSIONS_TABLE_INFO IGRP_IP_VERSIONS_TABLE_INFO;
// generic form of Head_iGrp_ipVersions_getInfo typedef
typedef NV_STATUS IGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *);
typedef void IGrp_ipVersions_install(IGRP_IP_VERSIONS_TABLE_INFO *);
typedef NV_STATUS IGrp_ipVersions_wrapup(IGRP_IP_VERSIONS_TABLE_INFO *);
// a single inclusive version range [v0, v1]
typedef struct {
NvU32 v0;
NvU32 v1;
} IGRP_IP_VERSION_RANGE;
// One table entry: a set of version ranges and the install callback that
// applies when the detected IP version falls inside one of them.
typedef struct {
const IGRP_IP_VERSION_RANGE *pRanges;
NvU32 numRanges;
IGrp_ipVersions_install *ifacesInstallFn;
} IGRP_IP_VERSIONS_ENTRY;
// Per-engine-group lookup context passed to the getInfo/install/wrapup
// callbacks above.
struct IGRP_IP_VERSIONS_TABLE_INFO {
POBJGPU pGpu;
Dynamic *pDynamic; // eg: pBiff
const IGRP_IP_VERSIONS_ENTRY *pTable;
NvU32 numEntries;
IGrp_ipVersions_wrapup *ifacesWrapupFn; // overrides and asserts
};
// HAL_IMPLEMENTATION enum
//
// Every HAL implementation rmconfig knows about, across all profiles.  The
// ordinal values index generated chip tables (e.g. chipID[] in
// g_hal_archimpl.h), so the order here is ABI for this build — do not
// reorder or insert mid-list.
typedef enum
{
HAL_IMPL_GF100,
HAL_IMPL_GF100B,
HAL_IMPL_GF104,
HAL_IMPL_GF104B,
HAL_IMPL_GF106,
HAL_IMPL_GF106B,
HAL_IMPL_GF108,
HAL_IMPL_GF110D,
HAL_IMPL_GF110,
HAL_IMPL_GF117,
HAL_IMPL_GF118,
HAL_IMPL_GF119,
HAL_IMPL_GF110F,
HAL_IMPL_GF110F2,
HAL_IMPL_GF110F3,
HAL_IMPL_GK104,
HAL_IMPL_GK106,
HAL_IMPL_GK107,
HAL_IMPL_GK20A,
HAL_IMPL_GK110,
HAL_IMPL_GK110B,
HAL_IMPL_GK110C,
HAL_IMPL_GK208,
HAL_IMPL_GK208S,
HAL_IMPL_GM107,
HAL_IMPL_GM108,
HAL_IMPL_GM200,
HAL_IMPL_GM204,
HAL_IMPL_GM206,
HAL_IMPL_GP100,
HAL_IMPL_GP102,
HAL_IMPL_GP104,
HAL_IMPL_GP106,
HAL_IMPL_GP107,
HAL_IMPL_GP108,
HAL_IMPL_GV100,
HAL_IMPL_GV11B,
HAL_IMPL_TU102,
HAL_IMPL_TU104,
HAL_IMPL_TU106,
HAL_IMPL_TU116,
HAL_IMPL_TU117,
HAL_IMPL_GA100,
HAL_IMPL_GA102,
HAL_IMPL_GA103,
HAL_IMPL_GA104,
HAL_IMPL_GA106,
HAL_IMPL_GA107,
HAL_IMPL_GA10B,
HAL_IMPL_GA102F,
HAL_IMPL_T001_FERMI_NOT_EXIST,
HAL_IMPL_T124,
HAL_IMPL_T132,
HAL_IMPL_T210,
HAL_IMPL_T186,
HAL_IMPL_T194,
HAL_IMPL_T002_TURING_NOT_EXIST,
HAL_IMPL_T234,
HAL_IMPL_T234D,
HAL_IMPL_AMODEL,
HAL_IMPL_MAXIMUM, // NOTE: this symbol must be at the end of the enum list.
// It is used to allocate arrays and control loop iterations.
} HAL_IMPLEMENTATION;
//
// HAL implementation names for debug & logging use
//
// Only T234D appears because it is the sole chip enabled in this profile
// (devel-soc-disp-dce-client).
#define HAL_IMPL_NAME_LIST \
{ HAL_IMPL_T234D, "T234D" }
#endif // _G_RMCFG_HAL_H_

View File

@@ -0,0 +1,94 @@
// This file is automatically generated by rmconfig - DO NOT EDIT!
//
// Hal registration entry points.
//
// Profile: devel-soc-disp-dce-client
// Template: templates/gt_hal_archimpl.h
//
// Chips: T234D
//
#ifndef _G_RMCFG_HAL_ARCHIMPL_H_
#define _G_RMCFG_HAL_ARCHIMPL_H_
#include "g_hal.h"
// OpenRM for Tegra build uses different include path
// The following lines refer to the same file.
// TODO: merge them
#include "nv_ref.h"
//
// CHIPID array Implementation
//
// Chip identification table, indexed by HAL_IMPLEMENTATION. Every chip
// that is disabled in this profile gets an all-zero entry; only T234D
// carries a real hidrev (0x235). Entries must stay in enum order.
// NOTE(review): this is a non-static definition in a header — it appears
// to rely on being included from exactly one translation unit; confirm
// before adding new includers.
const struct ChipID
{
NvU32 arch;   // architecture id (0 when the chip is disabled)
NvU32 impl;   // implementation id within the architecture
NvU32 hidrev; // Tegra HIDREV value used to match the running chip
} chipID[] = {
{ 0x0, 0x0, 0x0 } , // GF100 (disabled)
{ 0x0, 0x0, 0x0 } , // GF100B (disabled)
{ 0x0, 0x0, 0x0 } , // GF104 (disabled)
{ 0x0, 0x0, 0x0 } , // GF104B (disabled)
{ 0x0, 0x0, 0x0 } , // GF106 (disabled)
{ 0x0, 0x0, 0x0 } , // GF106B (disabled)
{ 0x0, 0x0, 0x0 } , // GF108 (disabled)
{ 0x0, 0x0, 0x0 } , // GF110D (disabled)
{ 0x0, 0x0, 0x0 } , // GF110 (disabled)
{ 0x0, 0x0, 0x0 } , // GF117 (disabled)
{ 0x0, 0x0, 0x0 } , // GF118 (disabled)
{ 0x0, 0x0, 0x0 } , // GF119 (disabled)
{ 0x0, 0x0, 0x0 } , // GF110F (disabled)
{ 0x0, 0x0, 0x0 } , // GF110F2 (disabled)
{ 0x0, 0x0, 0x0 } , // GF110F3 (disabled)
{ 0x0, 0x0, 0x0 } , // GK104 (disabled)
{ 0x0, 0x0, 0x0 } , // GK106 (disabled)
{ 0x0, 0x0, 0x0 } , // GK107 (disabled)
{ 0x0, 0x0, 0x0 } , // GK20A (disabled)
{ 0x0, 0x0, 0x0 } , // GK110 (disabled)
{ 0x0, 0x0, 0x0 } , // GK110B (disabled)
{ 0x0, 0x0, 0x0 } , // GK110C (disabled)
{ 0x0, 0x0, 0x0 } , // GK208 (disabled)
{ 0x0, 0x0, 0x0 } , // GK208S (disabled)
{ 0x0, 0x0, 0x0 } , // GM107 (disabled)
{ 0x0, 0x0, 0x0 } , // GM108 (disabled)
{ 0x0, 0x0, 0x0 } , // GM200 (disabled)
{ 0x0, 0x0, 0x0 } , // GM204 (disabled)
{ 0x0, 0x0, 0x0 } , // GM206 (disabled)
{ 0x0, 0x0, 0x0 } , // GP100 (disabled)
{ 0x0, 0x0, 0x0 } , // GP102 (disabled)
{ 0x0, 0x0, 0x0 } , // GP104 (disabled)
{ 0x0, 0x0, 0x0 } , // GP106 (disabled)
{ 0x0, 0x0, 0x0 } , // GP107 (disabled)
{ 0x0, 0x0, 0x0 } , // GP108 (disabled)
{ 0x0, 0x0, 0x0 } , // GV100 (disabled)
{ 0x0, 0x0, 0x0 } , // GV11B (disabled)
{ 0x0, 0x0, 0x0 } , // TU102 (disabled)
{ 0x0, 0x0, 0x0 } , // TU104 (disabled)
{ 0x0, 0x0, 0x0 } , // TU106 (disabled)
{ 0x0, 0x0, 0x0 } , // TU116 (disabled)
{ 0x0, 0x0, 0x0 } , // TU117 (disabled)
{ 0x0, 0x0, 0x0 } , // GA100 (disabled)
{ 0x0, 0x0, 0x0 } , // GA102 (disabled)
{ 0x0, 0x0, 0x0 } , // GA103 (disabled)
{ 0x0, 0x0, 0x0 } , // GA104 (disabled)
{ 0x0, 0x0, 0x0 } , // GA106 (disabled)
{ 0x0, 0x0, 0x0 } , // GA107 (disabled)
{ 0x0, 0x0, 0x0 } , // GA10B (disabled)
{ 0x0, 0x0, 0x0 } , // GA102F (disabled)
{ 0x0, 0x0, 0x0 } , // T001_FERMI_NOT_EXIST (disabled)
{ 0x0, 0x0, 0x0 } , // T124 (disabled)
{ 0x0, 0x0, 0x0 } , // T132 (disabled)
{ 0x0, 0x0, 0x0 } , // T210 (disabled)
{ 0x0, 0x0, 0x0 } , // T186 (disabled)
{ 0x0, 0x0, 0x0 } , // T194 (disabled)
{ 0x0, 0x0, 0x0 } , // T002_TURING_NOT_EXIST (disabled)
{ 0x0, 0x0, 0x0 } , // T234 (disabled)
{ 0x0, 0x0, 0x235 } , // T234D
{ 0x0, 0x0, 0x0 } , // AMODEL (disabled)
};
#endif // _G_RMCFG_HAL_ARCHIMPL_H_

View File

@@ -0,0 +1,154 @@
#define NVOC_HAL_MGR_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_hal_mgr_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xbf26de = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHALMGR;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
void __nvoc_init_OBJHALMGR(OBJHALMGR*);
void __nvoc_init_funcTable_OBJHALMGR(OBJHALMGR*);
NV_STATUS __nvoc_ctor_OBJHALMGR(OBJHALMGR*);
void __nvoc_init_dataField_OBJHALMGR(OBJHALMGR*);
void __nvoc_dtor_OBJHALMGR(OBJHALMGR*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHALMGR;
// NVOC run-time type information for OBJHALMGR itself (offset 0 — the
// "most derived" identity entry, with the real destructor).
static const struct NVOC_RTTI __nvoc_rtti_OBJHALMGR_OBJHALMGR = {
/*pClassDef=*/ &__nvoc_class_def_OBJHALMGR,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJHALMGR,
/*offset=*/ 0,
};
// RTTI for the embedded Object base; destruction through this view is
// redirected to the derived class via __nvoc_destructFromBase.
static const struct NVOC_RTTI __nvoc_rtti_OBJHALMGR_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(OBJHALMGR, __nvoc_base_Object),
};
// Cast table used by dynamicCast(): OBJHALMGR is related to itself and
// to its single base class, Object.
static const struct NVOC_CASTINFO __nvoc_castinfo_OBJHALMGR = {
/*numRelatives=*/ 2,
/*relatives=*/ {
&__nvoc_rtti_OBJHALMGR_OBJHALMGR,
&__nvoc_rtti_OBJHALMGR_Object,
},
};
// Class definition record: size, class id, optional debug name, the
// dynamic-creation entry point, and the cast/export tables above.
const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHALMGR =
{
/*classInfo=*/ {
/*size=*/ sizeof(OBJHALMGR),
/*classId=*/ classId(OBJHALMGR),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "OBJHALMGR",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJHALMGR,
/*pCastInfo=*/ &__nvoc_castinfo_OBJHALMGR,
/*pExportInfo=*/ &__nvoc_export_info_OBJHALMGR
};
// OBJHALMGR exports no RM control methods.
const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHALMGR =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_Object(Object*);
// Destructor: runs the user-written halmgrDestruct first, then tears
// down the Object base (derived-before-base order is required).
void __nvoc_dtor_OBJHALMGR(OBJHALMGR *pThis) {
__nvoc_halmgrDestruct(pThis);
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Initialize NVOC-managed data fields. OBJHALMGR declares none, so this
// is an intentional no-op kept for structural uniformity.
void __nvoc_init_dataField_OBJHALMGR(OBJHALMGR *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_Object(Object* );
// Constructor chain: Object base first, then data fields, then the
// user-written halmgrConstruct. On failure each stage unwinds only the
// stages that already succeeded (goto-label cleanup ladder).
NV_STATUS __nvoc_ctor_OBJHALMGR(OBJHALMGR *pThis) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
if (status != NV_OK) goto __nvoc_ctor_OBJHALMGR_fail_Object;
__nvoc_init_dataField_OBJHALMGR(pThis);
status = __nvoc_halmgrConstruct(pThis);
if (status != NV_OK) goto __nvoc_ctor_OBJHALMGR_fail__init;
goto __nvoc_ctor_OBJHALMGR_exit; // Success
__nvoc_ctor_OBJHALMGR_fail__init:
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_OBJHALMGR_fail_Object:
__nvoc_ctor_OBJHALMGR_exit:
return status;
}
// Virtual-function-table setup, part 1. OBJHALMGR has no virtual
// methods, so nothing is installed here.
static void __nvoc_init_funcTable_OBJHALMGR_1(OBJHALMGR *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Public func-table initializer; delegates to the numbered part(s).
void __nvoc_init_funcTable_OBJHALMGR(OBJHALMGR *pThis) {
__nvoc_init_funcTable_OBJHALMGR_1(pThis);
}
void __nvoc_init_Object(Object*);
// Wire up the per-base "pbase" back-pointers, initialize the Object
// base, and install the function table. Runs before the constructor.
void __nvoc_init_OBJHALMGR(OBJHALMGR *pThis) {
pThis->__nvoc_pbase_OBJHALMGR = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
__nvoc_init_Object(&pThis->__nvoc_base_Object);
__nvoc_init_funcTable_OBJHALMGR(pThis);
}
// Factory: allocate, zero, and construct an OBJHALMGR.
// On success *ppThis receives the new object; on failure the memory is
// freed (the ctor ladder has already torn down any constructed parts)
// and the failing status is returned. Unless the caller asks for
// HALSPEC-only parenting, the new object is attached as a child of
// pParent's Object view.
NV_STATUS __nvoc_objCreate_OBJHALMGR(OBJHALMGR **ppThis, Dynamic *pParent, NvU32 createFlags) {
NV_STATUS status;
Object *pParentObj;
OBJHALMGR *pThis;
pThis = portMemAllocNonPaged(sizeof(OBJHALMGR));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(OBJHALMGR));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJHALMGR);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_OBJHALMGR(pThis);
status = __nvoc_ctor_OBJHALMGR(pThis);
if (status != NV_OK) goto __nvoc_objCreate_OBJHALMGR_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_OBJHALMGR_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
// Varargs NVOC creation entry point (invoked through the class-def's
// objCreatefn). OBJHALMGR's constructor takes no extra arguments, so
// the va_list is intentionally unused and creation is delegated
// directly to the typed factory.
NV_STATUS __nvoc_objCreateDynamic_OBJHALMGR(OBJHALMGR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    return __nvoc_objCreate_OBJHALMGR(ppThis, pParent, createFlags);
}

View File

@@ -0,0 +1,139 @@
#ifndef _G_HAL_MGR_NVOC_H_
#define _G_HAL_MGR_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_hal_mgr_nvoc.h"
#ifndef _HAL_MGR_H_
#define _HAL_MGR_H_
#include "core/core.h"
#include "core/info_block.h"
#include "core/hal.h"
#define HALMGR_GET_HAL(p, halid) halmgrGetHal((p), halid)
typedef struct OBJHALMGR *POBJHALMGR;
#ifndef __NVOC_CLASS_OBJHALMGR_TYPEDEF__
#define __NVOC_CLASS_OBJHALMGR_TYPEDEF__
typedef struct OBJHALMGR OBJHALMGR;
#endif /* __NVOC_CLASS_OBJHALMGR_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJHALMGR
#define __nvoc_class_id_OBJHALMGR 0xbf26de
#endif /* __nvoc_class_id_OBJHALMGR */
#ifdef NVOC_HAL_MGR_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// HAL manager object: owns one OBJHAL per HAL implementation.
struct OBJHALMGR {
const struct NVOC_RTTI *__nvoc_rtti;              // NVOC runtime type info
struct Object __nvoc_base_Object;                 // embedded base class
struct Object *__nvoc_pbase_Object;               // cached base pointer
struct OBJHALMGR *__nvoc_pbase_OBJHALMGR;         // cached self pointer
// One slot per HAL implementation, indexed by HAL id (60 matches the
// number of HAL_IMPLEMENTATION enumerators before HAL_IMPL_MAXIMUM —
// generated together; confirm if either side is regenerated).
struct OBJHAL *pHalList[60];
};
#ifndef __NVOC_CLASS_OBJHALMGR_TYPEDEF__
#define __NVOC_CLASS_OBJHALMGR_TYPEDEF__
typedef struct OBJHALMGR OBJHALMGR;
#endif /* __NVOC_CLASS_OBJHALMGR_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJHALMGR
#define __nvoc_class_id_OBJHALMGR 0xbf26de
#endif /* __nvoc_class_id_OBJHALMGR */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHALMGR;
#define __staticCast_OBJHALMGR(pThis) \
((pThis)->__nvoc_pbase_OBJHALMGR)
#ifdef __nvoc_hal_mgr_h_disabled
#define __dynamicCast_OBJHALMGR(pThis) ((OBJHALMGR*)NULL)
#else //__nvoc_hal_mgr_h_disabled
#define __dynamicCast_OBJHALMGR(pThis) \
((OBJHALMGR*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJHALMGR)))
#endif //__nvoc_hal_mgr_h_disabled
NV_STATUS __nvoc_objCreateDynamic_OBJHALMGR(OBJHALMGR**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_OBJHALMGR(OBJHALMGR**, Dynamic*, NvU32);
#define __objCreate_OBJHALMGR(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_OBJHALMGR((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
// OBJHALMGR public API. Each method has a real _IMPL and, when the
// class is compiled out (__nvoc_hal_mgr_h_disabled), an inline stub
// that asserts and fails with NV_ERR_NOT_SUPPORTED / NULL.
NV_STATUS halmgrConstruct_IMPL(struct OBJHALMGR *arg_);
#define __nvoc_halmgrConstruct(arg_) halmgrConstruct_IMPL(arg_)
void halmgrDestruct_IMPL(struct OBJHALMGR *arg0);
#define __nvoc_halmgrDestruct(arg0) halmgrDestruct_IMPL(arg0)
// Create the OBJHAL for the given HAL id and store it in pHalList.
NV_STATUS halmgrCreateHal_IMPL(struct OBJHALMGR *arg0, NvU32 arg1);
#ifdef __nvoc_hal_mgr_h_disabled
static inline NV_STATUS halmgrCreateHal(struct OBJHALMGR *arg0, NvU32 arg1) {
NV_ASSERT_FAILED_PRECOMP("OBJHALMGR was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_hal_mgr_h_disabled
#define halmgrCreateHal(arg0, arg1) halmgrCreateHal_IMPL(arg0, arg1)
#endif //__nvoc_hal_mgr_h_disabled
// Resolve the HAL id for a GPU from its chip identification values
// (returned through arg3).
NV_STATUS halmgrGetHalForGpu_IMPL(struct OBJHALMGR *arg0, NvU32 arg1, NvU32 arg2, NvU32 *arg3);
#ifdef __nvoc_hal_mgr_h_disabled
static inline NV_STATUS halmgrGetHalForGpu(struct OBJHALMGR *arg0, NvU32 arg1, NvU32 arg2, NvU32 *arg3) {
NV_ASSERT_FAILED_PRECOMP("OBJHALMGR was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_hal_mgr_h_disabled
#define halmgrGetHalForGpu(arg0, arg1, arg2, arg3) halmgrGetHalForGpu_IMPL(arg0, arg1, arg2, arg3)
#endif //__nvoc_hal_mgr_h_disabled
// Look up a previously created OBJHAL by HAL id.
struct OBJHAL *halmgrGetHal_IMPL(struct OBJHALMGR *arg0, NvU32 arg1);
#ifdef __nvoc_hal_mgr_h_disabled
static inline struct OBJHAL *halmgrGetHal(struct OBJHALMGR *arg0, NvU32 arg1) {
NV_ASSERT_FAILED_PRECOMP("OBJHALMGR was disabled!");
return NULL;
}
#else //__nvoc_hal_mgr_h_disabled
#define halmgrGetHal(arg0, arg1) halmgrGetHal_IMPL(arg0, arg1)
#endif //__nvoc_hal_mgr_h_disabled
#undef PRIVATE_FIELD
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_HAL_MGR_NVOC_H_

View File

@@ -0,0 +1,148 @@
#define NVOC_HAL_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_hal_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xe803b6 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHAL;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
void __nvoc_init_OBJHAL(OBJHAL*);
void __nvoc_init_funcTable_OBJHAL(OBJHAL*);
NV_STATUS __nvoc_ctor_OBJHAL(OBJHAL*);
void __nvoc_init_dataField_OBJHAL(OBJHAL*);
void __nvoc_dtor_OBJHAL(OBJHAL*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHAL;
// NVOC RTTI for OBJHAL itself (identity entry at offset 0).
static const struct NVOC_RTTI __nvoc_rtti_OBJHAL_OBJHAL = {
/*pClassDef=*/ &__nvoc_class_def_OBJHAL,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJHAL,
/*offset=*/ 0,
};
// RTTI for the embedded Object base; destruction via the base view is
// redirected to the derived destructor.
static const struct NVOC_RTTI __nvoc_rtti_OBJHAL_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(OBJHAL, __nvoc_base_Object),
};
// dynamicCast() relatives: OBJHAL and its single base, Object.
static const struct NVOC_CASTINFO __nvoc_castinfo_OBJHAL = {
/*numRelatives=*/ 2,
/*relatives=*/ {
&__nvoc_rtti_OBJHAL_OBJHAL,
&__nvoc_rtti_OBJHAL_Object,
},
};
// Class definition record for OBJHAL.
const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHAL =
{
/*classInfo=*/ {
/*size=*/ sizeof(OBJHAL),
/*classId=*/ classId(OBJHAL),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "OBJHAL",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJHAL,
/*pCastInfo=*/ &__nvoc_castinfo_OBJHAL,
/*pExportInfo=*/ &__nvoc_export_info_OBJHAL
};
// OBJHAL exports no RM control methods.
const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHAL =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_Object(Object*);
// Destructor: OBJHAL has no user destructor, so only the Object base
// is torn down.
void __nvoc_dtor_OBJHAL(OBJHAL *pThis) {
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Initialize NVOC-managed data fields; OBJHAL declares none (no-op).
void __nvoc_init_dataField_OBJHAL(OBJHAL *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_Object(Object* );
// Constructor chain: Object base, then data fields. OBJHAL has no
// user-written constructor, so no further stage (or unwinding) exists.
NV_STATUS __nvoc_ctor_OBJHAL(OBJHAL *pThis) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
if (status != NV_OK) goto __nvoc_ctor_OBJHAL_fail_Object;
__nvoc_init_dataField_OBJHAL(pThis);
goto __nvoc_ctor_OBJHAL_exit; // Success
__nvoc_ctor_OBJHAL_fail_Object:
__nvoc_ctor_OBJHAL_exit:
return status;
}
// Virtual-function-table setup, part 1: OBJHAL has no virtual methods.
static void __nvoc_init_funcTable_OBJHAL_1(OBJHAL *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Public func-table initializer; delegates to the numbered part(s).
void __nvoc_init_funcTable_OBJHAL(OBJHAL *pThis) {
__nvoc_init_funcTable_OBJHAL_1(pThis);
}
void __nvoc_init_Object(Object*);
// Wire up pbase back-pointers, init the Object base, install the
// function table. Runs before the constructor.
void __nvoc_init_OBJHAL(OBJHAL *pThis) {
pThis->__nvoc_pbase_OBJHAL = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
__nvoc_init_Object(&pThis->__nvoc_base_Object);
__nvoc_init_funcTable_OBJHAL(pThis);
}
// Factory: allocate, zero, and construct an OBJHAL.
// On success *ppThis receives the new object; on failure the memory is
// freed (constructor unwinding already ran) and the status returned.
// Unless HALSPEC-only parenting is requested, the object is attached
// as a child of pParent's Object view.
NV_STATUS __nvoc_objCreate_OBJHAL(OBJHAL **ppThis, Dynamic *pParent, NvU32 createFlags) {
NV_STATUS status;
Object *pParentObj;
OBJHAL *pThis;
pThis = portMemAllocNonPaged(sizeof(OBJHAL));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(OBJHAL));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJHAL);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_OBJHAL(pThis);
status = __nvoc_ctor_OBJHAL(pThis);
if (status != NV_OK) goto __nvoc_objCreate_OBJHAL_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_OBJHAL_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
// Varargs NVOC creation entry point (invoked through the class-def's
// objCreatefn). OBJHAL's constructor takes no extra arguments, so the
// va_list is intentionally unused and creation is delegated directly
// to the typed factory.
NV_STATUS __nvoc_objCreateDynamic_OBJHAL(OBJHAL **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    return __nvoc_objCreate_OBJHAL(ppThis, pParent, createFlags);
}

View File

@@ -0,0 +1,146 @@
#ifndef _G_HAL_NVOC_H_
#define _G_HAL_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_hal_nvoc.h"
#ifndef _OBJHAL_H_
#define _OBJHAL_H_
/**************** Resource Manager Defines and Structures ******************\
* *
* Module: hal.h *
* Defines and structures used for the HAL Object. *
* *
\***************************************************************************/
#include "core/core.h"
#include "core/info_block.h"
//
// HAL Info Block Id:
//
// 31 7 0
// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | 24 bits | 8 bits |
// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// Info ID # Impl
//
// Impl: The hal implementation
// Info ID number: unique id for a particular info type
//
// Pack a HAL info-block id: low 8 bits = HAL implementation, upper
// 24 bits = info id. Parameters are fully parenthesized so expression
// arguments (e.g. `a | b`) bind correctly against the inner `&`.
#define MKHALINFOID(impl,infoId) ((((infoId) & 0xffffff) << 8) | ((impl) & 0xff))
// Per-HAL module descriptor.
typedef struct MODULEDESCRIPTOR MODULEDESCRIPTOR, *PMODULEDESCRIPTOR;
struct MODULEDESCRIPTOR {
// (rmconfig) per-obj function ptr to init hal interfaces
const HAL_IFACE_SETUP *pHalSetIfaces;
};
typedef struct OBJHAL *POBJHAL;
#ifndef __NVOC_CLASS_OBJHAL_TYPEDEF__
#define __NVOC_CLASS_OBJHAL_TYPEDEF__
typedef struct OBJHAL OBJHAL;
#endif /* __NVOC_CLASS_OBJHAL_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJHAL
#define __nvoc_class_id_OBJHAL 0xe803b6
#endif /* __nvoc_class_id_OBJHAL */
#ifdef NVOC_HAL_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// HAL object: one per HAL implementation, holding its module descriptor.
struct OBJHAL {
const struct NVOC_RTTI *__nvoc_rtti;      // NVOC runtime type info
struct Object __nvoc_base_Object;         // embedded base class
struct Object *__nvoc_pbase_Object;       // cached base pointer
struct OBJHAL *__nvoc_pbase_OBJHAL;       // cached self pointer
struct MODULEDESCRIPTOR moduleDescriptor; // interface-setup table for this HAL
};
#ifndef __NVOC_CLASS_OBJHAL_TYPEDEF__
#define __NVOC_CLASS_OBJHAL_TYPEDEF__
typedef struct OBJHAL OBJHAL;
#endif /* __NVOC_CLASS_OBJHAL_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJHAL
#define __nvoc_class_id_OBJHAL 0xe803b6
#endif /* __nvoc_class_id_OBJHAL */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHAL;
#define __staticCast_OBJHAL(pThis) \
((pThis)->__nvoc_pbase_OBJHAL)
#ifdef __nvoc_hal_h_disabled
#define __dynamicCast_OBJHAL(pThis) ((OBJHAL*)NULL)
#else //__nvoc_hal_h_disabled
#define __dynamicCast_OBJHAL(pThis) \
((OBJHAL*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJHAL)))
#endif //__nvoc_hal_h_disabled
NV_STATUS __nvoc_objCreateDynamic_OBJHAL(OBJHAL**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_OBJHAL(OBJHAL**, Dynamic*, NvU32);
#define __objCreate_OBJHAL(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_OBJHAL((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
// Accessor for OBJHAL::moduleDescriptor. When the class is compiled
// out (__nvoc_hal_h_disabled), the inline stub asserts and returns NULL.
PMODULEDESCRIPTOR objhalGetModuleDescriptor_IMPL(struct OBJHAL *pHal);
#ifdef __nvoc_hal_h_disabled
static inline PMODULEDESCRIPTOR objhalGetModuleDescriptor(struct OBJHAL *pHal) {
NV_ASSERT_FAILED_PRECOMP("OBJHAL was disabled!");
return NULL;
}
#else //__nvoc_hal_h_disabled
#define objhalGetModuleDescriptor(pHal) objhalGetModuleDescriptor_IMPL(pHal)
#endif //__nvoc_hal_h_disabled
#undef PRIVATE_FIELD
//--------------------------------------------------------------------
// RM routines.
//--------------------------------------------------------------------
NV_STATUS ipVersionsSetupHal(struct OBJGPU *, void *pDynamic, IGrp_ipVersions_getInfo getInfoFn);
#endif // _OBJHAL_H_
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_HAL_NVOC_H_

View File

@@ -0,0 +1,66 @@
// This file is automatically generated by rmconfig - DO NOT EDIT!
//
// Private HAL support for halgen.
//
// Profile: devel-soc-disp-dce-client
// Template: templates/gt_hal_private.h
//
// Chips: T234D
//
//
// This file is included in several .c files for chips hal register and engines
// hal function assignment. The macros RMCFG_ENGINE_SETUP and RMCFG_HAL_SETUP_xxx
// are used to provide different content for those .c files.
//
#ifndef _G_RMCFG_HAL_PRIVATE_H_
#define _G_RMCFG_HAL_PRIVATE_H_
#include "g_hal.h"
// establish the per-chip RMCFG_HAL_SETUP_chip #defines as needed.
#if defined(RMCFG_ENGINE_SETUP)
// setup all enabled chip families
#if defined(RMCFG_HAL_SETUP_ALL)
# define RMCFG_HAL_SETUP_T23XD 1
#endif // RMCFG_HAL_SETUP_ALL
//
// setup all enabled chips in each enabled family
//
#if defined(RMCFG_HAL_SETUP_T23XD)
# define RMCFG_HAL_SETUP_T234D 1
#endif // T23XD
#endif // RMCFG_ENGINE_SETUP
// pull in private headers for each engine
//
// per-GPU structure with an interface init function for each engine
//
// registerHalModule function declaration
NV_STATUS registerHalModule(NvU32, const HAL_IFACE_SETUP *);
#if defined(RMCFG_HAL_SETUP_T234D)
// T234D interface-setup table; empty in this profile (no engine
// interfaces to install).
static const HAL_IFACE_SETUP halIface_T234D = {
};
// Register the T234D HAL module with the HAL manager.
NV_STATUS registerHalModule_T234D(void)
{
return registerHalModule(HAL_IMPL_T234D, &halIface_T234D);
}
#endif // T23XD or T234D
#endif // _G_RMCFG_HAL_PRIVATE_H_

View File

@@ -0,0 +1,51 @@
// This file is automatically generated by rmconfig - DO NOT EDIT!
//
// Hal registration entry points.
//
// Profile: devel-soc-disp-dce-client
// Template: templates/gt_hal_register.h
//
// Chips: T234D
//
#ifndef _G_RMCFG_HAL_REGISTER_H_
#define _G_RMCFG_HAL_REGISTER_H_
//
// per-family HAL registration entry points
//
NV_STATUS registerHalModule_T234D(void);
// Register every enabled HAL module in the T23XD family.
// This profile enables only T234D, so its registration status is
// propagated to the caller unchanged (NV_OK on success).
static NV_STATUS NV_INLINE REGISTER_T23XD_HALS(void)
{
    return registerHalModule_T234D();
}
//
// This routine can be used by platform dependent code to
// enable all HAL modules.
//
// Platform-independent entry point: register the HAL modules of every
// enabled chip family. Only the T23XD family exists in this profile,
// so its registration status is returned directly (NV_OK on success).
static NV_STATUS NV_INLINE REGISTER_ALL_HALS(void)
{
    return REGISTER_T23XD_HALS();
}
#endif // _G_RMCFG_HAL_REGISTER_H_

View File

@@ -0,0 +1,327 @@
#define NVOC_HDA_CODEC_API_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_hda_codec_api_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xf59a20 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Hdacodec;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
void __nvoc_init_Hdacodec(Hdacodec*);
void __nvoc_init_funcTable_Hdacodec(Hdacodec*);
NV_STATUS __nvoc_ctor_Hdacodec(Hdacodec*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_Hdacodec(Hdacodec*);
void __nvoc_dtor_Hdacodec(Hdacodec*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Hdacodec;
// NVOC RTTI for Hdacodec itself (identity entry at offset 0).
static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_Hdacodec = {
/*pClassDef=*/ &__nvoc_class_def_Hdacodec,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Hdacodec,
/*offset=*/ 0,
};
// RTTI entries for each ancestor along the chain
// Hdacodec -> GpuResource -> RmResource -> (RsResource -> Object,
// RmResourceCommon); all destruct through __nvoc_destructFromBase.
static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_RsResource = {
/*pClassDef=*/ &__nvoc_class_def_RsResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
};
static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_RmResourceCommon = {
/*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};
static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_RmResource = {
/*pClassDef=*/ &__nvoc_class_def_RmResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource),
};
static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_GpuResource = {
/*pClassDef=*/ &__nvoc_class_def_GpuResource,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource),
};
// dynamicCast() relatives: self plus the five ancestors above.
static const struct NVOC_CASTINFO __nvoc_castinfo_Hdacodec = {
/*numRelatives=*/ 6,
/*relatives=*/ {
&__nvoc_rtti_Hdacodec_Hdacodec,
&__nvoc_rtti_Hdacodec_GpuResource,
&__nvoc_rtti_Hdacodec_RmResource,
&__nvoc_rtti_Hdacodec_RmResourceCommon,
&__nvoc_rtti_Hdacodec_RsResource,
&__nvoc_rtti_Hdacodec_Object,
},
};
// Class definition record for Hdacodec: size, class id, optional debug
// name, dynamic-creation entry point, and cast/export tables.
const struct NVOC_CLASS_DEF __nvoc_class_def_Hdacodec =
{
/*classInfo=*/ {
/*size=*/ sizeof(Hdacodec),
/*classId=*/ classId(Hdacodec),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "Hdacodec",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Hdacodec,
/*pCastInfo=*/ &__nvoc_castinfo_Hdacodec,
/*pExportInfo=*/ &__nvoc_export_info_Hdacodec
};
//
// Base-class thunks: each adapts a virtual-method call made on a
// Hdacodec* to the ancestor that actually implements it, by offsetting
// the pointer with the RTTI offset for that base and forwarding all
// arguments unchanged. Grouped below by the implementing base class.
//
// --- GpuResource-implemented methods ---
static NvBool __nvoc_thunk_GpuResource_hdacodecShareCallback(struct Hdacodec *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}
static NV_STATUS __nvoc_thunk_GpuResource_hdacodecControl(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_GpuResource_hdacodecUnmap(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, pCpuMapping);
}
// --- RmResource-implemented methods ---
static NV_STATUS __nvoc_thunk_RmResource_hdacodecGetMemInterMapParams(struct Hdacodec *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_hdacodecGetMemoryMappingDescriptor(struct Hdacodec *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Hdacodec_RmResource.offset), ppMemDesc);
}
static NV_STATUS __nvoc_thunk_GpuResource_hdacodecGetMapAddrSpace(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
}
static NvHandle __nvoc_thunk_GpuResource_hdacodecGetInternalObjectHandle(struct Hdacodec *pGpuResource) {
return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset));
}
// --- RsResource-implemented methods ---
static NV_STATUS __nvoc_thunk_RsResource_hdacodecControlFilter(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pCallContext, pParams);
}
static void __nvoc_thunk_RsResource_hdacodecAddAdditionalDependants(struct RsClient *pClient, struct Hdacodec *pResource, RsResourceRef *pReference) {
resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pReference);
}
static NvU32 __nvoc_thunk_RsResource_hdacodecGetRefCount(struct Hdacodec *pResource) {
return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RmResource_hdacodecCheckMemInterUnmap(struct Hdacodec *pRmResource, NvBool bSubdeviceHandleProvided) {
return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Hdacodec_RmResource.offset), bSubdeviceHandleProvided);
}
static NV_STATUS __nvoc_thunk_RsResource_hdacodecMapTo(struct Hdacodec *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pParams);
}
static NV_STATUS __nvoc_thunk_RmResource_hdacodecControl_Prologue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_GpuResource_hdacodecGetRegBaseOffsetAndSize(struct Hdacodec *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pGpu, pOffset, pSize);
}
static NvBool __nvoc_thunk_RsResource_hdacodecCanCopy(struct Hdacodec *pResource) {
return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_GpuResource_hdacodecInternalControlForward(struct Hdacodec *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), command, pParams, size);
}
static void __nvoc_thunk_RsResource_hdacodecPreDestruct(struct Hdacodec *pResource) {
resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset));
}
static NV_STATUS __nvoc_thunk_RsResource_hdacodecUnmapFrom(struct Hdacodec *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pParams);
}
static void __nvoc_thunk_RmResource_hdacodecControl_Epilogue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pCallContext, pParams);
}
static NV_STATUS __nvoc_thunk_RsResource_hdacodecControlLookup(struct Hdacodec *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pParams, ppEntry);
}
static NV_STATUS __nvoc_thunk_GpuResource_hdacodecMap(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, pParams, pCpuMapping);
}
static NvBool __nvoc_thunk_RmResource_hdacodecAccessCallback(struct Hdacodec *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
// Hdacodec exports no RM control methods of its own: the export table is empty.
const struct NVOC_EXPORT_INFO __nvoc_export_info_Hdacodec =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_GpuResource(GpuResource*);
// Destructor: Hdacodec has no destructor body of its own; it only tears down
// the embedded GpuResource base sub-object.
void __nvoc_dtor_Hdacodec(Hdacodec *pThis) {
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Data-field initializer: Hdacodec declares no data fields with NVOC-specified
// defaults, so this is intentionally a no-op.
void __nvoc_init_dataField_Hdacodec(Hdacodec *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
// Constructor chain: construct the GpuResource base first, then default
// data fields, then the user-supplied hdacodecConstruct_IMPL.  The goto
// labels unwind in reverse construction order on failure (standard C
// goto-cleanup pattern for generated ctors).
NV_STATUS __nvoc_ctor_Hdacodec(Hdacodec *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_Hdacodec_fail_GpuResource;
__nvoc_init_dataField_Hdacodec(pThis);
status = __nvoc_hdacodecConstruct(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_Hdacodec_fail__init;
goto __nvoc_ctor_Hdacodec_exit; // Success
__nvoc_ctor_Hdacodec_fail__init:
// hdacodecConstruct failed: undo the already-constructed base.
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_ctor_Hdacodec_fail_GpuResource:
__nvoc_ctor_Hdacodec_exit:
return status;
}
// Populate the Hdacodec vtable.  Every entry here points at an inherited
// thunk (GpuResource / RmResource / RsResource); Hdacodec overrides none of
// these virtuals itself.
static void __nvoc_init_funcTable_Hdacodec_1(Hdacodec *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
pThis->__hdacodecShareCallback__ = &__nvoc_thunk_GpuResource_hdacodecShareCallback;
pThis->__hdacodecControl__ = &__nvoc_thunk_GpuResource_hdacodecControl;
pThis->__hdacodecUnmap__ = &__nvoc_thunk_GpuResource_hdacodecUnmap;
pThis->__hdacodecGetMemInterMapParams__ = &__nvoc_thunk_RmResource_hdacodecGetMemInterMapParams;
pThis->__hdacodecGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_hdacodecGetMemoryMappingDescriptor;
pThis->__hdacodecGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_hdacodecGetMapAddrSpace;
pThis->__hdacodecGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_hdacodecGetInternalObjectHandle;
pThis->__hdacodecControlFilter__ = &__nvoc_thunk_RsResource_hdacodecControlFilter;
pThis->__hdacodecAddAdditionalDependants__ = &__nvoc_thunk_RsResource_hdacodecAddAdditionalDependants;
pThis->__hdacodecGetRefCount__ = &__nvoc_thunk_RsResource_hdacodecGetRefCount;
pThis->__hdacodecCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_hdacodecCheckMemInterUnmap;
pThis->__hdacodecMapTo__ = &__nvoc_thunk_RsResource_hdacodecMapTo;
pThis->__hdacodecControl_Prologue__ = &__nvoc_thunk_RmResource_hdacodecControl_Prologue;
pThis->__hdacodecGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_hdacodecGetRegBaseOffsetAndSize;
pThis->__hdacodecCanCopy__ = &__nvoc_thunk_RsResource_hdacodecCanCopy;
pThis->__hdacodecInternalControlForward__ = &__nvoc_thunk_GpuResource_hdacodecInternalControlForward;
pThis->__hdacodecPreDestruct__ = &__nvoc_thunk_RsResource_hdacodecPreDestruct;
pThis->__hdacodecUnmapFrom__ = &__nvoc_thunk_RsResource_hdacodecUnmapFrom;
pThis->__hdacodecControl_Epilogue__ = &__nvoc_thunk_RmResource_hdacodecControl_Epilogue;
pThis->__hdacodecControlLookup__ = &__nvoc_thunk_RsResource_hdacodecControlLookup;
pThis->__hdacodecMap__ = &__nvoc_thunk_GpuResource_hdacodecMap;
pThis->__hdacodecAccessCallback__ = &__nvoc_thunk_RmResource_hdacodecAccessCallback;
}
// Public vtable initializer; delegates to the single generated chunk above.
void __nvoc_init_funcTable_Hdacodec(Hdacodec *pThis) {
__nvoc_init_funcTable_Hdacodec_1(pThis);
}
void __nvoc_init_GpuResource(GpuResource*);
// Initialize the NVOC object: cache a direct pointer to every ancestor
// sub-object (used by staticCast), then initialize the base class and the
// Hdacodec vtable.
void __nvoc_init_Hdacodec(Hdacodec *pThis) {
pThis->__nvoc_pbase_Hdacodec = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
__nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_init_funcTable_Hdacodec(pThis);
}
// Allocate, initialize and construct an Hdacodec object.
// On success *ppThis receives the new object; on failure the allocation is
// freed and *ppThis is left untouched.
NV_STATUS __nvoc_objCreate_Hdacodec(Hdacodec **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status;
Object *pParentObj;
Hdacodec *pThis;
pThis = portMemAllocNonPaged(sizeof(Hdacodec));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
// Zero-fill so all fields start in a known state before init/ctor.
portMemSet(pThis, 0, sizeof(Hdacodec));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Hdacodec);
// Link into the parent's child list unless the caller only wanted the
// parent for HAL-spec resolution.
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_Hdacodec(pThis);
status = __nvoc_ctor_Hdacodec(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_objCreate_Hdacodec_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_Hdacodec_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
// Varargs entry point used by the NVOC dynamic-creation machinery: unpack the
// two constructor arguments from the va_list and forward to the typed creator.
NV_STATUS __nvoc_objCreateDynamic_Hdacodec(Hdacodec **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
NV_STATUS status;
struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
status = __nvoc_objCreate_Hdacodec(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
return status;
}

View File

@@ -0,0 +1,229 @@
#ifndef _G_HDA_CODEC_API_NVOC_H_
#define _G_HDA_CODEC_API_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_hda_codec_api_nvoc.h"
#ifndef HDA_CODEC_API_H
#define HDA_CODEC_API_H
#include "resserv/resserv.h"
#include "nvoc/prelude.h"
#include "resserv/rs_resource.h"
#include "ctrl/ctrl90ec.h"
#include "gpu/gpu_resource.h"
#ifdef NVOC_HDA_CODEC_API_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// NVOC object layout for the Hdacodec RM resource class.
// Order matters: RTTI pointer first, then the GpuResource base sub-object,
// then cached ancestor pointers (filled by __nvoc_init_Hdacodec), then the
// per-instance vtable (filled by __nvoc_init_funcTable_Hdacodec).
struct Hdacodec {
const struct NVOC_RTTI *__nvoc_rtti;
struct GpuResource __nvoc_base_GpuResource;
// Cached base-class pointers for O(1) staticCast.
struct Object *__nvoc_pbase_Object;
struct RsResource *__nvoc_pbase_RsResource;
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
struct RmResource *__nvoc_pbase_RmResource;
struct GpuResource *__nvoc_pbase_GpuResource;
struct Hdacodec *__nvoc_pbase_Hdacodec;
// Virtual method table; every slot is dispatched via the *_DISPATCH inlines.
NvBool (*__hdacodecShareCallback__)(struct Hdacodec *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__hdacodecControl__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__hdacodecUnmap__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RsCpuMapping *);
NV_STATUS (*__hdacodecGetMemInterMapParams__)(struct Hdacodec *, RMRES_MEM_INTER_MAP_PARAMS *);
NV_STATUS (*__hdacodecGetMemoryMappingDescriptor__)(struct Hdacodec *, struct MEMORY_DESCRIPTOR **);
NV_STATUS (*__hdacodecGetMapAddrSpace__)(struct Hdacodec *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
NvHandle (*__hdacodecGetInternalObjectHandle__)(struct Hdacodec *);
NV_STATUS (*__hdacodecControlFilter__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
void (*__hdacodecAddAdditionalDependants__)(struct RsClient *, struct Hdacodec *, RsResourceRef *);
NvU32 (*__hdacodecGetRefCount__)(struct Hdacodec *);
NV_STATUS (*__hdacodecCheckMemInterUnmap__)(struct Hdacodec *, NvBool);
NV_STATUS (*__hdacodecMapTo__)(struct Hdacodec *, RS_RES_MAP_TO_PARAMS *);
NV_STATUS (*__hdacodecControl_Prologue__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__hdacodecGetRegBaseOffsetAndSize__)(struct Hdacodec *, struct OBJGPU *, NvU32 *, NvU32 *);
NvBool (*__hdacodecCanCopy__)(struct Hdacodec *);
NV_STATUS (*__hdacodecInternalControlForward__)(struct Hdacodec *, NvU32, void *, NvU32);
void (*__hdacodecPreDestruct__)(struct Hdacodec *);
NV_STATUS (*__hdacodecUnmapFrom__)(struct Hdacodec *, RS_RES_UNMAP_FROM_PARAMS *);
void (*__hdacodecControl_Epilogue__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__hdacodecControlLookup__)(struct Hdacodec *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NV_STATUS (*__hdacodecMap__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
NvBool (*__hdacodecAccessCallback__)(struct Hdacodec *, struct RsClient *, void *, RsAccessRight);
};
// Typedef/class-id guards: allow this header to be safely re-included and the
// class to be forward-referenced from other generated headers.
#ifndef __NVOC_CLASS_Hdacodec_TYPEDEF__
#define __NVOC_CLASS_Hdacodec_TYPEDEF__
typedef struct Hdacodec Hdacodec;
#endif /* __NVOC_CLASS_Hdacodec_TYPEDEF__ */
#ifndef __nvoc_class_id_Hdacodec
#define __nvoc_class_id_Hdacodec 0xf59a20
#endif /* __nvoc_class_id_Hdacodec */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Hdacodec;
// staticCast uses the cached pointer; dynamicCast performs an RTTI lookup and
// degenerates to NULL when the class is compiled out (the *_disabled guard).
#define __staticCast_Hdacodec(pThis) \
((pThis)->__nvoc_pbase_Hdacodec)
#ifdef __nvoc_hda_codec_api_h_disabled
#define __dynamicCast_Hdacodec(pThis) ((Hdacodec*)NULL)
#else //__nvoc_hda_codec_api_h_disabled
#define __dynamicCast_Hdacodec(pThis) \
((Hdacodec*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Hdacodec)))
#endif //__nvoc_hda_codec_api_h_disabled
NV_STATUS __nvoc_objCreateDynamic_Hdacodec(Hdacodec**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_Hdacodec(Hdacodec**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_Hdacodec(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_Hdacodec((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
// Public method macros: each virtual call is routed through its _DISPATCH
// inline, which invokes the per-instance vtable slot.
#define hdacodecShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) hdacodecShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define hdacodecControl(pGpuResource, pCallContext, pParams) hdacodecControl_DISPATCH(pGpuResource, pCallContext, pParams)
#define hdacodecUnmap(pGpuResource, pCallContext, pCpuMapping) hdacodecUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define hdacodecGetMemInterMapParams(pRmResource, pParams) hdacodecGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define hdacodecGetMemoryMappingDescriptor(pRmResource, ppMemDesc) hdacodecGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define hdacodecGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) hdacodecGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define hdacodecGetInternalObjectHandle(pGpuResource) hdacodecGetInternalObjectHandle_DISPATCH(pGpuResource)
#define hdacodecControlFilter(pResource, pCallContext, pParams) hdacodecControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define hdacodecAddAdditionalDependants(pClient, pResource, pReference) hdacodecAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define hdacodecGetRefCount(pResource) hdacodecGetRefCount_DISPATCH(pResource)
#define hdacodecCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) hdacodecCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define hdacodecMapTo(pResource, pParams) hdacodecMapTo_DISPATCH(pResource, pParams)
#define hdacodecControl_Prologue(pResource, pCallContext, pParams) hdacodecControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define hdacodecGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) hdacodecGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
#define hdacodecCanCopy(pResource) hdacodecCanCopy_DISPATCH(pResource)
#define hdacodecInternalControlForward(pGpuResource, command, pParams, size) hdacodecInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define hdacodecPreDestruct(pResource) hdacodecPreDestruct_DISPATCH(pResource)
#define hdacodecUnmapFrom(pResource, pParams) hdacodecUnmapFrom_DISPATCH(pResource, pParams)
#define hdacodecControl_Epilogue(pResource, pCallContext, pParams) hdacodecControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define hdacodecControlLookup(pResource, pParams, ppEntry) hdacodecControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define hdacodecMap(pGpuResource, pCallContext, pParams, pCpuMapping) hdacodecMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define hdacodecAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) hdacodecAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
// Generated dispatch inlines: each reads the matching function pointer out of
// the object's vtable and invokes it with the same arguments.  No other logic.
static inline NvBool hdacodecShareCallback_DISPATCH(struct Hdacodec *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pGpuResource->__hdacodecShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}
static inline NV_STATUS hdacodecControl_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pGpuResource->__hdacodecControl__(pGpuResource, pCallContext, pParams);
}
static inline NV_STATUS hdacodecUnmap_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__hdacodecUnmap__(pGpuResource, pCallContext, pCpuMapping);
}
static inline NV_STATUS hdacodecGetMemInterMapParams_DISPATCH(struct Hdacodec *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
return pRmResource->__hdacodecGetMemInterMapParams__(pRmResource, pParams);
}
static inline NV_STATUS hdacodecGetMemoryMappingDescriptor_DISPATCH(struct Hdacodec *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
return pRmResource->__hdacodecGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}
static inline NV_STATUS hdacodecGetMapAddrSpace_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
return pGpuResource->__hdacodecGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}
static inline NvHandle hdacodecGetInternalObjectHandle_DISPATCH(struct Hdacodec *pGpuResource) {
return pGpuResource->__hdacodecGetInternalObjectHandle__(pGpuResource);
}
static inline NV_STATUS hdacodecControlFilter_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__hdacodecControlFilter__(pResource, pCallContext, pParams);
}
static inline void hdacodecAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Hdacodec *pResource, RsResourceRef *pReference) {
pResource->__hdacodecAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NvU32 hdacodecGetRefCount_DISPATCH(struct Hdacodec *pResource) {
return pResource->__hdacodecGetRefCount__(pResource);
}
static inline NV_STATUS hdacodecCheckMemInterUnmap_DISPATCH(struct Hdacodec *pRmResource, NvBool bSubdeviceHandleProvided) {
return pRmResource->__hdacodecCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}
static inline NV_STATUS hdacodecMapTo_DISPATCH(struct Hdacodec *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
return pResource->__hdacodecMapTo__(pResource, pParams);
}
static inline NV_STATUS hdacodecControl_Prologue_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
return pResource->__hdacodecControl_Prologue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS hdacodecGetRegBaseOffsetAndSize_DISPATCH(struct Hdacodec *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
return pGpuResource->__hdacodecGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize);
}
static inline NvBool hdacodecCanCopy_DISPATCH(struct Hdacodec *pResource) {
return pResource->__hdacodecCanCopy__(pResource);
}
static inline NV_STATUS hdacodecInternalControlForward_DISPATCH(struct Hdacodec *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
return pGpuResource->__hdacodecInternalControlForward__(pGpuResource, command, pParams, size);
}
static inline void hdacodecPreDestruct_DISPATCH(struct Hdacodec *pResource) {
pResource->__hdacodecPreDestruct__(pResource);
}
static inline NV_STATUS hdacodecUnmapFrom_DISPATCH(struct Hdacodec *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
return pResource->__hdacodecUnmapFrom__(pResource, pParams);
}
static inline void hdacodecControl_Epilogue_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
pResource->__hdacodecControl_Epilogue__(pResource, pCallContext, pParams);
}
static inline NV_STATUS hdacodecControlLookup_DISPATCH(struct Hdacodec *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
return pResource->__hdacodecControlLookup__(pResource, pParams, ppEntry);
}
static inline NV_STATUS hdacodecMap_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
return pGpuResource->__hdacodecMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}
static inline NvBool hdacodecAccessCallback_DISPATCH(struct Hdacodec *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
return pResource->__hdacodecAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}
// Hand-written constructor (defined elsewhere); the generated
// __nvoc_ctor_Hdacodec invokes it through the __nvoc_hdacodecConstruct macro.
NV_STATUS hdacodecConstruct_IMPL(struct Hdacodec *arg_pHdacodecApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_hdacodecConstruct(arg_pHdacodecApi, arg_pCallContext, arg_pParams) hdacodecConstruct_IMPL(arg_pHdacodecApi, arg_pCallContext, arg_pParams)
#undef PRIVATE_FIELD
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_HDA_CODEC_API_NVOC_H_

View File

@@ -0,0 +1,151 @@
#ifndef _G_HYPERVISOR_NVOC_H_
#define _G_HYPERVISOR_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_hypervisor_nvoc.h"
#ifndef HYPERVISOR_H
#define HYPERVISOR_H
/**************** Resource Manager Defines and Structures ******************\
* *
* Module: hypervisor.h *
* Defines and structures used for the hypervisor object. *
\***************************************************************************/
#include "core/core.h"
#include "nvoc/utility.h"
#include "nv-hypervisor.h"
#include "mem_mgr/mem.h"
/* ------------------------ Forward Declarations ---------------------------- */
struct OBJOS;
#ifndef __NVOC_CLASS_OBJOS_TYPEDEF__
#define __NVOC_CLASS_OBJOS_TYPEDEF__
typedef struct OBJOS OBJOS;
#endif /* __NVOC_CLASS_OBJOS_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJOS
#define __nvoc_class_id_OBJOS 0xaa1d70
#endif /* __nvoc_class_id_OBJOS */
typedef struct OBJHYPERVISOR *POBJHYPERVISOR;
#ifndef __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__
#define __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__
typedef struct OBJHYPERVISOR OBJHYPERVISOR;
#endif /* __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJHYPERVISOR
#define __nvoc_class_id_OBJHYPERVISOR 0x33c1ba
#endif /* __nvoc_class_id_OBJHYPERVISOR */
typedef struct HOST_VGPU_DEVICE HOST_VGPU_DEVICE;
#ifdef NVOC_HYPERVISOR_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// NVOC object layout for the hypervisor-detection singleton.
struct OBJHYPERVISOR {
const struct NVOC_RTTI *__nvoc_rtti;
struct Object __nvoc_base_Object;
struct Object *__nvoc_pbase_Object;
struct OBJHYPERVISOR *__nvoc_pbase_OBJHYPERVISOR;
NvBool bDetected;               // a hypervisor was detected
NvBool bIsHVMGuest;             // running as an HVM guest
HYPERVISOR_TYPE type;           // which hypervisor was detected
NvBool bIsHypervHost;           // NOTE(review): presumably Hyper-V host side — confirm
NvBool bIsHypervVgpuSupported;  // NOTE(review): presumably vGPU-on-Hyper-V capability — confirm
};
// Typedef/class-id guards (repeated here so the struct definition block is
// self-contained even if the forward-declaration section was skipped).
#ifndef __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__
#define __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__
typedef struct OBJHYPERVISOR OBJHYPERVISOR;
#endif /* __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJHYPERVISOR
#define __nvoc_class_id_OBJHYPERVISOR 0x33c1ba
#endif /* __nvoc_class_id_OBJHYPERVISOR */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHYPERVISOR;
// Cast helpers: staticCast via cached pointer; dynamicCast via RTTI, or NULL
// when the class is compiled out.
#define __staticCast_OBJHYPERVISOR(pThis) \
((pThis)->__nvoc_pbase_OBJHYPERVISOR)
#ifdef __nvoc_hypervisor_h_disabled
#define __dynamicCast_OBJHYPERVISOR(pThis) ((OBJHYPERVISOR*)NULL)
#else //__nvoc_hypervisor_h_disabled
#define __dynamicCast_OBJHYPERVISOR(pThis) \
((OBJHYPERVISOR*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJHYPERVISOR)))
#endif //__nvoc_hypervisor_h_disabled
NV_STATUS __nvoc_objCreateDynamic_OBJHYPERVISOR(OBJHYPERVISOR**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_OBJHYPERVISOR(OBJHYPERVISOR**, Dynamic*, NvU32);
#define __objCreate_OBJHYPERVISOR(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_OBJHYPERVISOR((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
// Stub implementations generated for this build configuration: every query
// below is a compile-time constant false ((NvBool)(0 != 0)), i.e. the
// corresponding hypervisor feature is not supported in this variant.
static inline NvBool hypervisorIsVgxHyper_491d52(void) {
return ((NvBool)(0 != 0));
}
#define hypervisorIsVgxHyper() hypervisorIsVgxHyper_491d52()
#define hypervisorIsVgxHyper_HAL() hypervisorIsVgxHyper()
static inline NvBool hypervisorCheckForAdminAccess(NvHandle hClient, NvU32 rmCtrlId) {
return ((NvBool)(0 != 0));
}
static inline NvBool hypervisorCheckForObjectAccess(NvHandle hClient) {
return ((NvBool)(0 != 0));
}
static inline NvBool hypervisorCheckForGspOffloadAccess(NvU32 rmCtrlId) {
return ((NvBool)(0 != 0));
}
static inline NvBool hypervisorIsType(HYPERVISOR_TYPE hyperType) {
return ((NvBool)(0 != 0));
}
#undef PRIVATE_FIELD
#endif // HYPERVISOR_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_HYPERVISOR_NVOC_H_

View File

@@ -0,0 +1,235 @@
#define NVOC_IO_VASPACE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_io_vaspace_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x28ed9c = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJIOVASPACE;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE;
void __nvoc_init_OBJIOVASPACE(OBJIOVASPACE*);
void __nvoc_init_funcTable_OBJIOVASPACE(OBJIOVASPACE*);
NV_STATUS __nvoc_ctor_OBJIOVASPACE(OBJIOVASPACE*);
void __nvoc_init_dataField_OBJIOVASPACE(OBJIOVASPACE*);
void __nvoc_dtor_OBJIOVASPACE(OBJIOVASPACE*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJIOVASPACE;
// RTTI records for OBJIOVASPACE and each of its ancestors.  The offset field
// is the byte distance from the start of OBJIOVASPACE to the ancestor
// sub-object; the thunks below use these offsets for pointer re-basing.
static const struct NVOC_RTTI __nvoc_rtti_OBJIOVASPACE_OBJIOVASPACE = {
/*pClassDef=*/ &__nvoc_class_def_OBJIOVASPACE,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJIOVASPACE,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_OBJIOVASPACE_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_OBJIOVASPACE_OBJVASPACE = {
/*pClassDef=*/ &__nvoc_class_def_OBJVASPACE,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE),
};
// Cast table used by dynamicCast: self plus both ancestors.
static const struct NVOC_CASTINFO __nvoc_castinfo_OBJIOVASPACE = {
/*numRelatives=*/ 3,
/*relatives=*/ {
&__nvoc_rtti_OBJIOVASPACE_OBJIOVASPACE,
&__nvoc_rtti_OBJIOVASPACE_OBJVASPACE,
&__nvoc_rtti_OBJIOVASPACE_Object,
},
};
// Class definition consumed by the NVOC runtime (size, id, creator, casts).
const struct NVOC_CLASS_DEF __nvoc_class_def_OBJIOVASPACE =
{
/*classInfo=*/ {
/*size=*/ sizeof(OBJIOVASPACE),
/*classId=*/ classId(OBJIOVASPACE),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "OBJIOVASPACE",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJIOVASPACE,
/*pCastInfo=*/ &__nvoc_castinfo_OBJIOVASPACE,
/*pExportInfo=*/ &__nvoc_export_info_OBJIOVASPACE
};
// Thunks in both directions:
//  - "Down-thunks" (pointer MINUS offset): an OBJVASPACE virtual is overridden
//    by OBJIOVASPACE, so a base-class pointer is converted back to the derived
//    object before calling the iovaspace* implementation.
//  - "Up-thunks" (pointer PLUS offset): OBJIOVASPACE inherits the virtual, so
//    the derived pointer is converted to the base sub-object.
static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceConstruct_(struct OBJVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) {
return iovaspaceConstruct_((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags);
}
static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceAlloc(struct OBJVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) {
return iovaspaceAlloc((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr);
}
static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceFree(struct OBJVASPACE *pVAS, NvU64 vAddr) {
return iovaspaceFree((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), vAddr);
}
static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceApplyDefaultAlignment(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) {
return iovaspaceApplyDefaultAlignment((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pAllocInfo, pAlign, pSize, pPageSizeLockMask);
}
static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceIncAllocRefCnt(struct OBJVASPACE *pVAS, NvU64 vAddr) {
return iovaspaceIncAllocRefCnt((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), vAddr);
}
static NvU64 __nvoc_thunk_OBJIOVASPACE_vaspaceGetVaStart(struct OBJVASPACE *pVAS) {
return iovaspaceGetVaStart((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset));
}
static NvU64 __nvoc_thunk_OBJIOVASPACE_vaspaceGetVaLimit(struct OBJVASPACE *pVAS) {
return iovaspaceGetVaLimit((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset));
}
static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceGetVasInfo(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) {
return iovaspaceGetVasInfo((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pParams);
}
static NvBool __nvoc_thunk_OBJVASPACE_iovaspaceIsInternalVaRestricted(struct OBJIOVASPACE *pVAS) {
return vaspaceIsInternalVaRestricted((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset));
}
static NvU32 __nvoc_thunk_OBJVASPACE_iovaspaceGetFlags(struct OBJIOVASPACE *pVAS) {
return vaspaceGetFlags((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset));
}
// OBJIOVASPACE exports no RM control methods; the export table is empty.
const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJIOVASPACE =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_OBJVASPACE(OBJVASPACE*);
// Destructor: run the class's own destruct hook first, then tear down the
// OBJVASPACE base sub-object (reverse of construction order).
void __nvoc_dtor_OBJIOVASPACE(OBJIOVASPACE *pThis) {
__nvoc_iovaspaceDestruct(pThis);
__nvoc_dtor_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Data-field initializer: no NVOC-specified field defaults for this class,
// so this is intentionally a no-op.
void __nvoc_init_dataField_OBJIOVASPACE(OBJIOVASPACE *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_OBJVASPACE(OBJVASPACE* );
// Constructor chain: construct the OBJVASPACE base, then default data fields.
// This class has no user-supplied constructor, so nothing else to unwind.
NV_STATUS __nvoc_ctor_OBJIOVASPACE(OBJIOVASPACE *pThis) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE);
if (status != NV_OK) goto __nvoc_ctor_OBJIOVASPACE_fail_OBJVASPACE;
__nvoc_init_dataField_OBJIOVASPACE(pThis);
goto __nvoc_ctor_OBJIOVASPACE_exit; // Success
__nvoc_ctor_OBJIOVASPACE_fail_OBJVASPACE:
__nvoc_ctor_OBJIOVASPACE_exit:
return status;
}
// Populate the vtables.  Three groups:
//  1. OBJIOVASPACE's own slots point at its *_IMPL overrides.
//  2. The embedded OBJVASPACE base vtable is patched with down-thunks so calls
//     through a base pointer reach the OBJIOVASPACE overrides.
//  3. Virtuals OBJIOVASPACE does not override get up-thunks to the base impl.
static void __nvoc_init_funcTable_OBJIOVASPACE_1(OBJIOVASPACE *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
pThis->__iovaspaceConstruct___ = &iovaspaceConstruct__IMPL;
pThis->__iovaspaceAlloc__ = &iovaspaceAlloc_IMPL;
pThis->__iovaspaceFree__ = &iovaspaceFree_IMPL;
pThis->__iovaspaceApplyDefaultAlignment__ = &iovaspaceApplyDefaultAlignment_IMPL;
pThis->__iovaspaceIncAllocRefCnt__ = &iovaspaceIncAllocRefCnt_IMPL;
pThis->__iovaspaceGetVaStart__ = &iovaspaceGetVaStart_IMPL;
pThis->__iovaspaceGetVaLimit__ = &iovaspaceGetVaLimit_IMPL;
pThis->__iovaspaceGetVasInfo__ = &iovaspaceGetVasInfo_IMPL;
pThis->__nvoc_base_OBJVASPACE.__vaspaceConstruct___ = &__nvoc_thunk_OBJIOVASPACE_vaspaceConstruct_;
pThis->__nvoc_base_OBJVASPACE.__vaspaceAlloc__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceAlloc;
pThis->__nvoc_base_OBJVASPACE.__vaspaceFree__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceFree;
pThis->__nvoc_base_OBJVASPACE.__vaspaceApplyDefaultAlignment__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceApplyDefaultAlignment;
pThis->__nvoc_base_OBJVASPACE.__vaspaceIncAllocRefCnt__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceIncAllocRefCnt;
pThis->__nvoc_base_OBJVASPACE.__vaspaceGetVaStart__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceGetVaStart;
pThis->__nvoc_base_OBJVASPACE.__vaspaceGetVaLimit__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceGetVaLimit;
pThis->__nvoc_base_OBJVASPACE.__vaspaceGetVasInfo__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceGetVasInfo;
pThis->__iovaspaceIsInternalVaRestricted__ = &__nvoc_thunk_OBJVASPACE_iovaspaceIsInternalVaRestricted;
pThis->__iovaspaceGetFlags__ = &__nvoc_thunk_OBJVASPACE_iovaspaceGetFlags;
}
// Public vtable initializer; delegates to the single generated chunk above.
void __nvoc_init_funcTable_OBJIOVASPACE(OBJIOVASPACE *pThis) {
__nvoc_init_funcTable_OBJIOVASPACE_1(pThis);
}
void __nvoc_init_OBJVASPACE(OBJVASPACE*);
// Initialize an OBJIOVASPACE instance: cache the per-ancestor base pointers
// used by staticCast, recursively initialize the embedded OBJVASPACE base,
// then install this class's function table.
void __nvoc_init_OBJIOVASPACE(OBJIOVASPACE *pThis) {
pThis->__nvoc_pbase_OBJIOVASPACE = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object;
pThis->__nvoc_pbase_OBJVASPACE = &pThis->__nvoc_base_OBJVASPACE;
__nvoc_init_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE);
__nvoc_init_funcTable_OBJIOVASPACE(pThis);
}
// NVOC-generated factory for OBJIOVASPACE.
// Allocates zeroed non-paged memory, sets up RTTI, optionally links the new
// object as a child of pParent, runs init + constructor, and returns the
// object in *ppThis. On failure *ppThis is left untouched and the allocation
// is freed; destructors are not run (the constructor unwinds its own state).
NV_STATUS __nvoc_objCreate_OBJIOVASPACE(OBJIOVASPACE **ppThis, Dynamic *pParent, NvU32 createFlags) {
NV_STATUS status;
Object *pParentObj;
OBJIOVASPACE *pThis;
pThis = portMemAllocNonPaged(sizeof(OBJIOVASPACE));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(OBJIOVASPACE));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJIOVASPACE);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
// NOTE(review): dynamicCast's result is not NULL-checked before being
// passed to objAddChild — presumably objAddChild asserts/handles NULL;
// confirm against the Object implementation.
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_OBJIOVASPACE(pThis);
status = __nvoc_ctor_OBJIOVASPACE(pThis);
if (status != NV_OK) goto __nvoc_objCreate_OBJIOVASPACE_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_OBJIOVASPACE_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
// Varargs-based dynamic creation shim used by the NVOC class definition's
// objCreatefn slot. OBJIOVASPACE takes no creation arguments, so 'args' is
// intentionally unused.
NV_STATUS __nvoc_objCreateDynamic_OBJIOVASPACE(OBJIOVASPACE **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
NV_STATUS status;
status = __nvoc_objCreate_OBJIOVASPACE(ppThis, pParent, createFlags);
return status;
}

/* ==== file boundary (web-viewer "View File"/diff-header artifact removed):
 *      next content is g_io_vaspace_nvoc.h ==== */
#ifndef _G_IO_VASPACE_NVOC_H_
#define _G_IO_VASPACE_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_io_vaspace_nvoc.h"
#ifndef _IOVASPACE_H_
#define _IOVASPACE_H_
/**************** Resource Manager Defines and Structures ******************\
* *
* Module: IOVASPACE.H *
* Defines and structures used for IOMMU Virtual Address Space Object. *
\***************************************************************************/
#include "mem_mgr/vaspace.h" // base class object header
#define NV_IOVA_DOMAIN_NONE (~(NvU32)0)
typedef struct OBJIOVASPACE *POBJIOVASPACE;
#ifndef __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__
#define __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__
typedef struct OBJIOVASPACE OBJIOVASPACE;
#endif /* __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJIOVASPACE
#define __nvoc_class_id_OBJIOVASPACE 0x28ed9c
#endif /* __nvoc_class_id_OBJIOVASPACE */
typedef struct IOVAMAPPING IOVAMAPPING;
typedef struct IOVAMAPPING *PIOVAMAPPING;
// Opaque pointer for the OS layer to use
typedef struct OS_IOVA_MAPPING_DATA *POS_IOVA_MAPPING_DATA;
/*!
 * Describes one mapping of physical memory (pPhysMemDesc) into an IOVA
 * space. Root mappings hang off the root memory descriptor; submappings
 * hang off their root mapping (see the 'link' union and 'pNext' below).
 */
struct IOVAMAPPING
{
// ID of the IOVA space this mapping belongs to.
NvU32 iovaspaceId;
//
// Refcount of the mapping.
//
// Each iovaspaceAcquireMapping() call increments the refcount, and each
// iovaspaceReleaseMapping() call decrements it. Additionally, submappings
// increment the refcount of their root mapping on creation and only
// decrement it when they are destroyed.
//
// Mappings are destroyed when their refcount reaches 0.
//
// Notably a mapping can be destroyed regardless of its refcount with
// iovaspaceDestroyMapping(). Destroying a root mapping destroys all of its
// submappings as well.
//
NvU32 refcount;
// Physical memory descriptor backing this mapping.
PMEMORY_DESCRIPTOR pPhysMemDesc;
//
// Maintain a hierarchy of IOVA mappings. The "root" mapping will generally
// be tied to the root memory descriptor. That mapping can have submappings
// within the same IOVA space that correspond to submemory descriptors of
// the root memory descriptor.
//
// Also, the root memory descriptor may have multiple IOVA mappings (up to
// one per IOVA space), so those need to be tracked in association directly
// with the root memory descriptor.
//
// The memory descriptor (root or submemory) always points to a single IOVA
// mapping. For root memory descriptors, that mapping is the head of a list
// in which each mapping covers a unique IOVA space. For submemory
// descriptors, there can only be one IOVA mapping, corresponding to the
// IOVA space of the pGpu associated with the submemory descriptor.
//
// Which union member applies depends on whether this is a root mapping
// (pChildren) or a submapping (pParent).
union
{
struct IOVAMAPPING *pParent;
struct IOVAMAPPING *pChildren;
} link;
//
// For root mappings, this points to the next root mapping for the same
// parent physical memory descriptor (e.g., a root mapping for a different
// IOVA space).
//
// For submappings, this instead points to the next submapping of the
// parent root mapping, since a submemory descriptor may only have a single
// IOVA mapping (which is a submapping of an IOVA mapping on the root
// memory descriptor).
//
struct IOVAMAPPING *pNext;
// OS data associated with this mapping. Core RM doesn't touch this.
POS_IOVA_MAPPING_DATA pOsData;
//
// If the memory is contiguous, this array consists of one element.
// If the memory is discontiguous, this array is actually larger and has
// one entry for each physical page in pPhysMemDesc. As a result, this
// structure must be allocated from the heap.
//
// NOTE(review): this is the legacy pre-C99 "[1] trailing array" idiom
// rather than a flexible array member; generated/ABI-stable code, so it
// is kept as-is.
//
RmPhysAddr iovaArray[1];
// WARNING: DO NOT place anything behind the IOVA array!
};
/*!
* Virtual address space for a system's IOMMU translation.
*/
#ifdef NVOC_IO_VASPACE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// NVOC class layout for OBJIOVASPACE: RTTI pointer, embedded OBJVASPACE
// base, cached ancestor pointers (for staticCast), the per-instance vtable
// slots, and finally the class's own data members.
struct OBJIOVASPACE {
const struct NVOC_RTTI *__nvoc_rtti;
struct OBJVASPACE __nvoc_base_OBJVASPACE;
struct Object *__nvoc_pbase_Object;
struct OBJVASPACE *__nvoc_pbase_OBJVASPACE;
struct OBJIOVASPACE *__nvoc_pbase_OBJIOVASPACE;
NV_STATUS (*__iovaspaceConstruct___)(struct OBJIOVASPACE *, NvU32, NvU32, NvU64, NvU64, NvU64, NvU64, NvU32);
NV_STATUS (*__iovaspaceAlloc__)(struct OBJIOVASPACE *, NvU64, NvU64, NvU64, NvU64, NvU64, VAS_ALLOC_FLAGS, NvU64 *);
NV_STATUS (*__iovaspaceFree__)(struct OBJIOVASPACE *, NvU64);
NV_STATUS (*__iovaspaceApplyDefaultAlignment__)(struct OBJIOVASPACE *, const FB_ALLOC_INFO *, NvU64 *, NvU64 *, NvU64 *);
NV_STATUS (*__iovaspaceIncAllocRefCnt__)(struct OBJIOVASPACE *, NvU64);
NvU64 (*__iovaspaceGetVaStart__)(struct OBJIOVASPACE *);
NvU64 (*__iovaspaceGetVaLimit__)(struct OBJIOVASPACE *);
NV_STATUS (*__iovaspaceGetVasInfo__)(struct OBJIOVASPACE *, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *);
NvBool (*__iovaspaceIsInternalVaRestricted__)(struct OBJIOVASPACE *);
NvU32 (*__iovaspaceGetFlags__)(struct OBJIOVASPACE *);
// Number of live IOVA mappings in this space.
NvU64 mappingCount;
};
#ifndef __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__
#define __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__
typedef struct OBJIOVASPACE OBJIOVASPACE;
#endif /* __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ */
#ifndef __nvoc_class_id_OBJIOVASPACE
#define __nvoc_class_id_OBJIOVASPACE 0x28ed9c
#endif /* __nvoc_class_id_OBJIOVASPACE */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJIOVASPACE;
#define __staticCast_OBJIOVASPACE(pThis) \
((pThis)->__nvoc_pbase_OBJIOVASPACE)
#ifdef __nvoc_io_vaspace_h_disabled
#define __dynamicCast_OBJIOVASPACE(pThis) ((OBJIOVASPACE*)NULL)
#else //__nvoc_io_vaspace_h_disabled
#define __dynamicCast_OBJIOVASPACE(pThis) \
((OBJIOVASPACE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJIOVASPACE)))
#endif //__nvoc_io_vaspace_h_disabled
NV_STATUS __nvoc_objCreateDynamic_OBJIOVASPACE(OBJIOVASPACE**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_OBJIOVASPACE(OBJIOVASPACE**, Dynamic*, NvU32);
#define __objCreate_OBJIOVASPACE(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_OBJIOVASPACE((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
//
// Virtual-call convenience macros: each iovaspaceX(...) call expands to the
// corresponding iovaspaceX_DISPATCH inline, which indirects through the
// per-instance vtable slot installed by __nvoc_init_funcTable_OBJIOVASPACE.
//
#define iovaspaceConstruct_(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) iovaspaceConstruct__DISPATCH(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags)
#define iovaspaceAlloc(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) iovaspaceAlloc_DISPATCH(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr)
#define iovaspaceFree(pVAS, vAddr) iovaspaceFree_DISPATCH(pVAS, vAddr)
#define iovaspaceApplyDefaultAlignment(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) iovaspaceApplyDefaultAlignment_DISPATCH(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask)
#define iovaspaceIncAllocRefCnt(pVAS, vAddr) iovaspaceIncAllocRefCnt_DISPATCH(pVAS, vAddr)
#define iovaspaceGetVaStart(pVAS) iovaspaceGetVaStart_DISPATCH(pVAS)
#define iovaspaceGetVaLimit(pVAS) iovaspaceGetVaLimit_DISPATCH(pVAS)
#define iovaspaceGetVasInfo(pVAS, pParams) iovaspaceGetVasInfo_DISPATCH(pVAS, pParams)
#define iovaspaceIsInternalVaRestricted(pVAS) iovaspaceIsInternalVaRestricted_DISPATCH(pVAS)
#define iovaspaceGetFlags(pVAS) iovaspaceGetFlags_DISPATCH(pVAS)
// Concrete implementation prototypes (defined in io_vaspace.c) and their
// vtable dispatch wrappers.
NV_STATUS iovaspaceConstruct__IMPL(struct OBJIOVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags);
static inline NV_STATUS iovaspaceConstruct__DISPATCH(struct OBJIOVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) {
return pVAS->__iovaspaceConstruct___(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags);
}
NV_STATUS iovaspaceAlloc_IMPL(struct OBJIOVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr);
static inline NV_STATUS iovaspaceAlloc_DISPATCH(struct OBJIOVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) {
return pVAS->__iovaspaceAlloc__(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr);
}
NV_STATUS iovaspaceFree_IMPL(struct OBJIOVASPACE *pVAS, NvU64 vAddr);
static inline NV_STATUS iovaspaceFree_DISPATCH(struct OBJIOVASPACE *pVAS, NvU64 vAddr) {
return pVAS->__iovaspaceFree__(pVAS, vAddr);
}
NV_STATUS iovaspaceApplyDefaultAlignment_IMPL(struct OBJIOVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask);
static inline NV_STATUS iovaspaceApplyDefaultAlignment_DISPATCH(struct OBJIOVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) {
return pVAS->__iovaspaceApplyDefaultAlignment__(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask);
}
NV_STATUS iovaspaceIncAllocRefCnt_IMPL(struct OBJIOVASPACE *pVAS, NvU64 vAddr);
static inline NV_STATUS iovaspaceIncAllocRefCnt_DISPATCH(struct OBJIOVASPACE *pVAS, NvU64 vAddr) {
return pVAS->__iovaspaceIncAllocRefCnt__(pVAS, vAddr);
}
NvU64 iovaspaceGetVaStart_IMPL(struct OBJIOVASPACE *pVAS);
static inline NvU64 iovaspaceGetVaStart_DISPATCH(struct OBJIOVASPACE *pVAS) {
return pVAS->__iovaspaceGetVaStart__(pVAS);
}
NvU64 iovaspaceGetVaLimit_IMPL(struct OBJIOVASPACE *pVAS);
static inline NvU64 iovaspaceGetVaLimit_DISPATCH(struct OBJIOVASPACE *pVAS) {
return pVAS->__iovaspaceGetVaLimit__(pVAS);
}
NV_STATUS iovaspaceGetVasInfo_IMPL(struct OBJIOVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams);
static inline NV_STATUS iovaspaceGetVasInfo_DISPATCH(struct OBJIOVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) {
return pVAS->__iovaspaceGetVasInfo__(pVAS, pParams);
}
// The two virtuals below are inherited from OBJVASPACE (no _IMPL here).
static inline NvBool iovaspaceIsInternalVaRestricted_DISPATCH(struct OBJIOVASPACE *pVAS) {
return pVAS->__iovaspaceIsInternalVaRestricted__(pVAS);
}
static inline NvU32 iovaspaceGetFlags_DISPATCH(struct OBJIOVASPACE *pVAS) {
return pVAS->__iovaspaceGetFlags__(pVAS);
}
// Destructor hook invoked by the generated __nvoc_dtor path.
void iovaspaceDestruct_IMPL(struct OBJIOVASPACE *pIOVAS);
#define __nvoc_iovaspaceDestruct(pIOVAS) iovaspaceDestruct_IMPL(pIOVAS)
//
// Non-virtual mapping helpers. When the class is compiled out
// (__nvoc_io_vaspace_h_disabled) each call site gets an asserting stub
// instead, so callers need no #ifdefs of their own.
//
// NOTE(review): the second parameter is typed PMEMORY_DESCRIPTOR but named
// pIovaMapping — verify the intended name/type against io_vaspace.c.
NV_STATUS iovaspaceAcquireMapping_IMPL(struct OBJIOVASPACE *pIOVAS, PMEMORY_DESCRIPTOR pIovaMapping);
#ifdef __nvoc_io_vaspace_h_disabled
static inline NV_STATUS iovaspaceAcquireMapping(struct OBJIOVASPACE *pIOVAS, PMEMORY_DESCRIPTOR pIovaMapping) {
NV_ASSERT_FAILED_PRECOMP("OBJIOVASPACE was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_io_vaspace_h_disabled
#define iovaspaceAcquireMapping(pIOVAS, pIovaMapping) iovaspaceAcquireMapping_IMPL(pIOVAS, pIovaMapping)
#endif //__nvoc_io_vaspace_h_disabled
// Decrement the mapping's refcount (see IOVAMAPPING::refcount).
void iovaspaceReleaseMapping_IMPL(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping);
#ifdef __nvoc_io_vaspace_h_disabled
static inline void iovaspaceReleaseMapping(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping) {
NV_ASSERT_FAILED_PRECOMP("OBJIOVASPACE was disabled!");
}
#else //__nvoc_io_vaspace_h_disabled
#define iovaspaceReleaseMapping(pIOVAS, pIovaMapping) iovaspaceReleaseMapping_IMPL(pIOVAS, pIovaMapping)
#endif //__nvoc_io_vaspace_h_disabled
// Destroy a mapping regardless of refcount (see IOVAMAPPING::refcount).
void iovaspaceDestroyMapping_IMPL(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping);
#ifdef __nvoc_io_vaspace_h_disabled
static inline void iovaspaceDestroyMapping(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping) {
NV_ASSERT_FAILED_PRECOMP("OBJIOVASPACE was disabled!");
}
#else //__nvoc_io_vaspace_h_disabled
#define iovaspaceDestroyMapping(pIOVAS, pIovaMapping) iovaspaceDestroyMapping_IMPL(pIOVAS, pIovaMapping)
#endif //__nvoc_io_vaspace_h_disabled
#undef PRIVATE_FIELD
// Lookup helpers (non-class functions).
struct OBJIOVASPACE* iovaspaceFromId(NvU32 iovaspaceId);
struct OBJIOVASPACE* iovaspaceFromMapping(PIOVAMAPPING pIovaMapping);
//
// Helper that looks up the IOVAS from the mapping and then calls
// iovaspaceDestroyMapping().
//
void iovaMappingDestroy(PIOVAMAPPING pIovaMapping);
#endif // _IOVASPACE_H_
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_IO_VASPACE_NVOC_H_

/* ==== file boundary (web-viewer "View File"/diff-header artifact removed):
 *      next content is g_journal_nvoc.h ==== */
#ifndef _G_JOURNAL_NVOC_H_
#define _G_JOURNAL_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_journal_nvoc.h"
#ifndef _JOURNAL_H_
#define _JOURNAL_H_
//
// Journal object defines and Structures
//
#include "kernel/core/core.h"
#endif // _JOURNAL_H_
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_JOURNAL_NVOC_H_

/* ==== file boundary (web-viewer "View File"/diff-header artifact removed):
 *      next content is g_kern_disp_nvoc.c ==== */
#define NVOC_KERN_DISP_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_kern_disp_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x55952e = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;
void __nvoc_init_KernelDisplay(KernelDisplay*, RmHalspecOwner* );
void __nvoc_init_funcTable_KernelDisplay(KernelDisplay*, RmHalspecOwner* );
NV_STATUS __nvoc_ctor_KernelDisplay(KernelDisplay*, RmHalspecOwner* );
void __nvoc_init_dataField_KernelDisplay(KernelDisplay*, RmHalspecOwner* );
void __nvoc_dtor_KernelDisplay(KernelDisplay*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelDisplay;
//
// RTTI records for KernelDisplay and its ancestors. Each record holds the
// class definition, the destructor to invoke when destroying through that
// ancestor, and the byte offset of the ancestor subobject within
// KernelDisplay (0 for the most-derived class).
//
static const struct NVOC_RTTI __nvoc_rtti_KernelDisplay_KernelDisplay = {
/*pClassDef=*/ &__nvoc_class_def_KernelDisplay,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelDisplay,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_KernelDisplay_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE.__nvoc_base_Object),
};
static const struct NVOC_RTTI __nvoc_rtti_KernelDisplay_OBJENGSTATE = {
/*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE),
};
// Cast table consulted by dynamicCast: self plus the two ancestors.
static const struct NVOC_CASTINFO __nvoc_castinfo_KernelDisplay = {
/*numRelatives=*/ 3,
/*relatives=*/ {
&__nvoc_rtti_KernelDisplay_KernelDisplay,
&__nvoc_rtti_KernelDisplay_OBJENGSTATE,
&__nvoc_rtti_KernelDisplay_Object,
},
};
// Class definition: size, class id, optional name, dynamic-create entry
// point, cast table, and (empty) export table.
const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay =
{
/*classInfo=*/ {
/*size=*/ sizeof(KernelDisplay),
/*classId=*/ classId(KernelDisplay),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "KernelDisplay",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelDisplay,
/*pCastInfo=*/ &__nvoc_castinfo_KernelDisplay,
/*pExportInfo=*/ &__nvoc_export_info_KernelDisplay
};
//
// Down-cast thunks: installed in the embedded OBJENGSTATE vtable so calls
// made through the base class dispatch to the KernelDisplay overrides.
// Each subtracts the base-subobject offset to recover the KernelDisplay*.
//
static NV_STATUS __nvoc_thunk_KernelDisplay_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, ENGDESCRIPTOR engDesc) {
return kdispConstructEngine(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), engDesc);
}
static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStatePreInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) {
return kdispStatePreInitLocked(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) {
return kdispStateInitLocked(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset));
}
static void __nvoc_thunk_KernelDisplay_engstateStateDestroy(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) {
kdispStateDestroy(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStateLoad(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, NvU32 flags) {
return kdispStateLoad(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), flags);
}
static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStateUnload(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, NvU32 flags) {
return kdispStateUnload(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), flags);
}
//
// Up-cast thunks: expose inherited OBJENGSTATE behavior through the
// KernelDisplay vtable for virtuals KernelDisplay does not override.
// Each adds the base-subobject offset to reach the OBJENGSTATE*.
//
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispReconcileTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePreLoad(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePostUnload(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePreUnload(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStateInitUnlocked(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset));
}
static void __nvoc_thunk_OBJENGSTATE_kdispInitMissing(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePreInitUnlocked(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispGetTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispCompareTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunables1, void *pTunables2) {
return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunables1, pTunables2);
}
static void __nvoc_thunk_OBJENGSTATE_kdispFreeTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePostLoad(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispAllocTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void **ppTunableState) {
return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), ppTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispSetTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState);
}
static NvBool __nvoc_thunk_OBJENGSTATE_kdispIsPresent(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset));
}
// KernelDisplay exports no RM-control entries.
const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelDisplay =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*);
// Destroy a KernelDisplay: run the class destructor first, then the
// embedded OBJENGSTATE base's destructor (derived-to-base order).
void __nvoc_dtor_KernelDisplay(KernelDisplay *pThis) {
__nvoc_kdispDestruct(pThis);
__nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Initialize KernelDisplay data members, selecting HAL-dependent values by
// testing the owner's halspec indices (each index is checked as a bit in a
// 32-bit word group: word = idx >> 5, bit = idx & 0x1f).
void __nvoc_init_dataField_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) {
RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
ChipHal *chipHal = &pRmhalspecowner->chipHal;
const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal;
const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx;
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
PORT_UNREFERENCED_VARIABLE(chipHal);
PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
PORT_UNREFERENCED_VARIABLE(dispIpHal);
PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx);
// NVOC Property Hal field -- PDB_PROP_KDISP_IS_MISSING
if (0)
{
}
else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
pThis->setProperty(pThis, PDB_PROP_KDISP_IS_MISSING, ((NvBool)(0 != 0)));
}
// NVOC Property Hal field -- PDB_PROP_KDISP_IMP_ENABLE
if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
{
pThis->setProperty(pThis, PDB_PROP_KDISP_IMP_ENABLE, ((NvBool)(0 == 0)));
}
// default
else
{
pThis->setProperty(pThis, PDB_PROP_KDISP_IMP_ENABLE, ((NvBool)(0 != 0)));
}
pThis->pStaticInfo = ((void *)0);
pThis->bWarPurgeSatellitesOnCoreFree = ((NvBool)(0 != 0));
}
NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );
// Construct a KernelDisplay: base OBJENGSTATE constructor first, then the
// HAL-dependent data-field initialization (base-to-derived order).
NV_STATUS __nvoc_ctor_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
if (status != NV_OK) goto __nvoc_ctor_KernelDisplay_fail_OBJENGSTATE;
__nvoc_init_dataField_KernelDisplay(pThis, pRmhalspecowner);
goto __nvoc_ctor_KernelDisplay_exit; // Success
__nvoc_ctor_KernelDisplay_fail_OBJENGSTATE:
__nvoc_ctor_KernelDisplay_exit:
return status;
}
// Populate the KernelDisplay virtual function table (part 1 of 1): the
// class's own overrides, down-cast thunks in the OBJENGSTATE base's slots,
// and up-cast thunks for the OBJENGSTATE virtuals it does not override.
// The halspec locals are set up for HAL-conditional entries but are unused
// for this configuration.
static void __nvoc_init_funcTable_KernelDisplay_1(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) {
RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
ChipHal *chipHal = &pRmhalspecowner->chipHal;
const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal;
const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx;
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
PORT_UNREFERENCED_VARIABLE(chipHal);
PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
PORT_UNREFERENCED_VARIABLE(dispIpHal);
PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx);
// KernelDisplay-level overrides
pThis->__kdispConstructEngine__ = &kdispConstructEngine_IMPL;
pThis->__kdispStatePreInitLocked__ = &kdispStatePreInitLocked_IMPL;
pThis->__kdispStateInitLocked__ = &kdispStateInitLocked_IMPL;
pThis->__kdispStateDestroy__ = &kdispStateDestroy_IMPL;
pThis->__kdispStateLoad__ = &kdispStateLoad_IMPL;
pThis->__kdispStateUnload__ = &kdispStateUnload_IMPL;
// base-class slots redirected to down-cast thunks
pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelDisplay_engstateConstructEngine;
pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreInitLocked__ = &__nvoc_thunk_KernelDisplay_engstateStatePreInitLocked;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelDisplay_engstateStateInitLocked;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelDisplay_engstateStateDestroy;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelDisplay_engstateStateLoad;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelDisplay_engstateStateUnload;
// inherited OBJENGSTATE behavior via up-cast thunks
pThis->__kdispReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispReconcileTunableState;
pThis->__kdispStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePreLoad;
pThis->__kdispStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePostUnload;
pThis->__kdispStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePreUnload;
pThis->__kdispStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kdispStateInitUnlocked;
pThis->__kdispInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kdispInitMissing;
pThis->__kdispStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePreInitUnlocked;
pThis->__kdispGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispGetTunableState;
pThis->__kdispCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispCompareTunableState;
pThis->__kdispFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispFreeTunableState;
pThis->__kdispStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePostLoad;
pThis->__kdispAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispAllocTunableState;
pThis->__kdispSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispSetTunableState;
pThis->__kdispIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kdispIsPresent;
}
// Public vtable-initialization entry point (single "_1" helper).
void __nvoc_init_funcTable_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) {
__nvoc_init_funcTable_KernelDisplay_1(pThis, pRmhalspecowner);
}
void __nvoc_init_OBJENGSTATE(OBJENGSTATE*);
// Initialize a KernelDisplay instance: cache ancestor pointers, initialize
// the embedded OBJENGSTATE base, then install the function table.
void __nvoc_init_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) {
pThis->__nvoc_pbase_KernelDisplay = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object;
pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE;
__nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
__nvoc_init_funcTable_KernelDisplay(pThis, pRmhalspecowner);
}
// NVOC-generated factory for KernelDisplay.
// Allocates zeroed non-paged memory, sets up RTTI, optionally links the new
// object as a child of pParent, resolves the RmHalspecOwner (needed for
// HAL-dependent initialization), runs init + constructor, and returns the
// object in *ppThis. On failure *ppThis is left untouched and the
// allocation is freed.
NV_STATUS __nvoc_objCreate_KernelDisplay(KernelDisplay **ppThis, Dynamic *pParent, NvU32 createFlags) {
NV_STATUS status;
Object *pParentObj;
KernelDisplay *pThis;
RmHalspecOwner *pRmhalspecowner;
pThis = portMemAllocNonPaged(sizeof(KernelDisplay));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(KernelDisplay));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelDisplay);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL;
}
// The halspec owner is the parent itself or its nearest ancestor.
if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL)
pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent);
// Fix: the previous NV_ASSERT_OR_RETURN here returned without freeing
// pThis, leaking the allocation. Route through the cleanup path instead
// (which, like the constructor-failure path, frees without undoing the
// objAddChild linkage — matching the original cleanup behavior).
if (pRmhalspecowner == NULL)
{
NV_ASSERT(pRmhalspecowner != NULL);
status = NV_ERR_INVALID_ARGUMENT;
goto __nvoc_objCreate_KernelDisplay_cleanup;
}
__nvoc_init_KernelDisplay(pThis, pRmhalspecowner);
status = __nvoc_ctor_KernelDisplay(pThis, pRmhalspecowner);
if (status != NV_OK) goto __nvoc_objCreate_KernelDisplay_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_KernelDisplay_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
// Varargs-based dynamic creation shim used by the NVOC class definition's
// objCreatefn slot. KernelDisplay takes no creation arguments, so 'args'
// is intentionally unused.
NV_STATUS __nvoc_objCreateDynamic_KernelDisplay(KernelDisplay **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
NV_STATUS status;
status = __nvoc_objCreate_KernelDisplay(ppThis, pParent, createFlags);
return status;
}

/* ==== file boundary (web-viewer "View File"/diff-header artifact removed):
 *      next content is g_kern_disp_nvoc.h ==== */
#ifndef _G_KERN_DISP_NVOC_H_
#define _G_KERN_DISP_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_kern_disp_nvoc.h"
#ifndef KERN_DISP_H
#define KERN_DISP_H
/******************************************************************************
*
* Kernel Display module header
* This file contains functions managing display on CPU RM
*
******************************************************************************/
#include "gpu/eng_state.h"
#include "gpu/gpu_halspec.h"
#include "gpu/disp/kern_disp_type.h"
#include "gpu/disp/kern_disp_max.h"
#include "gpu/mem_mgr/context_dma.h"
#include "gpu/disp/vblank_callback/vblank.h"
#include "ctrl/ctrl2080/ctrl2080internal.h"
typedef NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS KernelDisplayStaticInfo;
struct DispChannel;
#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__
#define __NVOC_CLASS_DispChannel_TYPEDEF__
typedef struct DispChannel DispChannel;
#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */
#ifndef __nvoc_class_id_DispChannel
#define __nvoc_class_id_DispChannel 0xbd2ff3
#endif /* __nvoc_class_id_DispChannel */
struct RgLineCallback;
#ifndef __NVOC_CLASS_RgLineCallback_TYPEDEF__
#define __NVOC_CLASS_RgLineCallback_TYPEDEF__
typedef struct RgLineCallback RgLineCallback;
#endif /* __NVOC_CLASS_RgLineCallback_TYPEDEF__ */
#ifndef __nvoc_class_id_RgLineCallback
#define __nvoc_class_id_RgLineCallback 0xa3ff1c
#endif /* __nvoc_class_id_RgLineCallback */
#define KDISP_GET_HEAD(pKernelDisplay, headID) (RMCFG_MODULE_KERNEL_HEAD ? kdispGetHead(pKernelDisplay, headID) : NULL)
/*!
* KernelDisp is a logical abstraction of the GPU Display Engine. The
* Public API of the Display Engine is exposed through this object, and any
* interfaces which do not manage the underlying Display hardware can be
* managed by this object.
*/
#ifdef NVOC_KERN_DISP_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
/*!
 * Per-GPU kernel-side display engine state (NVOC-generated class).
 * Derives from OBJENGSTATE; the function pointers below form its virtual
 * dispatch table, installed by __nvoc_init_funcTable_KernelDisplay()
 * according to the GPU's halspec.
 */
struct KernelDisplay {
// Run-time type info; stamped by __nvoc_initRtti() at object creation.
const struct NVOC_RTTI *__nvoc_rtti;
// Embedded base-class instance (engine state).
struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
// Cached ancestor casts so staticCast() is a constant-time field read.
struct Object *__nvoc_pbase_Object;
struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
struct KernelDisplay *__nvoc_pbase_KernelDisplay;
// Virtual function table. The first six entries override OBJENGSTATE engine
// lifecycle hooks for KernelDisplay; the POBJGPU-typed ones are inherited
// engine-state plumbing (tunable state, pre/post load/unload, presence).
NV_STATUS (*__kdispConstructEngine__)(struct OBJGPU *, struct KernelDisplay *, ENGDESCRIPTOR);
NV_STATUS (*__kdispStatePreInitLocked__)(struct OBJGPU *, struct KernelDisplay *);
NV_STATUS (*__kdispStateInitLocked__)(struct OBJGPU *, struct KernelDisplay *);
void (*__kdispStateDestroy__)(struct OBJGPU *, struct KernelDisplay *);
NV_STATUS (*__kdispStateLoad__)(struct OBJGPU *, struct KernelDisplay *, NvU32);
NV_STATUS (*__kdispStateUnload__)(struct OBJGPU *, struct KernelDisplay *, NvU32);
NV_STATUS (*__kdispReconcileTunableState__)(POBJGPU, struct KernelDisplay *, void *);
NV_STATUS (*__kdispStatePreLoad__)(POBJGPU, struct KernelDisplay *, NvU32);
NV_STATUS (*__kdispStatePostUnload__)(POBJGPU, struct KernelDisplay *, NvU32);
NV_STATUS (*__kdispStatePreUnload__)(POBJGPU, struct KernelDisplay *, NvU32);
NV_STATUS (*__kdispStateInitUnlocked__)(POBJGPU, struct KernelDisplay *);
void (*__kdispInitMissing__)(POBJGPU, struct KernelDisplay *);
NV_STATUS (*__kdispStatePreInitUnlocked__)(POBJGPU, struct KernelDisplay *);
NV_STATUS (*__kdispGetTunableState__)(POBJGPU, struct KernelDisplay *, void *);
NV_STATUS (*__kdispCompareTunableState__)(POBJGPU, struct KernelDisplay *, void *, void *);
void (*__kdispFreeTunableState__)(POBJGPU, struct KernelDisplay *, void *);
NV_STATUS (*__kdispStatePostLoad__)(POBJGPU, struct KernelDisplay *, NvU32);
NV_STATUS (*__kdispAllocTunableState__)(POBJGPU, struct KernelDisplay *, void **);
NV_STATUS (*__kdispSetTunableState__)(POBJGPU, struct KernelDisplay *, void *);
NvBool (*__kdispIsPresent__)(POBJGPU, struct KernelDisplay *);
// PDB property. NOTE(review): presumably enables IMP (display bandwidth)
// handling -- confirm exact semantics against RM PDB documentation.
NvBool PDB_PROP_KDISP_IMP_ENABLE;
// Display instance memory (constructed via kdispConstructInstMem()).
struct DisplayInstanceMemory *pInst;
// Per-head kernel objects, accessed through kdispGetHead(). The dimension
// is presumably OBJ_MAX_HEADS (kern_disp_max.h) -- TODO confirm it is 4.
struct KernelHead *pKernelHead[4];
// Snapshot of NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS
// (see the KernelDisplayStaticInfo typedef); read by kdispGetNumHeads()
// and kdispGetIsPrimaryVga().
const KernelDisplayStaticInfo *pStaticInfo;
// WAR flag: purge satellite channels when the core channel is freed
// (set via kdispSetWarPurgeSatellitesOnCoreFree()).
NvBool bWarPurgeSatellitesOnCoreFree;
// RG-line callbacks registered by kdispRegisterRgLineCallback(), indexed by
// [head][slot]. NOTE(review): second dimension looks like one slot per RG
// interrupt line -- confirm against RgLineCallback usage.
struct RgLineCallback *rgLineCallbackPerHead[4][2];
// NOTE(review): appears to be a bitmask of heads with vblank work observed
// at ISR time -- confirm against kdispServiceVblank()/kdispReadPendingVblank().
NvU32 isrVblankHeads;
};
#ifndef __NVOC_CLASS_KernelDisplay_TYPEDEF__
#define __NVOC_CLASS_KernelDisplay_TYPEDEF__
typedef struct KernelDisplay KernelDisplay;
#endif /* __NVOC_CLASS_KernelDisplay_TYPEDEF__ */
#ifndef __nvoc_class_id_KernelDisplay
#define __nvoc_class_id_KernelDisplay 0x55952e
#endif /* __nvoc_class_id_KernelDisplay */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay;
#define __staticCast_KernelDisplay(pThis) \
((pThis)->__nvoc_pbase_KernelDisplay)
#ifdef __nvoc_kern_disp_h_disabled
#define __dynamicCast_KernelDisplay(pThis) ((KernelDisplay*)NULL)
#else //__nvoc_kern_disp_h_disabled
#define __dynamicCast_KernelDisplay(pThis) \
((KernelDisplay*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelDisplay)))
#endif //__nvoc_kern_disp_h_disabled
#define PDB_PROP_KDISP_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
#define PDB_PROP_KDISP_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
#define PDB_PROP_KDISP_IMP_ENABLE_BASE_CAST
#define PDB_PROP_KDISP_IMP_ENABLE_BASE_NAME PDB_PROP_KDISP_IMP_ENABLE
NV_STATUS __nvoc_objCreateDynamic_KernelDisplay(KernelDisplay**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_KernelDisplay(KernelDisplay**, Dynamic*, NvU32);
#define __objCreate_KernelDisplay(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_KernelDisplay((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
#define kdispConstructEngine(pGpu, pKernelDisplay, engDesc) kdispConstructEngine_DISPATCH(pGpu, pKernelDisplay, engDesc)
#define kdispStatePreInitLocked(pGpu, pKernelDisplay) kdispStatePreInitLocked_DISPATCH(pGpu, pKernelDisplay)
#define kdispStateInitLocked(pGpu, pKernelDisplay) kdispStateInitLocked_DISPATCH(pGpu, pKernelDisplay)
#define kdispStateDestroy(pGpu, pKernelDisplay) kdispStateDestroy_DISPATCH(pGpu, pKernelDisplay)
#define kdispStateLoad(pGpu, pKernelDisplay, flags) kdispStateLoad_DISPATCH(pGpu, pKernelDisplay, flags)
#define kdispStateUnload(pGpu, pKernelDisplay, flags) kdispStateUnload_DISPATCH(pGpu, pKernelDisplay, flags)
#define kdispReconcileTunableState(pGpu, pEngstate, pTunableState) kdispReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define kdispStatePreLoad(pGpu, pEngstate, arg0) kdispStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kdispStatePostUnload(pGpu, pEngstate, arg0) kdispStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kdispStatePreUnload(pGpu, pEngstate, arg0) kdispStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kdispStateInitUnlocked(pGpu, pEngstate) kdispStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kdispInitMissing(pGpu, pEngstate) kdispInitMissing_DISPATCH(pGpu, pEngstate)
#define kdispStatePreInitUnlocked(pGpu, pEngstate) kdispStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kdispGetTunableState(pGpu, pEngstate, pTunableState) kdispGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define kdispCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kdispCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2)
#define kdispFreeTunableState(pGpu, pEngstate, pTunableState) kdispFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define kdispStatePostLoad(pGpu, pEngstate, arg0) kdispStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kdispAllocTunableState(pGpu, pEngstate, ppTunableState) kdispAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState)
#define kdispSetTunableState(pGpu, pEngstate, pTunableState) kdispSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define kdispIsPresent(pGpu, pEngstate) kdispIsPresent_DISPATCH(pGpu, pEngstate)
NV_STATUS kdispConstructInstMem_IMPL(struct KernelDisplay *pKernelDisplay);
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispConstructInstMem(struct KernelDisplay *pKernelDisplay) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispConstructInstMem(pKernelDisplay) kdispConstructInstMem_IMPL(pKernelDisplay)
#endif //__nvoc_kern_disp_h_disabled
#define kdispConstructInstMem_HAL(pKernelDisplay) kdispConstructInstMem(pKernelDisplay)
void kdispDestructInstMem_IMPL(struct KernelDisplay *pKernelDisplay);
#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispDestructInstMem(struct KernelDisplay *pKernelDisplay) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispDestructInstMem(pKernelDisplay) kdispDestructInstMem_IMPL(pKernelDisplay)
#endif //__nvoc_kern_disp_h_disabled
#define kdispDestructInstMem_HAL(pKernelDisplay) kdispDestructInstMem(pKernelDisplay)
NV_STATUS kdispSelectClass_v03_00_KERNEL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 swClass);
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispSelectClass(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 swClass) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispSelectClass(pGpu, pKernelDisplay, swClass) kdispSelectClass_v03_00_KERNEL(pGpu, pKernelDisplay, swClass)
#endif //__nvoc_kern_disp_h_disabled
#define kdispSelectClass_HAL(pGpu, pKernelDisplay, swClass) kdispSelectClass(pGpu, pKernelDisplay, swClass)
NvS32 kdispGetBaseOffset_v04_02(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay);
#ifdef __nvoc_kern_disp_h_disabled
static inline NvS32 kdispGetBaseOffset(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return 0;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispGetBaseOffset(pGpu, pKernelDisplay) kdispGetBaseOffset_v04_02(pGpu, pKernelDisplay)
#endif //__nvoc_kern_disp_h_disabled
#define kdispGetBaseOffset_HAL(pGpu, pKernelDisplay) kdispGetBaseOffset(pGpu, pKernelDisplay)
NV_STATUS kdispGetChannelNum_v03_00(struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pChannelNum);
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispGetChannelNum(struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pChannelNum) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispGetChannelNum(pKernelDisplay, channelClass, channelInstance, pChannelNum) kdispGetChannelNum_v03_00(pKernelDisplay, channelClass, channelInstance, pChannelNum)
#endif //__nvoc_kern_disp_h_disabled
#define kdispGetChannelNum_HAL(pKernelDisplay, channelClass, channelInstance, pChannelNum) kdispGetChannelNum(pKernelDisplay, channelClass, channelInstance, pChannelNum)
void kdispGetDisplayCapsBaseAndSize_v03_00(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize);
#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispGetDisplayCapsBaseAndSize(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispGetDisplayCapsBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplayCapsBaseAndSize_v03_00(pGpu, pKernelDisplay, pOffset, pSize)
#endif //__nvoc_kern_disp_h_disabled
#define kdispGetDisplayCapsBaseAndSize_HAL(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplayCapsBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize)
void kdispGetDisplaySfUserBaseAndSize_v03_00(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize);
#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispGetDisplaySfUserBaseAndSize(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispGetDisplaySfUserBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplaySfUserBaseAndSize_v03_00(pGpu, pKernelDisplay, pOffset, pSize)
#endif //__nvoc_kern_disp_h_disabled
#define kdispGetDisplaySfUserBaseAndSize_HAL(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplaySfUserBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize)
NV_STATUS kdispGetDisplayChannelUserBaseAndSize_v03_00(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pOffset, NvU32 *pSize);
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispGetDisplayChannelUserBaseAndSize(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pOffset, NvU32 *pSize) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispGetDisplayChannelUserBaseAndSize(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) kdispGetDisplayChannelUserBaseAndSize_v03_00(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize)
#endif //__nvoc_kern_disp_h_disabled
#define kdispGetDisplayChannelUserBaseAndSize_HAL(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) kdispGetDisplayChannelUserBaseAndSize(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize)
NV_STATUS kdispImportImpData_IMPL(struct KernelDisplay *pKernelDisplay);
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispImportImpData(struct KernelDisplay *pKernelDisplay) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispImportImpData(pKernelDisplay) kdispImportImpData_IMPL(pKernelDisplay)
#endif //__nvoc_kern_disp_h_disabled
#define kdispImportImpData_HAL(pKernelDisplay) kdispImportImpData(pKernelDisplay)
NV_STATUS kdispArbAndAllocDisplayBandwidth_v04_02(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, enum DISPLAY_ICC_BW_CLIENT iccBwClient, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS);
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispArbAndAllocDisplayBandwidth(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, enum DISPLAY_ICC_BW_CLIENT iccBwClient, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispArbAndAllocDisplayBandwidth(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispArbAndAllocDisplayBandwidth_v04_02(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS)
#endif //__nvoc_kern_disp_h_disabled
#define kdispArbAndAllocDisplayBandwidth_HAL(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispArbAndAllocDisplayBandwidth(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS)
NV_STATUS kdispSetPushBufferParamsToPhysical_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvHandle hObjectBuffer, struct ContextDma *pBufferContextDma, NvU32 hClass, NvU32 channelInstance, DISPCHNCLASS internalDispChnClass);
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispSetPushBufferParamsToPhysical(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvHandle hObjectBuffer, struct ContextDma *pBufferContextDma, NvU32 hClass, NvU32 channelInstance, DISPCHNCLASS internalDispChnClass) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispSetPushBufferParamsToPhysical(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass) kdispSetPushBufferParamsToPhysical_IMPL(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass)
#endif //__nvoc_kern_disp_h_disabled
#define kdispSetPushBufferParamsToPhysical_HAL(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass) kdispSetPushBufferParamsToPhysical(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass)
// Default HAL variant: on this configuration acquiring a display channel
// requires no kernel-side hardware work, so succeed unconditionally.
// (The _56cd7a suffix is presumably the NVOC hash of this trivial body.)
static inline NV_STATUS kdispAcquireDispChannelHw_56cd7a(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvU32 channelInstance, NvHandle hObjectBuffer, NvU32 initialGetPutOffset, NvBool allowGrabWithinSameClient, NvBool connectPbAtGrab) {
return NV_OK;
}
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispAcquireDispChannelHw(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvU32 channelInstance, NvHandle hObjectBuffer, NvU32 initialGetPutOffset, NvBool allowGrabWithinSameClient, NvBool connectPbAtGrab) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispAcquireDispChannelHw(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) kdispAcquireDispChannelHw_56cd7a(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab)
#endif //__nvoc_kern_disp_h_disabled
#define kdispAcquireDispChannelHw_HAL(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) kdispAcquireDispChannelHw(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab)
// Default HAL variant: releasing a display channel needs no kernel-side
// hardware work on this configuration; always reports success.
static inline NV_STATUS kdispReleaseDispChannelHw_56cd7a(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) {
return NV_OK;
}
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispReleaseDispChannelHw(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispReleaseDispChannelHw(pKernelDisplay, pDispChannel) kdispReleaseDispChannelHw_56cd7a(pKernelDisplay, pDispChannel)
#endif //__nvoc_kern_disp_h_disabled
#define kdispReleaseDispChannelHw_HAL(pKernelDisplay, pDispChannel) kdispReleaseDispChannelHw(pKernelDisplay, pDispChannel)
NV_STATUS kdispMapDispChannel_IMPL(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel);
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispMapDispChannel(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispMapDispChannel(pKernelDisplay, pDispChannel) kdispMapDispChannel_IMPL(pKernelDisplay, pDispChannel)
#endif //__nvoc_kern_disp_h_disabled
#define kdispMapDispChannel_HAL(pKernelDisplay, pDispChannel) kdispMapDispChannel(pKernelDisplay, pDispChannel)
void kdispUnbindUnmapDispChannel_IMPL(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel);
#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispUnbindUnmapDispChannel(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispUnbindUnmapDispChannel(pKernelDisplay, pDispChannel) kdispUnbindUnmapDispChannel_IMPL(pKernelDisplay, pDispChannel)
#endif //__nvoc_kern_disp_h_disabled
#define kdispUnbindUnmapDispChannel_HAL(pKernelDisplay, pDispChannel) kdispUnbindUnmapDispChannel(pKernelDisplay, pDispChannel)
NV_STATUS kdispRegisterRgLineCallback_IMPL(struct KernelDisplay *pKernelDisplay, struct RgLineCallback *pRgLineCallback, NvU32 head, NvU32 rgIntrLine, NvBool bEnable);
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispRegisterRgLineCallback(struct KernelDisplay *pKernelDisplay, struct RgLineCallback *pRgLineCallback, NvU32 head, NvU32 rgIntrLine, NvBool bEnable) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispRegisterRgLineCallback(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) kdispRegisterRgLineCallback_IMPL(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable)
#endif //__nvoc_kern_disp_h_disabled
#define kdispRegisterRgLineCallback_HAL(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) kdispRegisterRgLineCallback(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable)
void kdispInvokeRgLineCallback_KERNEL(struct KernelDisplay *pKernelDisplay, NvU32 head, NvU32 rgIntrLine, NvBool bIsIrqlIsr);
#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispInvokeRgLineCallback(struct KernelDisplay *pKernelDisplay, NvU32 head, NvU32 rgIntrLine, NvBool bIsIrqlIsr) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispInvokeRgLineCallback(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) kdispInvokeRgLineCallback_KERNEL(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr)
#endif //__nvoc_kern_disp_h_disabled
#define kdispInvokeRgLineCallback_HAL(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) kdispInvokeRgLineCallback(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr)
void kdispServiceVblank_KERNEL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 arg0, NvU32 arg1, THREAD_STATE_NODE *arg2);
#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispServiceVblank(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 arg0, NvU32 arg1, THREAD_STATE_NODE *arg2) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispServiceVblank(pGpu, pKernelDisplay, arg0, arg1, arg2) kdispServiceVblank_KERNEL(pGpu, pKernelDisplay, arg0, arg1, arg2)
#endif //__nvoc_kern_disp_h_disabled
#define kdispServiceVblank_HAL(pGpu, pKernelDisplay, arg0, arg1, arg2) kdispServiceVblank(pGpu, pKernelDisplay, arg0, arg1, arg2)
NvU32 kdispReadPendingVblank_KERNEL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *arg0);
#ifdef __nvoc_kern_disp_h_disabled
static inline NvU32 kdispReadPendingVblank(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *arg0) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return 0;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispReadPendingVblank(pGpu, pKernelDisplay, arg0) kdispReadPendingVblank_KERNEL(pGpu, pKernelDisplay, arg0)
#endif //__nvoc_kern_disp_h_disabled
#define kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, arg0) kdispReadPendingVblank(pGpu, pKernelDisplay, arg0)
// Default HAL variant: no VGA workspace base is available here. Fires a
// precomp assertion (this path is not expected to be reached on builds that
// select this variant) and returns NV_FALSE without writing *pOffset.
static inline NvBool kdispGetVgaWorkspaceBase_ceaee8(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU64 *pOffset) {
NV_ASSERT_PRECOMP(0);
return ((NvBool)(0 != 0));
}
#ifdef __nvoc_kern_disp_h_disabled
static inline NvBool kdispGetVgaWorkspaceBase(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU64 *pOffset) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return NV_FALSE;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispGetVgaWorkspaceBase(pGpu, pKernelDisplay, pOffset) kdispGetVgaWorkspaceBase_ceaee8(pGpu, pKernelDisplay, pOffset)
#endif //__nvoc_kern_disp_h_disabled
#define kdispGetVgaWorkspaceBase_HAL(pGpu, pKernelDisplay, pOffset) kdispGetVgaWorkspaceBase(pGpu, pKernelDisplay, pOffset)
void kdispInvokeDisplayModesetCallback_KERNEL(struct KernelDisplay *pKernelDisplay, NvBool bModesetStart, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS);
#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispInvokeDisplayModesetCallback(struct KernelDisplay *pKernelDisplay, NvBool bModesetStart, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispInvokeDisplayModesetCallback(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispInvokeDisplayModesetCallback_KERNEL(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS)
#endif //__nvoc_kern_disp_h_disabled
#define kdispInvokeDisplayModesetCallback_HAL(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispInvokeDisplayModesetCallback(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS)
// ---------------------------------------------------------------------------
// Virtual-method dispatch thunks.
// Each kdispFoo_DISPATCH() forwards through the object's function table
// (installed by __nvoc_init_funcTable_KernelDisplay); the matching
// kdispFoo_IMPL declaration is the KernelDisplay override bound there.
// Entries taking (struct OBJGPU *, struct KernelDisplay *) are KernelDisplay
// overrides of OBJENGSTATE lifecycle hooks; the POBJGPU/pEngstate-typed ones
// below are inherited engine-state plumbing.
// ---------------------------------------------------------------------------
NV_STATUS kdispConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, ENGDESCRIPTOR engDesc);
static inline NV_STATUS kdispConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, ENGDESCRIPTOR engDesc) {
return pKernelDisplay->__kdispConstructEngine__(pGpu, pKernelDisplay, engDesc);
}
NV_STATUS kdispStatePreInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay);
static inline NV_STATUS kdispStatePreInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) {
return pKernelDisplay->__kdispStatePreInitLocked__(pGpu, pKernelDisplay);
}
NV_STATUS kdispStateInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay);
static inline NV_STATUS kdispStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) {
return pKernelDisplay->__kdispStateInitLocked__(pGpu, pKernelDisplay);
}
void kdispStateDestroy_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay);
static inline void kdispStateDestroy_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) {
pKernelDisplay->__kdispStateDestroy__(pGpu, pKernelDisplay);
}
NV_STATUS kdispStateLoad_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags);
static inline NV_STATUS kdispStateLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags) {
return pKernelDisplay->__kdispStateLoad__(pGpu, pKernelDisplay, flags);
}
NV_STATUS kdispStateUnload_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags);
static inline NV_STATUS kdispStateUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags) {
return pKernelDisplay->__kdispStateUnload__(pGpu, pKernelDisplay, flags);
}
// Inherited engine-state virtuals (no KernelDisplay-specific _IMPL; the table
// entry points at the base-class behavior).
static inline NV_STATUS kdispReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
return pEngstate->__kdispReconcileTunableState__(pGpu, pEngstate, pTunableState);
}
static inline NV_STATUS kdispStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
return pEngstate->__kdispStatePreLoad__(pGpu, pEngstate, arg0);
}
static inline NV_STATUS kdispStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
return pEngstate->__kdispStatePostUnload__(pGpu, pEngstate, arg0);
}
static inline NV_STATUS kdispStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
return pEngstate->__kdispStatePreUnload__(pGpu, pEngstate, arg0);
}
static inline NV_STATUS kdispStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
return pEngstate->__kdispStateInitUnlocked__(pGpu, pEngstate);
}
static inline void kdispInitMissing_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
pEngstate->__kdispInitMissing__(pGpu, pEngstate);
}
static inline NV_STATUS kdispStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
return pEngstate->__kdispStatePreInitUnlocked__(pGpu, pEngstate);
}
static inline NV_STATUS kdispGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
return pEngstate->__kdispGetTunableState__(pGpu, pEngstate, pTunableState);
}
static inline NV_STATUS kdispCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunables1, void *pTunables2) {
return pEngstate->__kdispCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2);
}
static inline void kdispFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
pEngstate->__kdispFreeTunableState__(pGpu, pEngstate, pTunableState);
}
static inline NV_STATUS kdispStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
return pEngstate->__kdispStatePostLoad__(pGpu, pEngstate, arg0);
}
static inline NV_STATUS kdispAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void **ppTunableState) {
return pEngstate->__kdispAllocTunableState__(pGpu, pEngstate, ppTunableState);
}
static inline NV_STATUS kdispSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
return pEngstate->__kdispSetTunableState__(pGpu, pEngstate, pTunableState);
}
static inline NvBool kdispIsPresent_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
return pEngstate->__kdispIsPresent__(pGpu, pEngstate);
}
// Destructor hook for KernelDisplay; NVOC maps the generic destruct entry
// point onto the hand-written _IMPL routine.
void kdispDestruct_IMPL(struct KernelDisplay *pKernelDisplay);
#define __nvoc_kdispDestruct(pKernelDisplay) kdispDestruct_IMPL(pKernelDisplay)
// Construct the per-head KernelHead objects owned by this KernelDisplay.
// When the class is compiled out (__nvoc_kern_disp_h_disabled) every
// accessor below degenerates into an asserting stub instead of a macro
// forwarding to the _IMPL function.
NV_STATUS kdispConstructKhead_IMPL(struct KernelDisplay *pKernelDisplay);
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispConstructKhead(struct KernelDisplay *pKernelDisplay) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispConstructKhead(pKernelDisplay) kdispConstructKhead_IMPL(pKernelDisplay)
#endif //__nvoc_kern_disp_h_disabled
// Tear down the per-head KernelHead objects (inverse of kdispConstructKhead).
void kdispDestructKhead_IMPL(struct KernelDisplay *pKernelDisplay);
#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispDestructKhead(struct KernelDisplay *pKernelDisplay) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispDestructKhead(pKernelDisplay) kdispDestructKhead_IMPL(pKernelDisplay)
#endif //__nvoc_kern_disp_h_disabled
// Translate an external HW display class number into the internal
// DISPCHNCLASS enum; returns an error status if the class is unknown.
NV_STATUS kdispGetIntChnClsForHwCls_IMPL(struct KernelDisplay *pKernelDisplay, NvU32 hwClass, DISPCHNCLASS *pDispChnClass);
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispGetIntChnClsForHwCls(struct KernelDisplay *pKernelDisplay, NvU32 hwClass, DISPCHNCLASS *pDispChnClass) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispGetIntChnClsForHwCls(pKernelDisplay, hwClass, pDispChnClass) kdispGetIntChnClsForHwCls_IMPL(pKernelDisplay, hwClass, pDispChnClass)
#endif //__nvoc_kern_disp_h_disabled
// Deliver a display event notification (notifyIndex plus payload and the
// two auxiliary info words) to registered listeners.
void kdispNotifyEvent_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16);
#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispNotifyEvent(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispNotifyEvent(pGpu, pKernelDisplay, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) kdispNotifyEvent_IMPL(pGpu, pKernelDisplay, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16)
#endif //__nvoc_kern_disp_h_disabled
// Set the workaround flag that forces satellite channels to be purged when
// the core channel is freed.
void kdispSetWarPurgeSatellitesOnCoreFree_IMPL(struct KernelDisplay *pKernelDisplay, NvBool value);
#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispSetWarPurgeSatellitesOnCoreFree(struct KernelDisplay *pKernelDisplay, NvBool value) {
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispSetWarPurgeSatellitesOnCoreFree(pKernelDisplay, value) kdispSetWarPurgeSatellitesOnCoreFree_IMPL(pKernelDisplay, value)
#endif //__nvoc_kern_disp_h_disabled
#undef PRIVATE_FIELD
#undef PRIVATE_FIELD
// Record saved VGA display state (VGA surface and workspace addresses, their
// memory types, and the associated validity flags) for later restore.
// Assumption to confirm: vgaMemType/workspaceMemType use the RM aperture
// encoding; the definitions are not visible in this header.
void
dispdeviceFillVgaSavedDisplayState( struct OBJGPU *pGpu,
NvU64 vgaAddr,
NvU8 vgaMemType,
NvBool vgaValid,
NvU64 workspaceAddr,
NvU8 workspaceMemType,
NvBool workspaceValid,
NvBool baseValid,
NvBool workspaceBaseValid
);
// Return the KernelHead object for the given head index, or NULL when the
// index is outside the supported range [0, OBJ_MAX_HEADS).
static NV_INLINE struct KernelHead*
kdispGetHead(struct KernelDisplay *pKernelDisplay, NvU32 head)
{
    return (head < OBJ_MAX_HEADS) ? pKernelDisplay->pKernelHead[head] : NULL;
}
// Number of display heads reported by the cached static display info.
// The static info must already have been populated by this point.
static NV_INLINE NvU32 kdispGetNumHeads(struct KernelDisplay *pKernelDisplay)
{
    NV_ASSERT(NULL != pKernelDisplay->pStaticInfo);
    return pKernelDisplay->pStaticInfo->numHeads;
}
// Return the bPrimaryVga flag from the cached static display info, i.e.
// whether this GPU's display is the system's primary VGA device.
// NOTE(review): the field name suggests NvBool but the generated return type
// is NvU32 -- callers presumably treat the value as a boolean; confirm.
static NV_INLINE NvU32
kdispGetIsPrimaryVga(struct KernelDisplay *pKernelDisplay)
{
NV_ASSERT(pKernelDisplay->pStaticInfo != NULL);
return pKernelDisplay->pStaticInfo->bPrimaryVga;
}
#endif // KERN_DISP_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_KERN_DISP_NVOC_H_

View File

@@ -0,0 +1,176 @@
#define NVOC_KERNEL_HEAD_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_kernel_head_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x0145e6 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
void __nvoc_init_KernelHead(KernelHead*, RmHalspecOwner* );
void __nvoc_init_funcTable_KernelHead(KernelHead*, RmHalspecOwner* );
NV_STATUS __nvoc_ctor_KernelHead(KernelHead*, RmHalspecOwner* );
void __nvoc_init_dataField_KernelHead(KernelHead*, RmHalspecOwner* );
void __nvoc_dtor_KernelHead(KernelHead*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelHead;
// RTTI entry for the most-derived class itself: zero offset, full dtor.
static const struct NVOC_RTTI __nvoc_rtti_KernelHead_KernelHead = {
/*pClassDef=*/ &__nvoc_class_def_KernelHead,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelHead,
/*offset=*/ 0,
};
// RTTI entry for the embedded Object base; the offset lets dynamicCast
// compute the base sub-object address from a KernelHead pointer.
static const struct NVOC_RTTI __nvoc_rtti_KernelHead_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(KernelHead, __nvoc_base_Object),
};
// Cast table enumerating every type a KernelHead can be cast to
// (itself and its Object base), consumed by the NVOC runtime.
static const struct NVOC_CASTINFO __nvoc_castinfo_KernelHead = {
/*numRelatives=*/ 2,
/*relatives=*/ {
&__nvoc_rtti_KernelHead_KernelHead,
&__nvoc_rtti_KernelHead_Object,
},
};
// Class descriptor registered with the NVOC runtime: size, class id,
// dynamic-creation entry point, cast table, and (empty) export table.
const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead =
{
/*classInfo=*/ {
/*size=*/ sizeof(KernelHead),
/*classId=*/ classId(KernelHead),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "KernelHead",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelHead,
/*pCastInfo=*/ &__nvoc_castinfo_KernelHead,
/*pExportInfo=*/ &__nvoc_export_info_KernelHead
};
// KernelHead exports no RM control methods.
const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelHead =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
// Base-class destructor, defined in the Object translation unit.
void __nvoc_dtor_Object(Object*);
// KernelHead declares no user destructor, so teardown is just the
// destruction of the embedded Object base.
void __nvoc_dtor_KernelHead(KernelHead *pThis) {
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
PORT_UNREFERENCED_VARIABLE(pThis);
}
// Initialize HAL-dependent data fields. KernelHead currently has no
// halified fields, so this reduces to silencing unused-variable warnings;
// the halspec locals are emitted for uniformity with other NVOC classes.
void __nvoc_init_dataField_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) {
RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
ChipHal *chipHal = &pRmhalspecowner->chipHal;
const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
PORT_UNREFERENCED_VARIABLE(chipHal);
PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
}
NV_STATUS __nvoc_ctor_Object(Object* );
// Construct a KernelHead: base Object first, then HAL data fields, then the
// user constructor (kheadConstruct). The goto ladder unwinds in reverse
// order, so a failed user constructor destroys the already-built base.
NV_STATUS __nvoc_ctor_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
if (status != NV_OK) goto __nvoc_ctor_KernelHead_fail_Object;
__nvoc_init_dataField_KernelHead(pThis, pRmhalspecowner);
status = __nvoc_kheadConstruct(pThis);
if (status != NV_OK) goto __nvoc_ctor_KernelHead_fail__init;
goto __nvoc_ctor_KernelHead_exit; // Success
__nvoc_ctor_KernelHead_fail__init:
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_KernelHead_fail_Object:
__nvoc_ctor_KernelHead_exit:
return status;
}
// Populate virtual-function pointers for KernelHead. The class defines no
// virtual methods in this build, so only the unused-variable silencing
// remains; the halspec locals are emitted for generator uniformity.
static void __nvoc_init_funcTable_KernelHead_1(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) {
RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
ChipHal *chipHal = &pRmhalspecowner->chipHal;
const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
PORT_UNREFERENCED_VARIABLE(chipHal);
PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
}
// Public wrapper; split into numbered helpers when the table is large.
void __nvoc_init_funcTable_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) {
__nvoc_init_funcTable_KernelHead_1(pThis, pRmhalspecowner);
}
void __nvoc_init_Object(Object*);
// Wire up the per-instance base-class pointers, initialize the Object base,
// and install the (empty) function table. Runs before the constructor.
void __nvoc_init_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) {
pThis->__nvoc_pbase_KernelHead = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
__nvoc_init_Object(&pThis->__nvoc_base_Object);
__nvoc_init_funcTable_KernelHead(pThis, pRmhalspecowner);
}
// Allocate, parent, and construct a KernelHead instance. The parent (if any
// and unless halspec-only parenting was requested) adopts the new object as
// a child; the nearest RmHalspecOwner ancestor supplies halspec indices for
// field/function-table initialization.
NV_STATUS __nvoc_objCreate_KernelHead(KernelHead **ppThis, Dynamic *pParent, NvU32 createFlags) {
NV_STATUS status;
Object *pParentObj;
KernelHead *pThis;
RmHalspecOwner *pRmhalspecowner;
pThis = portMemAllocNonPaged(sizeof(KernelHead));
if (pThis == NULL) return NV_ERR_NO_MEMORY;
portMemSet(pThis, 0, sizeof(KernelHead));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelHead);
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_Object.pParent = NULL;
}
// NOTE(review): if no RmHalspecOwner ancestor exists, NV_ASSERT_OR_RETURN
// bails out without freeing pThis -- and pThis may already be linked as a
// child above, so a bare portMemFree here would dangle the parent's child
// list. This error path leaks; fixing it needs child unlinking emitted by
// the generator. Flagged rather than hand-patched.
if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL)
pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent);
NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT);
__nvoc_init_KernelHead(pThis, pRmhalspecowner);
status = __nvoc_ctor_KernelHead(pThis, pRmhalspecowner);
if (status != NV_OK) goto __nvoc_objCreate_KernelHead_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_KernelHead_cleanup:
// do not call destructors here since the constructor already called them
portMemFree(pThis);
return status;
}
// Dynamic-creation entry point registered in the class descriptor.
// KernelHead takes no constructor varargs, so the va_list is ignored and
// creation is forwarded straight to __nvoc_objCreate_KernelHead().
NV_STATUS __nvoc_objCreateDynamic_KernelHead(KernelHead **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    return __nvoc_objCreate_KernelHead(ppThis, pParent, createFlags);
}

View File

@@ -0,0 +1,354 @@
#ifndef _G_KERNEL_HEAD_NVOC_H_
#define _G_KERNEL_HEAD_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/**************************** Kernelhead Routines **************************\
* *
* Kernel head object function Definitions. *
* *
\***************************************************************************/
#include "g_kernel_head_nvoc.h"
#ifndef KERNEL_HEAD_H
#define KERNEL_HEAD_H
/* ------------------------ Includes --------------------------------------- */
#include "gpu/disp/vblank_callback/vblank.h"
#include "gpu/gpu_halspec.h"
/* ------------------------ Types definitions ------------------------------ */
// Per-head interrupt source bits; presumably combined into the Vblank
// IntrState bitmask below (only vblank is defined in this build).
enum
{
headIntr_none = 0,
headIntr_vblank = NVBIT(0),
};
/* ------------------------ Macros & Defines ------------------------------- */
// PRIVATE_FIELD hides struct members from translation units that have not
// opted into private access via NVOC_KERNEL_HEAD_H_PRIVATE_ACCESS_ALLOWED.
#ifdef NVOC_KERNEL_HEAD_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
// Per-head vblank bookkeeping, embedded in KernelHead as the Vblank member.
struct __nvoc_inner_struc_KernelHead_1__ {
// Vblank occurrence counters (total plus low/normal latency classes),
// exposed through the khead*Counter accessors below.
struct {
NvU32 Total;
NvU32 LowLatency;
NvU32 NormLatency;
} Counters;
// Registered vblank callback lists: low-latency and normal-latency queues.
struct {
VBLANKCALLBACK *pListLL;
VBLANKCALLBACK *pListNL;
} Callback;
// Interrupt state for this head; presumably a mask of headIntr_* bits.
NvU32 IntrState;
};
// KernelHead: kernel-RM object representing a single display head. Derives
// from Object; carries vblank counters/callback lists and the head's public
// index. The __nvoc_* members are NVOC runtime plumbing (RTTI, base-class
// pointers).
struct KernelHead {
const struct NVOC_RTTI *__nvoc_rtti;
struct Object __nvoc_base_Object;
struct Object *__nvoc_pbase_Object;
struct KernelHead *__nvoc_pbase_KernelHead;
struct __nvoc_inner_struc_KernelHead_1__ Vblank;
NvU32 PublicId;
};
#ifndef __NVOC_CLASS_KernelHead_TYPEDEF__
#define __NVOC_CLASS_KernelHead_TYPEDEF__
typedef struct KernelHead KernelHead;
#endif /* __NVOC_CLASS_KernelHead_TYPEDEF__ */
#ifndef __nvoc_class_id_KernelHead
#define __nvoc_class_id_KernelHead 0x0145e6
#endif /* __nvoc_class_id_KernelHead */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead;
// staticCast is a compile-time pointer adjustment; dynamicCast consults the
// RTTI cast table at runtime (and is compiled out with the class disabled).
#define __staticCast_KernelHead(pThis) \
((pThis)->__nvoc_pbase_KernelHead)
#ifdef __nvoc_kernel_head_h_disabled
#define __dynamicCast_KernelHead(pThis) ((KernelHead*)NULL)
#else //__nvoc_kernel_head_h_disabled
#define __dynamicCast_KernelHead(pThis) \
((KernelHead*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelHead)))
#endif //__nvoc_kernel_head_h_disabled
NV_STATUS __nvoc_objCreateDynamic_KernelHead(KernelHead**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_KernelHead(KernelHead**, Dynamic*, NvU32);
#define __objCreate_KernelHead(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_KernelHead((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
// Accessors for the Vblank.Counters.Total counter. As elsewhere in this
// header, when __nvoc_kernel_head_h_disabled is defined the accessors become
// asserting stubs; otherwise they are macros onto the _IMPL routines, and
// the _HAL aliases resolve to the same implementation on every chip.
NvU32 kheadGetVblankTotalCounter_IMPL(struct KernelHead *pKernelHead);
#ifdef __nvoc_kernel_head_h_disabled
static inline NvU32 kheadGetVblankTotalCounter(struct KernelHead *pKernelHead) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
return 0;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadGetVblankTotalCounter(pKernelHead) kheadGetVblankTotalCounter_IMPL(pKernelHead)
#endif //__nvoc_kernel_head_h_disabled
#define kheadGetVblankTotalCounter_HAL(pKernelHead) kheadGetVblankTotalCounter(pKernelHead)
void kheadSetVblankTotalCounter_IMPL(struct KernelHead *pKernelHead, NvU32 arg0);
#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadSetVblankTotalCounter(struct KernelHead *pKernelHead, NvU32 arg0) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadSetVblankTotalCounter(pKernelHead, arg0) kheadSetVblankTotalCounter_IMPL(pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled
#define kheadSetVblankTotalCounter_HAL(pKernelHead, arg0) kheadSetVblankTotalCounter(pKernelHead, arg0)
// Accessors for the Vblank.Counters.LowLatency counter; same stub/macro
// pattern as the Total counter above.
NvU32 kheadGetVblankLowLatencyCounter_IMPL(struct KernelHead *pKernelHead);
#ifdef __nvoc_kernel_head_h_disabled
static inline NvU32 kheadGetVblankLowLatencyCounter(struct KernelHead *pKernelHead) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
return 0;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadGetVblankLowLatencyCounter(pKernelHead) kheadGetVblankLowLatencyCounter_IMPL(pKernelHead)
#endif //__nvoc_kernel_head_h_disabled
#define kheadGetVblankLowLatencyCounter_HAL(pKernelHead) kheadGetVblankLowLatencyCounter(pKernelHead)
void kheadSetVblankLowLatencyCounter_IMPL(struct KernelHead *pKernelHead, NvU32 arg0);
#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadSetVblankLowLatencyCounter(struct KernelHead *pKernelHead, NvU32 arg0) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadSetVblankLowLatencyCounter(pKernelHead, arg0) kheadSetVblankLowLatencyCounter_IMPL(pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled
#define kheadSetVblankLowLatencyCounter_HAL(pKernelHead, arg0) kheadSetVblankLowLatencyCounter(pKernelHead, arg0)
// NormLatency counter accessors are not-supported stubs on this build
// (hashed-suffix bodies emitted directly by the generator).
// NOTE(review): the getter returns NV_ERR_NOT_SUPPORTED through an NvU32
// return type, so callers receive the raw error value rather than a count;
// confirm no caller interprets it as a counter.
static inline NvU32 kheadGetVblankNormLatencyCounter_46f6a7(struct KernelHead *pKernelHead) {
return NV_ERR_NOT_SUPPORTED;
}
#ifdef __nvoc_kernel_head_h_disabled
static inline NvU32 kheadGetVblankNormLatencyCounter(struct KernelHead *pKernelHead) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
return 0;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadGetVblankNormLatencyCounter(pKernelHead) kheadGetVblankNormLatencyCounter_46f6a7(pKernelHead)
#endif //__nvoc_kernel_head_h_disabled
#define kheadGetVblankNormLatencyCounter_HAL(pKernelHead) kheadGetVblankNormLatencyCounter(pKernelHead)
// Setter is a deliberate no-op stub on this build.
static inline void kheadSetVblankNormLatencyCounter_b3696a(struct KernelHead *pKernelHead, NvU32 arg0) {
return;
}
#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadSetVblankNormLatencyCounter(struct KernelHead *pKernelHead, NvU32 arg0) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadSetVblankNormLatencyCounter(pKernelHead, arg0) kheadSetVblankNormLatencyCounter_b3696a(pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled
#define kheadSetVblankNormLatencyCounter_HAL(pKernelHead, arg0) kheadSetVblankNormLatencyCounter(pKernelHead, arg0)
// Vblank interrupt-enable read stub: always reports "not enabled" on this
// build (no HW access path compiled in).
static inline NvBool kheadReadVblankIntrEnable_491d52(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) {
return ((NvBool)(0 != 0));
}
#ifdef __nvoc_kernel_head_h_disabled
static inline NvBool kheadReadVblankIntrEnable(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
return NV_FALSE;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadReadVblankIntrEnable(pGpu, pKernelHead) kheadReadVblankIntrEnable_491d52(pGpu, pKernelHead)
#endif //__nvoc_kernel_head_h_disabled
#define kheadReadVblankIntrEnable_HAL(pGpu, pKernelHead) kheadReadVblankIntrEnable(pGpu, pKernelHead)
// Display-initialized query stub: always NV_FALSE on this build.
static inline NvBool kheadGetDisplayInitialized_491d52(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) {
return ((NvBool)(0 != 0));
}
#ifdef __nvoc_kernel_head_h_disabled
static inline NvBool kheadGetDisplayInitialized(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
return NV_FALSE;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadGetDisplayInitialized(pGpu, pKernelHead) kheadGetDisplayInitialized_491d52(pGpu, pKernelHead)
#endif //__nvoc_kernel_head_h_disabled
#define kheadGetDisplayInitialized_HAL(pGpu, pKernelHead) kheadGetDisplayInitialized(pGpu, pKernelHead)
// Vblank interrupt-enable write stub: deliberate no-op on this build.
static inline void kheadWriteVblankIntrEnable_b3696a(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvBool arg0) {
return;
}
#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadWriteVblankIntrEnable(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvBool arg0) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadWriteVblankIntrEnable(pGpu, pKernelHead, arg0) kheadWriteVblankIntrEnable_b3696a(pGpu, pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled
#define kheadWriteVblankIntrEnable_HAL(pGpu, pKernelHead, arg0) kheadWriteVblankIntrEnable(pGpu, pKernelHead, arg0)
// The following four entry points (process callbacks, reset pending vblank
// for RM / for kernel clients, read pending vblank) are asserting stubs on
// this build: reaching them indicates a configuration that should not occur,
// hence the NV_ASSERT_PRECOMP(0) in each body.
static inline void kheadProcessVblankCallbacks_e426af(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0) {
NV_ASSERT_PRECOMP(0);
return;
}
#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadProcessVblankCallbacks(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadProcessVblankCallbacks(pGpu, pKernelHead, arg0) kheadProcessVblankCallbacks_e426af(pGpu, pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled
#define kheadProcessVblankCallbacks_HAL(pGpu, pKernelHead, arg0) kheadProcessVblankCallbacks(pGpu, pKernelHead, arg0)
static inline void kheadResetPendingVblank_e426af(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) {
NV_ASSERT_PRECOMP(0);
return;
}
#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadResetPendingVblank(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadResetPendingVblank(pGpu, pKhead, arg0) kheadResetPendingVblank_e426af(pGpu, pKhead, arg0)
#endif //__nvoc_kernel_head_h_disabled
#define kheadResetPendingVblank_HAL(pGpu, pKhead, arg0) kheadResetPendingVblank(pGpu, pKhead, arg0)
static inline void kheadResetPendingVblankForKernel_e426af(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) {
NV_ASSERT_PRECOMP(0);
return;
}
#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadResetPendingVblankForKernel(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadResetPendingVblankForKernel(pGpu, pKhead, arg0) kheadResetPendingVblankForKernel_e426af(pGpu, pKhead, arg0)
#endif //__nvoc_kernel_head_h_disabled
#define kheadResetPendingVblankForKernel_HAL(pGpu, pKhead, arg0) kheadResetPendingVblankForKernel(pGpu, pKhead, arg0)
// NOTE(review): as with the NormLatency getter, this NvU32-typed stub
// returns NV_ERR_NOT_SUPPORTED rather than a pending-interrupt mask.
static inline NvU32 kheadReadPendingVblank_92bfc3(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 intr) {
NV_ASSERT_PRECOMP(0);
return NV_ERR_NOT_SUPPORTED;
}
#ifdef __nvoc_kernel_head_h_disabled
static inline NvU32 kheadReadPendingVblank(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 intr) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
return 0;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadReadPendingVblank(pGpu, pKernelHead, intr) kheadReadPendingVblank_92bfc3(pGpu, pKernelHead, intr)
#endif //__nvoc_kernel_head_h_disabled
#define kheadReadPendingVblank_HAL(pGpu, pKernelHead, intr) kheadReadPendingVblank(pGpu, pKernelHead, intr)
// User constructor, invoked from __nvoc_ctor_KernelHead.
NV_STATUS kheadConstruct_IMPL(struct KernelHead *arg_pKernelHead);
#define __nvoc_kheadConstruct(arg_pKernelHead) kheadConstruct_IMPL(arg_pKernelHead)
// Register a vblank callback on this head (queued onto the LL or NL list
// depending on the callback's latency class).
void kheadAddVblankCallback_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0);
#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadAddVblankCallback(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadAddVblankCallback(pGpu, pKernelHead, arg0) kheadAddVblankCallback_IMPL(pGpu, pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled
// Unregister a previously-added vblank callback.
void kheadDeleteVblankCallback_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0);
#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadDeleteVblankCallback(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadDeleteVblankCallback(pGpu, pKernelHead, arg0) kheadDeleteVblankCallback_IMPL(pGpu, pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled
// Query whether callbacks are queued; exact semantics of arg0/arg1 are not
// visible here -- see the _IMPL definition before relying on them.
NvU32 kheadCheckVblankCallbacksQueued_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0, NvU32 *arg1);
#ifdef __nvoc_kernel_head_h_disabled
static inline NvU32 kheadCheckVblankCallbacksQueued(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0, NvU32 *arg1) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
return 0;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, arg0, arg1) kheadCheckVblankCallbacksQueued_IMPL(pGpu, pKernelHead, arg0, arg1)
#endif //__nvoc_kernel_head_h_disabled
// Read the head's vblank interrupt state (stored in Vblank.IntrState).
NvU32 kheadReadVblankIntrState_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead);
#ifdef __nvoc_kernel_head_h_disabled
static inline NvU32 kheadReadVblankIntrState(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
return 0;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadReadVblankIntrState(pGpu, pKernelHead) kheadReadVblankIntrState_IMPL(pGpu, pKernelHead)
#endif //__nvoc_kernel_head_h_disabled
// Write the head's vblank interrupt state.
void kheadWriteVblankIntrState_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0);
#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadWriteVblankIntrState(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadWriteVblankIntrState(pGpu, pKernelHead, arg0) kheadWriteVblankIntrState_IMPL(pGpu, pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled
#undef PRIVATE_FIELD
// Real (non-stub) callback processor, declared outside the PRIVATE_FIELD
// region so other translation units can call it directly.
void kheadProcessVblankCallbacks_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 state);
#endif // KERNEL_HEAD_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_KERNEL_HEAD_NVOC_H_

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,428 @@
#define NVOC_MEM_MGR_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_mem_mgr_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x22ad47 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryManager;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;
void __nvoc_init_MemoryManager(MemoryManager*, RmHalspecOwner* );
void __nvoc_init_funcTable_MemoryManager(MemoryManager*, RmHalspecOwner* );
NV_STATUS __nvoc_ctor_MemoryManager(MemoryManager*, RmHalspecOwner* );
void __nvoc_init_dataField_MemoryManager(MemoryManager*, RmHalspecOwner* );
void __nvoc_dtor_MemoryManager(MemoryManager*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_MemoryManager;
// RTTI entry for the most-derived class itself: zero offset, full dtor.
static const struct NVOC_RTTI __nvoc_rtti_MemoryManager_MemoryManager = {
/*pClassDef=*/ &__nvoc_class_def_MemoryManager,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_MemoryManager,
/*offset=*/ 0,
};
// Indirect base Object (reached through the OBJENGSTATE member); the offset
// chain lets dynamicCast reach the innermost base sub-object.
static const struct NVOC_RTTI __nvoc_rtti_MemoryManager_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE.__nvoc_base_Object),
};
// Direct OBJENGSTATE base.
static const struct NVOC_RTTI __nvoc_rtti_MemoryManager_OBJENGSTATE = {
/*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE),
};
// Cast table: MemoryManager -> {MemoryManager, OBJENGSTATE, Object}.
static const struct NVOC_CASTINFO __nvoc_castinfo_MemoryManager = {
/*numRelatives=*/ 3,
/*relatives=*/ {
&__nvoc_rtti_MemoryManager_MemoryManager,
&__nvoc_rtti_MemoryManager_OBJENGSTATE,
&__nvoc_rtti_MemoryManager_Object,
},
};
// Class descriptor registered with the NVOC runtime: size, class id,
// dynamic-creation entry point, cast table, and (empty) export table.
const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryManager =
{
/*classInfo=*/ {
/*size=*/ sizeof(MemoryManager),
/*classId=*/ classId(MemoryManager),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "MemoryManager",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_MemoryManager,
/*pCastInfo=*/ &__nvoc_castinfo_MemoryManager,
/*pExportInfo=*/ &__nvoc_export_info_MemoryManager
};
// Thunks forwarding MemoryManager's OBJENGSTATE virtual-interface calls to
// the base-class implementation. Each one rebases the object pointer by the
// base-class offset recorded in the RTTI table, then calls the engstate*
// entry point. All follow the identical generated pattern.
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrReconcileTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) {
return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStateLoad(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) {
return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStateUnload(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) {
return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStateInitLocked(POBJGPU pGpu, struct MemoryManager *pEngstate) {
return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePreLoad(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) {
return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePostUnload(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) {
return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0);
}
static void __nvoc_thunk_OBJENGSTATE_memmgrStateDestroy(POBJGPU pGpu, struct MemoryManager *pEngstate) {
engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePreUnload(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) {
return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStateInitUnlocked(POBJGPU pGpu, struct MemoryManager *pEngstate) {
return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset));
}
static void __nvoc_thunk_OBJENGSTATE_memmgrInitMissing(POBJGPU pGpu, struct MemoryManager *pEngstate) {
engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePreInitLocked(POBJGPU pGpu, struct MemoryManager *pEngstate) {
return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePreInitUnlocked(POBJGPU pGpu, struct MemoryManager *pEngstate) {
return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrGetTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) {
return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrCompareTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunables1, void *pTunables2) {
return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunables1, pTunables2);
}
static void __nvoc_thunk_OBJENGSTATE_memmgrFreeTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) {
engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePostLoad(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) {
return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrAllocTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void **ppTunableState) {
return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), ppTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrSetTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) {
return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrConstructEngine(POBJGPU pGpu, struct MemoryManager *pEngstate, ENGDESCRIPTOR arg0) {
return engstateConstructEngine(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0);
}
static NvBool __nvoc_thunk_OBJENGSTATE_memmgrIsPresent(POBJGPU pGpu, struct MemoryManager *pEngstate) {
return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset));
}
// MemoryManager exports no RM control methods.
const struct NVOC_EXPORT_INFO __nvoc_export_info_MemoryManager =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*);
// No user destructor: teardown is just destruction of the OBJENGSTATE base.
void __nvoc_dtor_MemoryManager(MemoryManager *pThis) {
__nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
PORT_UNREFERENCED_VARIABLE(pThis);
}
/*!
 * Initialize MemoryManager's halified boolean fields from the owner's
 * halspec indices.
 *
 * In this build exactly one chip variant (T234D, detected via the ChipHal
 * variable index) enables a subset of the fields; every other field is
 * unconditionally false. The original generated body repeated the same
 * T234D predicate seven times and carried dead `if (0)` branches for fields
 * with no enabling variant; the predicate is hoisted into one named local
 * and the unreachable branches are removed. Behavior is identical.
 *
 * @param pThis            MemoryManager instance whose fields are set.
 * @param pRmhalspecowner  Owner supplying the ChipHal/RmVariantHal indices.
 */
void __nvoc_init_dataField_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) {
    ChipHal *chipHal = &pRmhalspecowner->chipHal;
    const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
    RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
    // ChipHal variant test for T234D: group 2, bit 16 of the variant index
    // (same encoding the generator emits inline at each use site).
    const NvBool bChipIsT234D = ((NvBool)(( ((chipHal_HalVarIdx >> 5) == 2UL) &&
                                            ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) ) != 0));
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
    PORT_UNREFERENCED_VARIABLE(chipHal);
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);

    // Hal fields enabled only on the T234D chip variant.
    pThis->bClientPageTablesPmaManaged = bChipIsT234D;
    pThis->bScanoutSysmem = bChipIsT234D;
    pThis->bDisallowSplitLowerMemory = bChipIsT234D;
    pThis->bSysmemCompressionSupportDef = bChipIsT234D;
    pThis->bAllowNoncontiguousAllocation = bChipIsT234D;

    // Hal fields with no enabling variant in this build: always false
    // (the generator emitted these as unreachable `if (0)` branches).
    pThis->bFbRegionsSupported = ((NvBool)(0 != 0));
    pThis->bPmaEnabled = ((NvBool)(0 != 0));
    pThis->bSmallPageCompression = ((NvBool)(0 != 0));
    pThis->bBug2301372IncreaseRmReserveMemoryWar = ((NvBool)(0 != 0));
    pThis->bVgpuPmaSupport = ((NvBool)(0 != 0));
    pThis->bScrubOnFreeEnabled = ((NvBool)(0 != 0));
    pThis->bFastScrubberEnabled = ((NvBool)(0 != 0));

    // Non-halified field, always disabled in this build.
    pThis->bEnableDynamicPageOfflining = ((NvBool)(0 != 0));
}
NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );

/*
 * Constructor for MemoryManager: construct the OBJENGSTATE base, then fill in
 * the hal-dependent data fields.  Returns the base constructor's status on
 * failure, NV_OK otherwise.  (Early returns replace the generator's
 * fail/exit goto pair, which fell straight through to the exit label.)
 */
NV_STATUS __nvoc_ctor_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner)
{
    NV_STATUS status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    if (status != NV_OK)
        return status;

    __nvoc_init_dataField_MemoryManager(pThis, pRmhalspecowner);
    return NV_OK;
}
// Install chunk 1 of MemoryManager's virtual function table.  Every slot in
// this view resolves to an OBJENGSTATE base-class thunk: MemoryManager
// overrides none of these engine-state methods here.  The hal variables are
// computed because the generator emits them for every funcTable chunk; in
// this chunk no entry is hal-conditional, so they go unused.
static void __nvoc_init_funcTable_MemoryManager_1(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) {
    ChipHal *chipHal = &pRmhalspecowner->chipHal;
    const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
    RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
    PORT_UNREFERENCED_VARIABLE(chipHal);
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
    // Engine state-machine entry points, all inherited from OBJENGSTATE.
    pThis->__memmgrReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrReconcileTunableState;
    pThis->__memmgrStateLoad__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateLoad;
    pThis->__memmgrStateUnload__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateUnload;
    pThis->__memmgrStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateInitLocked;
    pThis->__memmgrStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePreLoad;
    pThis->__memmgrStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePostUnload;
    pThis->__memmgrStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateDestroy;
    pThis->__memmgrStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePreUnload;
    pThis->__memmgrStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateInitUnlocked;
    pThis->__memmgrInitMissing__ = &__nvoc_thunk_OBJENGSTATE_memmgrInitMissing;
    pThis->__memmgrStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePreInitLocked;
    pThis->__memmgrStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePreInitUnlocked;
    // Tunable-state accessors, also inherited from OBJENGSTATE.
    pThis->__memmgrGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrGetTunableState;
    pThis->__memmgrCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrCompareTunableState;
    pThis->__memmgrFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrFreeTunableState;
    pThis->__memmgrStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePostLoad;
    pThis->__memmgrAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrAllocTunableState;
    pThis->__memmgrSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrSetTunableState;
    pThis->__memmgrConstructEngine__ = &__nvoc_thunk_OBJENGSTATE_memmgrConstructEngine;
    pThis->__memmgrIsPresent__ = &__nvoc_thunk_OBJENGSTATE_memmgrIsPresent;
}
// Public entry point for vtable initialization.  The generator splits large
// tables into numbered chunks; MemoryManager currently has only chunk _1.
void __nvoc_init_funcTable_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) {
    __nvoc_init_funcTable_MemoryManager_1(pThis, pRmhalspecowner);
}
void __nvoc_init_OBJENGSTATE(OBJENGSTATE*);

/*
 * Wire up MemoryManager's cached base-class pointers (used by the NVOC
 * runtime for casts), initialize the OBJENGSTATE base, and install this
 * class's virtual function table.  The pbase stores are independent and are
 * listed innermost base first.
 */
void __nvoc_init_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner)
{
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object;
    pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE;
    pThis->__nvoc_pbase_MemoryManager = pThis;

    // Base class first, then this class's function table on top.
    __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    __nvoc_init_funcTable_MemoryManager(pThis, pRmhalspecowner);
}
/*
 * Allocate and construct a MemoryManager under pParent.
 *
 * On success *ppThis receives the new object and NV_OK is returned; on any
 * failure the allocation is released and an error status is returned
 * (*ppThis is left untouched on failure).
 *
 * Fix vs. generated code: the halspec-owner lookup used
 * NV_ASSERT_OR_RETURN(..., NV_ERR_INVALID_ARGUMENT) AFTER allocating pThis
 * and attaching it to the parent, so a missing RmHalspecOwner leaked pThis
 * and left a dangling child pointer in the parent's child list.  The lookup
 * is now performed before objAddChild, and its failure is routed through the
 * cleanup path.  (The assert's logging is traded for a plain error return;
 * callers observe the same NV_ERR_INVALID_ARGUMENT.)
 */
NV_STATUS __nvoc_objCreate_MemoryManager(MemoryManager **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    MemoryManager *pThis;
    RmHalspecOwner *pRmhalspecowner;

    pThis = portMemAllocNonPaged(sizeof(MemoryManager));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;
    portMemSet(pThis, 0, sizeof(MemoryManager));
    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_MemoryManager);

    // Resolve the halspec owner before touching the parent's child list so a
    // failed lookup can be unwound by simply freeing pThis.
    if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL)
        pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent);
    if (pRmhalspecowner == NULL)
    {
        status = NV_ERR_INVALID_ARGUMENT;
        goto __nvoc_objCreate_MemoryManager_cleanup;
    }

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_MemoryManager(pThis, pRmhalspecowner);
    status = __nvoc_ctor_MemoryManager(pThis, pRmhalspecowner);
    if (status != NV_OK) goto __nvoc_objCreate_MemoryManager_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_MemoryManager_cleanup:
    // Do not call destructors here: on ctor failure the constructor already
    // ran them.  NOTE(review): if the ctor fails after objAddChild the parent
    // still references the freed object — presumably the surrounding teardown
    // handles this; verify against objRemoveChild/objDelete semantics.
    portMemFree(pThis);
    return status;
}
/*
 * Varargs creation shim used by the dynamic-class factory.  MemoryManager's
 * constructor takes no extra arguments, so the va_list is accepted for
 * signature compatibility and never read.
 */
NV_STATUS __nvoc_objCreateDynamic_MemoryManager(MemoryManager **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    return __nvoc_objCreate_MemoryManager(ppThis, pParent, createFlags);
}
