mirror of
git://nv-tegra.nvidia.com/tegra/kernel-src/nv-kernel-display-driver.git
synced 2025-12-22 09:11:48 +03:00
12f1806bdc25917299525e0e48815306159de132 - nvdisplay/Makefile 7d577fdb9594ae572ff38fdda682a4796ab832ca - nvdisplay/COPYING c2e810fc3453d74ee0493168dbf7981ba482acd3 - nvdisplay/SECURITY.md 6cd5d1b33d4d80772a8fec993f27ffa7704bf7bc - nvdisplay/README.md 9bb6ebf912480ef87d369b10de8dc9e800711651 - nvdisplay/kernel-open/Kbuild 4f4410c3c8db46e5a98d7a35f7d909a49de6cb43 - nvdisplay/kernel-open/Makefile 90d4457b6fec29378645d5932ad82d706942f4a6 - nvdisplay/kernel-open/conftest.sh 0b1508742a1c5a04b6c3a4be1b48b506f4180848 - nvdisplay/kernel-open/dkms.conf ea98628370602119afb1a065ff954784757ddb10 - nvdisplay/kernel-open/common/inc/os_dsi_panel_props.h 4750735d6f3b334499c81d499a06a654a052713d - nvdisplay/kernel-open/common/inc/nv-caps.h 60ef64c0f15526ae2d786e5cec07f28570f0663b - nvdisplay/kernel-open/common/inc/conftest.h 880e45b68b19fdb91ac94991f0e6d7fc3b406b1f - nvdisplay/kernel-open/common/inc/nv-pci-types.h 8c041edbf4ed4fefdfd8006252cf542e34aa617b - nvdisplay/kernel-open/common/inc/nvtypes.h c45b2faf17ca2a205c56daa11e3cb9d864be2238 - nvdisplay/kernel-open/common/inc/nv-modeset-interface.h 80fcb510fad25cb7a017139f487da1843b7cfcbd - nvdisplay/kernel-open/common/inc/nv-lock.h b249abc0a7d0c9889008e98cb2f8515a9d310b85 - nvdisplay/kernel-open/common/inc/nvgputypes.h e4a4f57abb8769d204468b2f5000c81f5ea7c92f - nvdisplay/kernel-open/common/inc/nv-procfs.h df0420a5e3576e5a8b77a7bcefa6888ad62d6fd7 - nvdisplay/kernel-open/common/inc/nv.h 4b7414705ce10f0a1e312c36a43824b59d572661 - nvdisplay/kernel-open/common/inc/nvmisc.h 4b1a6c372a531b0d3e0a4e9815dde74cb222447c - nvdisplay/kernel-open/common/inc/rm-gpu-ops.h 5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - nvdisplay/kernel-open/common/inc/dce_rm_client_ipc.h 349696856890bdbe76f457376648522b35f874ef - nvdisplay/kernel-open/common/inc/nvimpshared.h befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - nvdisplay/kernel-open/common/inc/nv_stdarg.h 82940edf4650b9be67275d3a360ef4e63387a0a7 - nvdisplay/kernel-open/common/inc/cpuopsys.h 
84e9b6cba7ba26ef4032666f769c5b43fa510aad - nvdisplay/kernel-open/common/inc/nv-list-helpers.h b02c378ac0521c380fc2403f0520949f785b1db6 - nvdisplay/kernel-open/common/inc/nv-dmabuf.h 59d537c1d1b284a9d52277aff87c237e3ec2c99d - nvdisplay/kernel-open/common/inc/nv-procfs-utils.h 35da37c070544f565d0f1de82abc7569b5df06af - nvdisplay/kernel-open/common/inc/nv_firmware_types.h c57259130166701bf6d5e5bb1968397716d29fc0 - nvdisplay/kernel-open/common/inc/nv-platform.h 5c4c05e5a638888babb5a8af2f0a61c94ecd150b - nvdisplay/kernel-open/common/inc/nvkms-format.h 42ece56d0459eb9f27b2497de48f08360c4f7f6b - nvdisplay/kernel-open/common/inc/nvlimits.h 143051f69a53db0e7c5d2f846a9c14d666e264b4 - nvdisplay/kernel-open/common/inc/nv-kref.h f3e0f71abf34300d322e313adcd4fcbde9aa6f87 - nvdisplay/kernel-open/common/inc/nv-kthread-q.h b4c5d759f035b540648117b1bff6b1701476a398 - nvdisplay/kernel-open/common/inc/nvCpuUuid.h d721fca5f2317b9b325dedcbfba51c00d0b23648 - nvdisplay/kernel-open/common/inc/nv-linux.h 4a8b7f3cc65fa530670f510796bef51cf8c4bb6b - nvdisplay/kernel-open/common/inc/nv-register-module.h 88399279bd5e31b6e77cb32c7ef6220ce529526b - nvdisplay/kernel-open/common/inc/nv-hypervisor.h 256b5dc6f28738b3ce656c984f01d8f3e13e9faa - nvdisplay/kernel-open/common/inc/nv-pgprot.h 5d8de06378994201e91c2179d149c0edcd694900 - nvdisplay/kernel-open/common/inc/nvstatuscodes.h 7b2e2e6ff278acddc6980b330f68e374f38e0a6c - nvdisplay/kernel-open/common/inc/nv-timer.h d25291d32caef187daf3589ce4976e4fa6bec70d - nvdisplay/kernel-open/common/inc/nv-time.h 906329ae5773732896e6fe94948f7674d0b04c17 - nvdisplay/kernel-open/common/inc/os_gpio.h 6337f595602bce9d76559de1be90553b52f405d8 - nvdisplay/kernel-open/common/inc/nv-proto.h 2f5fec803685c61c13f7955baaed056b5524652c - nvdisplay/kernel-open/common/inc/nv-ioctl.h fdbaee144adb26c00776b802560e15f775ed5aef - nvdisplay/kernel-open/common/inc/nv-mm.h 94ad0ba9fd6eb21445baec4fddd7c67a30cceefa - nvdisplay/kernel-open/common/inc/nv-pci.h 
95bf694a98ba78d5a19e66463b8adda631e6ce4c - nvdisplay/kernel-open/common/inc/nvstatus.h c06b2748cd7c8f86b5864d5e9abe6ecf0ab622f0 - nvdisplay/kernel-open/common/inc/nv-hash.h 009cd8e2b7ee8c0aeb05dac44cc84fc8f6f37c06 - nvdisplay/kernel-open/common/inc/nvkms-kapi.h 423282211355a8cb20bff268166885ac90e2986c - nvdisplay/kernel-open/common/inc/nv_uvm_interface.h 1e7eec6561b04d2d21c3515987aaa116e9401c1f - nvdisplay/kernel-open/common/inc/nv-kernel-interface-api.h 910255a4d92e002463175a28e38c3f24716fb654 - nvdisplay/kernel-open/common/inc/nvkms-api-types.h 3100c536eb4c81ae913b92d4bc5905e752301311 - nvdisplay/kernel-open/common/inc/os-interface.h 003b2cbe3d82e467c09371aee86e48d65ae6c29b - nvdisplay/kernel-open/common/inc/nv-ioctl-numa.h c75bfc368c6ce3fc2c1a0c5062834e90d822b365 - nvdisplay/kernel-open/common/inc/nv-memdbg.h 1d17329caf26cdf931122b3c3b7edf4932f43c38 - nvdisplay/kernel-open/common/inc/nv-msi.h 3b12d770f8592b94a8c7774c372e80ad08c5774c - nvdisplay/kernel-open/common/inc/nvi2c.h e20882a9b14f2bf887e7465d3f238e5ac17bc2f5 - nvdisplay/kernel-open/common/inc/nv_speculation_barrier.h 1d8b347e4b92c340a0e9eac77e0f63b9fb4ae977 - nvdisplay/kernel-open/common/inc/nv-ioctl-numbers.h e3362c33fe6c7cdec013eceac31e8f6f38dc465f - nvdisplay/kernel-open/common/inc/nv_uvm_types.h b642fb649ce2ba17f37c8aa73f61b38f99a74986 - nvdisplay/kernel-open/common/inc/nv-retpoline.h 3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - nvdisplay/kernel-open/common/inc/nv-gpu-info.h cda75171ca7d8bf920aab6d56ef9aadec16fd15d - nvdisplay/kernel-open/common/inc/os/nv_memory_type.h 027fd0ab218eb98abe2b66d05f10b14ebb57e7a3 - nvdisplay/kernel-open/nvidia/nv-nano-timer.c e2b0e4ef01bb28ff6dcc10cb44570e185ce82df0 - nvdisplay/kernel-open/nvidia/nv-reg.h 218aac0c408be15523a2d0b70fdbdadd7e1a2e48 - nvdisplay/kernel-open/nvidia/nv-imp.c 64f1c96761f6d9e7e02ab049dd0c810196568036 - nvdisplay/kernel-open/nvidia/nv-pat.c 98c1be29932b843453567d4ada2f9912ea4523d7 - nvdisplay/kernel-open/nvidia/nv-vm.c 
94c406f36836c3396b0ca08b4ff71496666b9c43 - nvdisplay/kernel-open/nvidia/os-usermap.c 7ac10bc4b3b1c5a261388c3f5f9ce0e9b35d7b44 - nvdisplay/kernel-open/nvidia/nv-usermap.c 5ac10d9b20ccd37e1e24d4a81b8ac8f83db981e4 - nvdisplay/kernel-open/nvidia/nv-vtophys.c d11ab03a617b29efcf00f85e24ebce60f91cf82c - nvdisplay/kernel-open/nvidia/nv-backlight.c cf90d9ea3abced81d182ab3c4161e1b5d3ad280d - nvdisplay/kernel-open/nvidia/nv-rsync.h 3ee953312a6a246d65520fc4a65407f448d1d2b8 - nvdisplay/kernel-open/nvidia/nv-gpio.c f179d308e984ff44a82f6e1c6007624f1ac916ba - nvdisplay/kernel-open/nvidia/nv-procfs.c c5cfba80ea122c9078f2d44f1538144747d7931b - nvdisplay/kernel-open/nvidia/nv.c 2c0d17f9babe897435c7dfa43adb96020f45da2b - nvdisplay/kernel-open/nvidia/nv-dsi-parse-panel-props.c cded6e9b6324fd429b865173596c8e549a682bba - nvdisplay/kernel-open/nvidia/nv_uvm_interface.c fbae5663e3c278d8206d07ec6446ca4c2781795f - nvdisplay/kernel-open/nvidia/nv-ibmnpu.h dc39c4ee87f4dc5f5ccc179a98e07ddb82bb8bce - nvdisplay/kernel-open/nvidia/nv-modeset-interface.c 9999872b1513360d8ecf6c0894f81c63e7d435e9 - nvdisplay/kernel-open/nvidia/nv-dma.c ee894ec530acbd765c04aec93c1c312d42210aeb - nvdisplay/kernel-open/nvidia/nv-ipc-soc.c 06e7ec77cd21c43f900984553a4960064753e444 - nvdisplay/kernel-open/nvidia/nv-platform-pm.c c1ebcfec42f7898dd9d909eacd439d288b80523f - nvdisplay/kernel-open/nvidia/os-mlock.c 0b7e063481a0e195c6e91a4d3464c4792c684f03 - nvdisplay/kernel-open/nvidia/nv-kthread-q.c a392fa800565c8345b07af5132db7078b914d59f - nvdisplay/kernel-open/nvidia/os-pci.c 26c3971ea7afb4b7f237db9ab1c321c3de814518 - nvdisplay/kernel-open/nvidia/nv-p2p.c d9221522e02e18b037b8929fbc075dc3c1e58654 - nvdisplay/kernel-open/nvidia/nv-pci-table.c a3626bf1b80a81c14408c5181e8bd27696df2caf - nvdisplay/kernel-open/nvidia/nv-pci.c 0b0ec8d75dfece909db55136731196162c4152d5 - nvdisplay/kernel-open/nvidia/nv-dmabuf.c 0ce95e5ed52d6d6ca2bb6aac33ca8f197145ec45 - nvdisplay/kernel-open/nvidia/nv-procfs-utils.c 
6d4fbea733fdcd92fc6a8a5884e8bb359f9e8abd - nvdisplay/kernel-open/nvidia/rmp2pdefines.h b71bf4426322ab59e78e2a1500509a5f4b2b71ab - nvdisplay/kernel-open/nvidia/nv-pat.h d4f2cac6234e5ad337c254875a26d17372f28162 - nvdisplay/kernel-open/nvidia/os-interface.c 5f2e279a4abe0dabd478b1589be67df18de4b09d - nvdisplay/kernel-open/nvidia/nv-i2c.c 8bedc7374d7a43250e49fb09139c511b489d45e3 - nvdisplay/kernel-open/nvidia/nv-pci-table.h c7f1aaa6a5f3a3cdf1e5f80adf40b3c9f185fb94 - nvdisplay/kernel-open/nvidia/nv-report-err.c fc566df59becef7bc7511ae62a9a97b1532a5af2 - nvdisplay/kernel-open/nvidia/nv-frontend.c fbfa2125b2bac1953af6d6fd99352898e516a686 - nvdisplay/kernel-open/nvidia/nv-msi.c e903f50b2624f33807214973558b9ff380bd68e0 - nvdisplay/kernel-open/nvidia/nv-platform.c 495bcdff3847ff67ba4bbf9af23729eb66eed487 - nvdisplay/kernel-open/nvidia/nv-acpi.c dc165103f9196f5f9e97433ec32ef6dded86d4bb - nvdisplay/kernel-open/nvidia/os-registry.c 4eee7319202366822e17d29ecec9f662c075e7ac - nvdisplay/kernel-open/nvidia/nv-rsync.c 978d00b0d319c5ad5c0d3732b0e44f4ac0ac9a4c - nvdisplay/kernel-open/nvidia/nv_gpu_ops.h 61eadfa0f5b44a3d95e4d2d42d79321fc909c661 - nvdisplay/kernel-open/nvidia/nv-clk.c 07f95171c241880c472a630d1ee38fb222be4d59 - nvdisplay/kernel-open/nvidia/nvidia-sources.Kbuild 805042e7cdb9663a0d3ca3064baeec8aa8eb3688 - nvdisplay/kernel-open/nvidia/nv-ibmnpu.c 9a0f445fda73c69e1bee7f6b121cbed33fcb01bf - nvdisplay/kernel-open/nvidia/nv-mmap.c 68d781e929d103e6fa55fa92b5d4f933fbfb6526 - nvdisplay/kernel-open/nvidia/nv-report-err.h 95ae148b016e4111122c2d9f8f004b53e78998f3 - nvdisplay/kernel-open/nvidia/nv-memdbg.c 24fd035338936c76fda8faeb0d8b1cd59875db92 - nvdisplay/kernel-open/nvidia/nvidia.Kbuild d844fcaa5b02f1d1a753965a336287148b2ce689 - nvdisplay/kernel-open/nvidia/nv-p2p.h 7b1bd10726481626dd51f4eebb693794561c20f6 - nvdisplay/kernel-open/nvidia/nv-host1x.c 02b1936dd9a9e30141245209d79b8304b7f12eb9 - nvdisplay/kernel-open/nvidia/nv-cray.c 84d84563c003d3f568068e7322ce314387a6f579 - 
nvdisplay/kernel-open/nvidia/nv-caps.c 9b701fe42a0e87d62c58b15c553086a608e89f7b - nvdisplay/kernel-open/nvidia/nv-frontend.h 45ec9fd1abfe9a0c7f9ffaf665014cec89c9e7e6 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-crtc.h c8982ace6fc79f75c092662902c0c61371195f0c - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-linux.c 8b2063f0cc2e328f4f986c2ce556cfb626c89810 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-utils.c 6d65ea9f067e09831a8196022bfe00a145bec270 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h 7129c765da5bfb77788441fed39b46dc7dc0fa8e - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c a7bc26c1078e95f9ff49c164f3652787adf1fef3 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-modeset.c 23586447526d9ffedd7878b6cf5ba00139fadb5e - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h 99642b76e9a84b5a1d2e2f4a8c7fb7bcd77a44fd - nvdisplay/kernel-open/nvidia-drm/nvidia-drm.h 66b33e4ac9abe09835635f6776c1222deefad741 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-fb.h c294224282118c70cd546ae024a95479ad9b1de4 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h 2911436a80d67074106c507871f4b480aa307237 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-helper.c ef03d0ae581cc0326abe6054249791f8c0faa9a8 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-prime-fence.c dc0fe38909e2f38e919495b7b4f21652a035a3ee - nvdisplay/kernel-open/nvidia-drm/nvidia-drm.c 59bb05ef214b5c5f2fe3cf70142dabd47ea70650 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-ioctl.h 892cac6dd51ccfde68b3c29a5676504f93ee8cd7 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-format.c 9a882b31b2acc9e1ad3909c0061eee536e648aae - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-drv.h e4bb0073eb9d6f965923bb9874e4714518850a27 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-connector.h d9221522e02e18b037b8929fbc075dc3c1e58654 - nvdisplay/kernel-open/nvidia-drm/nv-pci-table.c eca70b3b8146903ec678a60eebb0462e6ccf4569 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-encoder.h 
1e05d0ff4e51a10fa3fcd6519dc915bf13aa69c0 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-helper.h ab63f2a971db8bf10585b1a05fe0e3ca180ad6c7 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-os-interface.h 355126d65ea1472ce3b278066811d4fb764354ec - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c 5008845a531207899830bcf4568c3463ad0ea6bc - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-drv.c b775af5899366845f9b87393d17a0ab0f1f6a725 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-gem.c d862cc13c29bbce52f6b380b7a0a45a07fe9cbac - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-encoder.c 8bedc7374d7a43250e49fb09139c511b489d45e3 - nvdisplay/kernel-open/nvidia-drm/nv-pci-table.h 044071d60c8cc8ea66c6caaf1b70fe01c4081ad3 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-conftest.h e4efab24f90d397c270568abb337ab815a447fec - nvdisplay/kernel-open/nvidia-drm/nvidia-dma-fence-helper.h e362c64aa67b47becdbf5c8ba2a245e135adeedf - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c 492a1b0b02dcd2d60f05ac670daeeddcaa4b0da5 - nvdisplay/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h fa8d8d10ae773bb7db3b3ce1df545de0e04c937e - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-connector.c 97b6c56b1407de976898e0a8b5a8f38a5211f8bb - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-format.h 708d02c8bcdfb12e4d55896e667821357c8251ec - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-priv.h 8c95aa7ab01dd928974ce7880a532557209bd8e0 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-gem.h 6528efa1f8061678b8543c5c0be8761cab860858 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-modeset.h cbcd6e13d84ea6b52db12eda98be38e321888eb0 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-prime-fence.h 3c9a013abdc787a1022b11099af4277c37cd666b - nvdisplay/kernel-open/nvidia-drm/nvidia-drm.Kbuild 40b5613d1fbbe6b74bff67a5d07974ad321f75f0 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-utils.h 5209eba37913f5d621a13091783622759706e6e3 - nvdisplay/kernel-open/nvidia-drm/nvidia-drm-fb.c bb1f2105d19b50634d46a92ade7fc5f709ec25d3 - 
nvdisplay/kernel-open/nvidia-drm/nvidia-drm-crtc.c 7d108165b4a7b6a44ac21460ea3bf4381fb48c5b - nvdisplay/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h c181ab9960b0c01a7672bc1fe1bc8870f1e8856d - nvdisplay/kernel-open/nvidia-modeset/nvidia-modeset-linux.c 17855f638fd09abfec7d188e49b396793a9f6106 - nvdisplay/kernel-open/nvidia-modeset/nvkms.h 0b7e063481a0e195c6e91a4d3464c4792c684f03 - nvdisplay/kernel-open/nvidia-modeset/nv-kthread-q.c 07a2d5fa54ff88a0cb30c0945ef3c33ca630a490 - nvdisplay/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild 2ea1436104463c5e3d177e8574c3b4298976d37e - nvdisplay/kernel-open/nvidia-modeset/nvkms-ioctl.h 50d31a6d133b0ea9230f9dc1b701ce16a88a7935 - nvdisplay/src/common/sdk/nvidia/inc/rs_access.h a9bf4969ae3e39cc315b6180ee7055e0ad1279c6 - nvdisplay/src/common/sdk/nvidia/inc/nvtypes.h b249abc0a7d0c9889008e98cb2f8515a9d310b85 - nvdisplay/src/common/sdk/nvidia/inc/nvgputypes.h 4b7414705ce10f0a1e312c36a43824b59d572661 - nvdisplay/src/common/sdk/nvidia/inc/nvmisc.h 821a01976045d7c3d2ac35b0f115e90a9e95f8e8 - nvdisplay/src/common/sdk/nvidia/inc/nvimpshared.h befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - nvdisplay/src/common/sdk/nvidia/inc/nv_stdarg.h f28f98589e65b71e47dbcb2c4230538ae0545e75 - nvdisplay/src/common/sdk/nvidia/inc/cpuopsys.h ae60d53603c7ddbbd72d4e16ce2951f3d42aed32 - nvdisplay/src/common/sdk/nvidia/inc/nverror.h eb42327a2b948b79edc04d9145c7aa5b2a2b420e - nvdisplay/src/common/sdk/nvidia/inc/nvlimits.h 88399279bd5e31b6e77cb32c7ef6220ce529526b - nvdisplay/src/common/sdk/nvidia/inc/nv-hypervisor.h 5d8de06378994201e91c2179d149c0edcd694900 - nvdisplay/src/common/sdk/nvidia/inc/nvstatuscodes.h 95bf694a98ba78d5a19e66463b8adda631e6ce4c - nvdisplay/src/common/sdk/nvidia/inc/nvstatus.h a506a41b8dcf657fb39a740ffc1dfd83835d6c89 - nvdisplay/src/common/sdk/nvidia/inc/nvcfg_sdk.h 1e7eec6561b04d2d21c3515987aaa116e9401c1f - nvdisplay/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h af0bc90b3ad4767de53b8ff91e246fdab0146e8b - 
nvdisplay/src/common/sdk/nvidia/inc/nvsecurityinfo.h a31b82c454df785a1d7893af38e83443cfe6f2fc - nvdisplay/src/common/sdk/nvidia/inc/nvdisptypes.h ffa91e1110a5cc286ec44a7bda5461b2be941ea2 - nvdisplay/src/common/sdk/nvidia/inc/nv_vgpu_types.h 3b12d770f8592b94a8c7774c372e80ad08c5774c - nvdisplay/src/common/sdk/nvidia/inc/nvi2c.h 91e9bc3214d6bb9b20bc8001d85fe8699df5184a - nvdisplay/src/common/sdk/nvidia/inc/nvos.h 9bca638f5832d831880f090c583fac6fc8cf6ee6 - nvdisplay/src/common/sdk/nvidia/inc/dpringbuffertypes.h 9f2e225f027f5a04d1104d29a0039cd2bb7dd85a - nvdisplay/src/common/sdk/nvidia/inc/nvfixedtypes.h a7c7899429766c092ee3ecf5f672b75bef55216c - nvdisplay/src/common/sdk/nvidia/inc/class/cl9271.h 4b8f95693f79a036317ab2f85e150c102ad782e9 - nvdisplay/src/common/sdk/nvidia/inc/class/cl84a0.h 36b0dd6de0d0b49d435a4662c35d1f4ae5b2b1bc - nvdisplay/src/common/sdk/nvidia/inc/class/cl9870.h e6818f1728a66a70080e87dac15a6f92dd875b4e - nvdisplay/src/common/sdk/nvidia/inc/class/cl927d.h ecc56a5803b85187aa95b788aedd4fa2262c1bb6 - nvdisplay/src/common/sdk/nvidia/inc/class/cl2080.h 866977d299eac812b41eb702a517e27bdc56e875 - nvdisplay/src/common/sdk/nvidia/inc/class/clc37a.h 9bd9f416844d798f352fcc6c8aaf2c251253c068 - nvdisplay/src/common/sdk/nvidia/inc/class/cl90cd.h 95d99f0805c8451f0f221483b3618e4dbd1e1dd8 - nvdisplay/src/common/sdk/nvidia/inc/class/cl90f1.h ba76ecbebe0ed71ea861ed7016abbfc16ced2df7 - nvdisplay/src/common/sdk/nvidia/inc/class/cl5070_notification.h b29ba657f62f8d8d28a8bdd2976ef3ac8aa6075f - nvdisplay/src/common/sdk/nvidia/inc/class/cl0073.h 2f87e87bcf9f38017ad84417d332a6aa7022c88f - nvdisplay/src/common/sdk/nvidia/inc/class/cl9471.h ddbffcce44afa7c07924fd64a608f7f3fe608ccc - nvdisplay/src/common/sdk/nvidia/inc/class/cl0071.h 1409efc057e4f0d55602f374ec006f9db7ad3926 - nvdisplay/src/common/sdk/nvidia/inc/class/cl0000.h c2d8bb02052e80cd0d11695e734f5e05ab7faeb5 - nvdisplay/src/common/sdk/nvidia/inc/class/cl907dswspare.h 8b75d2586151302d181f59d314b6b3f9f80b8986 - 
nvdisplay/src/common/sdk/nvidia/inc/class/clc573.h 593384ce8938ceeec46c782d6869eda3c7b8c274 - nvdisplay/src/common/sdk/nvidia/inc/class/cl900e.h dec74b9cf8062f1a0a8bbeca58b4f98722fd94b0 - nvdisplay/src/common/sdk/nvidia/inc/class/cl0076.h 053e3c0de24348d3f7e7fe9cbd1743f46be7a978 - nvdisplay/src/common/sdk/nvidia/inc/class/cl0004.h 78259dc2a70da76ef222ac2dc460fe3caa32457a - nvdisplay/src/common/sdk/nvidia/inc/class/clc37e.h b7a5b31a8c3606aa98ba823e37e21520b55ba95c - nvdisplay/src/common/sdk/nvidia/inc/class/cl402c.h 13f8e49349460ef0480b74a7043d0591cf3eb68f - nvdisplay/src/common/sdk/nvidia/inc/class/clc57b.h c2600834921f8a6aad6a0404076fa76f9bc1c04d - nvdisplay/src/common/sdk/nvidia/inc/class/clc37b.h 861b9d7581eab4a2b8cc7269b5d0e0d1294048d1 - nvdisplay/src/common/sdk/nvidia/inc/class/cl0005.h 0d8975eec1e3222694e98eb69ddb2c01accf1ba6 - nvdisplay/src/common/sdk/nvidia/inc/class/cl0000_notification.h 941a031920c0b3bb16473a6a3d4ba8c52c1259d7 - nvdisplay/src/common/sdk/nvidia/inc/class/cl917e.h cb610aaae807d182b4a2ee46b9b43ebfa4a49a08 - nvdisplay/src/common/sdk/nvidia/inc/class/clc57e.h a30755b3003023c093f8724cf9a2e0b0c301b586 - nvdisplay/src/common/sdk/nvidia/inc/class/cl9010.h d1a19dee52b3318714026f4fcc748cfa4681cd25 - nvdisplay/src/common/sdk/nvidia/inc/class/clc370.h 4bbb861011139be1c76b521eaa7ae10951d5bf9a - nvdisplay/src/common/sdk/nvidia/inc/class/cl2081.h 2e3d5c71793820d90973d547d8afdf41ff989f89 - nvdisplay/src/common/sdk/nvidia/inc/class/clc67a.h fb5ef3d6734a2ee6baba7981cdf6419d013cee85 - nvdisplay/src/common/sdk/nvidia/inc/class/clc671.h e63ed2e1ff3fe2a5b29cfc334d3da611db2aadf6 - nvdisplay/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h 15136a724baab270914a01a8c0e8f2c2c83675b6 - nvdisplay/src/common/sdk/nvidia/inc/class/cl00c3.h 95ca0b08eed54d1c6dd76fdf9cf4715007df1b20 - nvdisplay/src/common/sdk/nvidia/inc/class/cl0020.h 509c56534ed6d48b06494bb22d3cf58d63254a05 - nvdisplay/src/common/sdk/nvidia/inc/class/clc574.h eac86d7180236683b86f980f89ec7ebfe6c85791 - 
nvdisplay/src/common/sdk/nvidia/inc/class/cl957d.h 11fd2de68ab82b81211aa20c66a9a6595199f673 - nvdisplay/src/common/sdk/nvidia/inc/class/cl9270.h c5ef1b16b2bd2e33f52b71f2b78db789ebb844f0 - nvdisplay/src/common/sdk/nvidia/inc/class/cl9770.h e0c9a155f829c158c02c21b49c083168f8b00cbe - nvdisplay/src/common/sdk/nvidia/inc/class/clc37dswspare.h bae36cac0a8d83003ded2305409192995d264d04 - nvdisplay/src/common/sdk/nvidia/inc/class/cl0001.h 05605d914edda157385e430ccdbeb3fcd8ad3c36 - nvdisplay/src/common/sdk/nvidia/inc/class/cl9171.h a23967cf3b15eefe0cc37fef5d03dfc716770d85 - nvdisplay/src/common/sdk/nvidia/inc/class/clc372sw.h 02ff42b6686954e4571b8a318575372239db623b - nvdisplay/src/common/sdk/nvidia/inc/class/cl30f1_notification.h 545dd1899c6988ffe5f50300232bd862d915cd5b - nvdisplay/src/common/sdk/nvidia/inc/class/clc770.h 26c3ccc33328a66ad3bcfe999424dffda991264f - nvdisplay/src/common/sdk/nvidia/inc/class/clc670.h 02906b5ba8aab0736a38fd1f6d7b4f6026a5185b - nvdisplay/src/common/sdk/nvidia/inc/class/clc57esw.h 326dbbeb275b4fc29f6a7e2e42b32736474fec04 - nvdisplay/src/common/sdk/nvidia/inc/class/cl9571.h 9b2d08d7a37beea802642f807d40413c7f9a8212 - nvdisplay/src/common/sdk/nvidia/inc/class/clc37d.h 68c953956a63ef8f7f9bcbe71057af510f4597c1 - nvdisplay/src/common/sdk/nvidia/inc/class/clb0b5sw.h ab27db8414f1400a3f4d9011e83ac49628b4fe91 - nvdisplay/src/common/sdk/nvidia/inc/class/cl987d.h 03ab4e08e8685696477b62eb1a825e5198d61b8a - nvdisplay/src/common/sdk/nvidia/inc/class/cl0080.h 9db39be032023bff165cd9d36bee2466617015a5 - nvdisplay/src/common/sdk/nvidia/inc/class/cl0002.h e72a7871d872b2eb823cc67c0a7d4cafb3d0ca18 - nvdisplay/src/common/sdk/nvidia/inc/class/cl90ec.h 11b19cb8d722146044ad5a12ae96c13ed5b122b6 - nvdisplay/src/common/sdk/nvidia/inc/class/cl917b.h bb8d15aee43e1feb76fddf80398e93fd805f1ddb - nvdisplay/src/common/sdk/nvidia/inc/class/cl2082.h 204feb997ba42deab327d570e5f12235d5160f00 - nvdisplay/src/common/sdk/nvidia/inc/class/clc57a.h 15d1f928a9b3f36065e377e29367577ae92ab065 - 
nvdisplay/src/common/sdk/nvidia/inc/class/cl0080_notification.h 82c9df617999f93ebd9362851966f601b8131fdd - nvdisplay/src/common/sdk/nvidia/inc/class/clc570.h 060722ac6a529a379375bb399785cbf2380db4fd - nvdisplay/src/common/sdk/nvidia/inc/class/clc373.h 92c2dab6bc48f32f46c6bbc282c63cb4ec7a50bf - nvdisplay/src/common/sdk/nvidia/inc/class/cl9170.h c61f8348c2978eef0a07191aaf92bd73e935f7bd - nvdisplay/src/common/sdk/nvidia/inc/class/clc67e.h 4a6444c347825e06bdd62401120553469f79c188 - nvdisplay/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h 026f66c4cc7baad36f1af740ae885dae58498e07 - nvdisplay/src/common/sdk/nvidia/inc/class/clc371.h ff47d8a4b4bdb3b9cd04ddb7666005ac7fcf2231 - nvdisplay/src/common/sdk/nvidia/inc/class/cl003e.h 0285aed652c6aedd392092cdf2c7b28fde13a263 - nvdisplay/src/common/sdk/nvidia/inc/class/cl00fc.h 1efc9d4aa038f208cd19533f6188ac3a629bf31a - nvdisplay/src/common/sdk/nvidia/inc/class/cl917a.h 38265d86eb7c771d2d3fc5102d53e6a170a7f560 - nvdisplay/src/common/sdk/nvidia/inc/class/cl0041.h 4d5ccf08ab73343343e0c804002a621996866161 - nvdisplay/src/common/sdk/nvidia/inc/class/cl0092.h 022e8405220e482f83629dd482efee81cc49f665 - nvdisplay/src/common/sdk/nvidia/inc/class/clc77f.h ccefba28a2c7979701f963f2c358b4414b84ca98 - nvdisplay/src/common/sdk/nvidia/inc/class/cl9570.h bb79bbd1b0a37283802bc59f184abe0f9ced08a5 - nvdisplay/src/common/sdk/nvidia/inc/class/cl0040.h 127f78d2bb92ef3f74effd00c2c67cf7db5382fe - nvdisplay/src/common/sdk/nvidia/inc/class/clc67d.h b1133e9abe15cf7b22c04d9627afa2027e781b81 - nvdisplay/src/common/sdk/nvidia/inc/class/cl917c.h a26ddc6c62faac1ecd5c5f43499aab32c70f32cb - nvdisplay/src/common/sdk/nvidia/inc/class/clc67b.h bd27ceb75c4604fef53658f16a5012d97c1534b2 - nvdisplay/src/common/sdk/nvidia/inc/class/cl9470.h b29ea3f13f501327c060b9ddfac5834ed396414a - nvdisplay/src/common/sdk/nvidia/inc/class/cl30f1.h 04ab1761d913030cb7485149ecd365f2f9c0f7da - nvdisplay/src/common/sdk/nvidia/inc/class/cl0005_notification.h 
da8d312d2fdc6012e354df4fa71ed62ae4aac369 - nvdisplay/src/common/sdk/nvidia/inc/class/cl927c.h 158c98c8721d558ab64a025e6fdd04ce7a16ba9e - nvdisplay/src/common/sdk/nvidia/inc/class/cl947d.h 5416c871e8d50a4e76cbad446030dbedbe1644fd - nvdisplay/src/common/sdk/nvidia/inc/class/cl00f2.h dd4f75c438d19c27e52f25b36fc8ded1ce02133c - nvdisplay/src/common/sdk/nvidia/inc/class/cl917cswspare.h 435a34753d445eb9711c7132d70bd26df2b8bdab - nvdisplay/src/common/sdk/nvidia/inc/class/cl917d.h 31939808cd46382b1c63bc1e0bd4af953302773f - nvdisplay/src/common/sdk/nvidia/inc/class/cl977d.h ea10b0d938d9314638882fdc20b9158a193f7b08 - nvdisplay/src/common/sdk/nvidia/inc/class/cl5070.h 28867d69a6ceac83da53a11a5e1ef87d9476f0be - nvdisplay/src/common/sdk/nvidia/inc/class/clc57d.h f5760f5054538f4ecf04d94fb1582a80a930bc29 - nvdisplay/src/common/sdk/nvidia/inc/class/clc673.h 76c430d54887ed14cace9409712259e10f042b4c - nvdisplay/src/common/sdk/nvidia/inc/class/cl00c1.h 6db83e33cb3432f34d4b55c3de222eaf793a90f0 - nvdisplay/src/common/sdk/nvidia/inc/class/cl00b1.h 1022bba330a71b92dcc81f47ba460209fcc70cd0 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h 3449834cb8b8c630ab1de6df30503c846b26e86b - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h e2d8133537e2687df022c6a966c55fbfea1974f3 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h 5abe75cf18a2fede23529194b406c3cf742edced - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h 9c6a4f1d864b5161564869b19f8cb2ce9d629c1d - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h b72318d58806bfd25f922107a606b222baa2e28c - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h 79204c26eb58ee812cc2f72ee1f6d4d7d93817c7 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h 7c4aef225d174ecbe1130d63b8e8ff752bddf48e - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h f779cd0470e428160fc590b590f2cd4855950058 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h 00e9a0ace4b59958a8b048229fb22b4d9e2f8864 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h 
0639d6cd553994aff4195e8e7547eebf8e713145 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h c8490da9f200f4dbbac7ebe636f3a83485f3001c - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h 7a0c878431a9b0d9dda117f165946b1cdf8ebbde - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h 6e5b278451308efbb6911a8ab03b0feba504d035 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h ef180860a1ccbcb9f5d2f8a6656a345eef76a2a7 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h 2518a62952c72ee6f3447bc8dc417129f6ac26a4 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h 4a3e7d71b9169d703d9373ff80b02a63825a80e4 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h 382dc80790d870047db7cea957ef208d4439801e - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gspc.h b66a45c83c84f6d458ef19fd7e0f972f2eabd109 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h 9373c51ca29afec3368fb5b8c2a2f05b0920f291 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h 825f4d976c76d375803e42967fdab53e7814d18d - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h abe79ad927e7c70b7c1a8eb687052a782efcd5f4 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h 4d9116d23d27a3fc39c366f2685243b83ef7d485 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h f7e56d494fea02515180f21b0f56ae0aff583be4 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h 0ee647b929e55cf39da7e26ffc0f027676fa52fa - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h 323fcc6af8c30d5ef292ae90810c5c2fa2009e20 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h c905766589d17fcb99a5d73846ed61f7b7db56fe - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h 76c9f104e04a8fd9e73e03ad59b2e72264c5f169 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h f4a4eeb35e15e0642d1bf4e2e5b31394f4cbbfa1 - 
nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h f7435e356d54d682a949734574388abbe7ffe1d0 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h 6c34803c213ea0a28114bc921e1867cefebec088 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h 862a17958488d69ca3e92c42ee1bed55cb299fa4 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h b7f2957f506dc285acb87d41d34cfd60408b00ae - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080rc.h ea9aac6f0e23f0de444ac3919c35e4b78c18c942 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h d107e41878b5bc50a5c8b29684122c9589625a6f - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h c72f147e8fb78126d13567278239acfcd9b9cc1f - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h 64f849ed19609320461b8938f24f0b40fb1a35b0 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h 681c94b982e29049638814f6c1e4eb508f8b0bf3 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h b7b0360b1a6ca78267fa10f7adcd370da86513c3 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h 8dd5acedc0b1613314eb3fe9130a9c282bd49ca1 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h bb4182eeea20779f62165d2d50ed209b6a07e54e - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h 3646710984d5c3024d16f9ab346222ad6dfdb4f0 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h 9e61da81ecdff15d63f9ae8a1c2f0960b820c65c - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h dac18fcaf5d652b21f84cfba455f4f5972e786c5 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h cf78a847e0882e1d164eccdb86ea033126019599 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h 8294d43d202a9cd78367f2e69388a6c6f2c369f7 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h 1248e113751f8ed9e4111e86a7f7fb632b102eca - 
nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h f3b81a241efe1224798b17c062e33936469c3c2b - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h 7e0773f7bf13350a9fd25b0df4d6c45a55a008df - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h f88f1c519a242dfa71221bdcdafc7deab14d8503 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h 09dedebdcff3244ab8f607a7152e9116d821f9c1 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h b921747a65c67fa093de08fa782c164d048824b0 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h 3dc187adc0a848e68f62a6a7eb99ac02ee6502cc - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h 8fd661537cc4eb55c167b9daae404bfb82408bfe - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h ccc48726d7da49cddc4d4f86d8dbd2ad585f7b38 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h 440314f66374d35a1628ee8bd61836a80ab421eb - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h 92be535d68a7f18088921faa3f1742298ad341c3 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h 24782552a13f627e2e94ebb5f7021246a0c0dc53 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h 76c31150e2f589fbb96cfc06cdc6c1801e128656 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h 7812ba094d95c1b6d65afc6a1d26930400b8b96f - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h f1dae17e75a24c28135cf073bf29f9609a2418e3 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h 7f5548026751a8caaebc245945ccdc4bb037b566 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h f523fe4a55a6a9d01f41f9f34ff149ed75b2e739 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070event.h 91cccede5c4f26a6b6ca7ba4bc292f3d908a88d4 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h e10cbe4875736ef16072232789dd3f48647c022f - 
nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h 84fb76f9cff38c797b139cba40175717591d49df - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h aec1b750866e34f9626e48c535336f93c5c246fa - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h 2f92bebccb9da5246b19bd13ff0e6e79de79bc3b - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h 9031642283b59ee6d52e2e1ca54332df5c2f7acc - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h 209ef519cb73395cea7d66016448ebc3c6bf6fe4 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h f47136417885a729f9c5dee375ec9dec1bd170e0 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h ad7604ced12ee18c569d2a7ebe71e185ebff3fd4 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h 0ac7e4eb4d952c84c6f4e697cbfcb355069377c2 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h c4474dc1f53661c67d8fce5303dcc636d9ad3b8f - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h da220a5608a0e4c73fa0315b13e2b29d92b114e9 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h 291f91212d5a37aae46a2944cf89f4b74b1d1809 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h 8a613db1c31724a577c4718752c15d9754882f48 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h 090f908931690302e3a2c77f3ce41c4de0c61efc - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h 5bdddb9a949a78443f83a7da81ad5fee8a300c44 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h bc22bf13b7d99ee6f80c30b569e084a2b03e385a - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h a44d2f1b31b8ec124355018204909df19df09748 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h 82364e263f43ea028c2d66db58887958bdef64b0 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h 3966d65c9701bf97c807cf87838a08cda10f418d - 
nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h e6f6beaed64167088608027b442f5449cff027c1 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080common.h 1684a3a8111fd3d83363cebe68d016a54eaaf686 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h d084d99035f4cc34cd803ff4a5328b9e10ea77fc - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h a1830232f18afe44230d6a8598c50b3fc7656089 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h d51e47795dfe1fc0bae31b9379d6a39ac4d3080f - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h 051dbfd1d5ff02b2771bc9b3fad8aaef29aab9ae - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h 82aa4d6108ce6abebcbbc95afcb7a6350e287f5f - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h 18814de559257f07bad8a0a9006ac9751fcfa1cb - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h e9d692b06c70951dbbd0663a89f822153bce1146 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h 6b4418e269bb97b9996b05ea153ccd195c661e11 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h 82a2e7a2fc6501163d07870f3f640a591f4a8996 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h 091f7bac99f5c786a64b6fa59d9d27af786bab10 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h 6834a9c75265c25adfb03f0b2dbfe0559f28cadf - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h b2b6b3b413ae17af1afde2fc8672cd1bf48e7b19 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h 39f5e838aa6ab007c56e7a59c7d2986d1a7aa34a - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h 2a11fc0a499f8293b83e08572f5e6be04bd1da61 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h 6679d97e3852ed78ee44780408c523b94f426ca4 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h 3d8e37aa8485aadf55335d8f9f913273d90a2442 - 
nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h 7d27fafff043d290b2ec1d2dddbecea2f1df4704 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h 27ad8b5c2406fcd572cd098dd215e93ae1db99e3 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h c3a75647f5ca6cd7b456511af36a9de6d90329c3 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h 7c4e426dee0ae86c00b3bd10873a1a2bd94ed3b2 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h 1ebfe9f0f9a7d2dd2873df82bbc78b1ec982ca93 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h 7d3819683e9f562a87f36a3e23c043b2b6fd814e - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h 4b8fa2ce546ae3f06b7dc61df3d534449cdb5b2d - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h 22d828c87b223f937c589a0e863a25d95b734371 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h f3a855fe7a91c2acf2be41629ce906996e01a9fc - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h 783db6da0b92b6b8ae26b180129beb0bccb13a5b - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h 2dd40e3e41d74de3865bc700acc9ab7e0540c647 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h 143c1c24ec926142d1f84dec7a543f2b98541545 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h 72292c9844eaf24c38967dd4a879c0c0f070a0de - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h 1651ec548a2899391a05bc6463b3f7162c7807ab - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h 8855ee8bad2f2169ebd147e7ac77d9f1340cbad8 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h c0181e959c1ba5ebfc3f130c8764687b58453f9b - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h 8ef946f1d7545277ef64891b45a29db44c4e9913 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h f97e7f88aa17788bbbebf55807e449c0ee016384 - 
nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h 3c7130d0613d3c8baef6b23bb63c6ee7a10ed21b - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h 774fd1e730d1d853bf97946f7ecd24c6648c7af4 - nvdisplay/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h 2476f128437c0520204e13a4ddd2239ff3f40c21 - nvdisplay/src/common/unix/common/inc/nv-float.h 881cbcc7ed39ea9198279136205dbe40142be35e - nvdisplay/src/common/unix/common/inc/nv_assert.h 1c947cfc8a133b00727104684764e5bb900c9d28 - nvdisplay/src/common/unix/common/inc/nv_mode_timings.h a8e49041c1b95431e604852ad0fa3612548e3c82 - nvdisplay/src/common/unix/common/inc/nv_dpy_id.h 83044eb5259200922f78ad3248fbc1d4de1ec098 - nvdisplay/src/common/unix/common/inc/nv_common_utils.h 5ef40af650eb65b2c87572a1bbfe655d8821f2d5 - nvdisplay/src/common/unix/common/utils/nv_memory_tracker.c bda08c8398f68ffc2866ebc390dc63a09a16b0b9 - nvdisplay/src/common/unix/common/utils/unix_rm_handle.c 26f2a36442266c5d2664d509ecfd31094a83e152 - nvdisplay/src/common/unix/common/utils/nv_vasprintf.c e903bbbecf4fb3085aaccca0628f0a0e4aba3e58 - nvdisplay/src/common/unix/common/utils/nv_mode_timings_utils.c 667b361db93e35d12d979c47e4d7a68be9aa93b6 - nvdisplay/src/common/unix/common/utils/interface/nv_mode_timings_utils.h 07c675d22c4f0f4be6647b65b6487e2d6927c347 - nvdisplay/src/common/unix/common/utils/interface/nv_memory_tracker.h 8d9c4d69394b23d689a4aa6727eb3da1d383765a - nvdisplay/src/common/unix/common/utils/interface/unix_rm_handle.h 9e008270f277e243f9167ab50401602378a2a6e8 - nvdisplay/src/common/unix/common/utils/interface/nv_vasprintf.h e015e955a05908d4a2202213353eac89f1b80ff6 - nvdisplay/src/common/inc/nvSha1.h 8f0d91e1a8f0d3474fb91dc3e6234e55d2c79fcc - nvdisplay/src/common/inc/rmosxfac.h 764e5c4364922e3953b4db0411d1d3c3bdac99f4 - nvdisplay/src/common/inc/nvlog_defs.h ebccc5c2af2863509e957fe98b01d9a14d8b0367 - nvdisplay/src/common/inc/nv_list.h b85b49fc4ed38a241c79731a02b3b040a654a52a - 
nvdisplay/src/common/inc/nvctassert.h 3bf0416186ee90833c727f01cc891bd568ea9d0f - nvdisplay/src/common/inc/nvVer.h b4c5d759f035b540648117b1bff6b1701476a398 - nvdisplay/src/common/inc/nvCpuUuid.h 4282574b39d1bcaf394b63aca8769bb52462b89b - nvdisplay/src/common/inc/nvBinSegment.h a27eb14c54c6acb647a95c264b90e25f07fc757e - nvdisplay/src/common/inc/nvBldVer.h 62e510fa46465f69e9c55fabf1c8124bee3091c4 - nvdisplay/src/common/inc/nvHdmiFrlCommon.h a346380cebac17412b4efc0aef2fad27c33b8fb5 - nvdisplay/src/common/inc/nvlog_inc2.h 963aebc9ec7bcb9c445eee419f72289b21680cdd - nvdisplay/src/common/inc/hdmi_spec.h 5257e84f2048b01258c78cec70987f158f6b0c44 - nvdisplay/src/common/inc/nvlog_inc.h b58ed1b4372a5c84d5f3755b7090b196179a2729 - nvdisplay/src/common/inc/nv_speculation_barrier.h f59a2759281341e56372d3cb37b16715944dd8e1 - nvdisplay/src/common/inc/nvPNPVendorIds.h cf9d774335b85de8870320acce33e367af34d476 - nvdisplay/src/common/inc/nvUnixVersion.h 249d4f7317ce68c3ceb64e2b1ee257cc75eb002b - nvdisplay/src/common/inc/displayport/dpcd20.h a26df21c3cc3eeb395428101f11da68386e0d72b - nvdisplay/src/common/inc/displayport/dpcd14.h 96b9560d322f43a980db5d6cc5072e9e81fdb9d2 - nvdisplay/src/common/inc/displayport/displayport.h 8159b4189c577d545c1280d7d905a2dc2ba29fa7 - nvdisplay/src/common/inc/displayport/dpcd.h d2b4cc6228c4b13ef77e47bf30326826c5662ed4 - nvdisplay/src/common/inc/swref/published/nv_ref.h 06aa739230c00998e039b0104e5d73da85c322fe - nvdisplay/src/common/inc/swref/published/nv_arch.h 38edc89fd4148b5b013b9e07081ba1e9b34516ac - nvdisplay/src/common/inc/swref/published/turing/tu102/kind_macros.h 86a59440492fd6f869aef3509f0e64a492b4550d - nvdisplay/src/common/inc/swref/published/turing/tu102/dev_mmu.h 1ea0c3d6ea0c79c01accc7b25d15b421ab49a55d - nvdisplay/src/common/inc/swref/published/disp/v04_02/dev_disp.h f9311a35f375c7453d99fdde3876440b54d4cb5a - nvdisplay/src/common/inc/swref/published/disp/v03_00/dev_disp.h 2f60ba753549b232e1b995046a356dbe0eced04a - 
nvdisplay/src/common/shared/nvstatus/nvstatus.c 750ecc85242882a9e428d5a5cf1a64f418d59c5f - nvdisplay/src/common/displayport/inc/dp_object.h 0f747fdf03bebdcd86dbdf16d00ee2d044bc906c - nvdisplay/src/common/displayport/inc/dp_messages.h 80380945c76c58648756446435d615f74630f2da - nvdisplay/src/common/displayport/inc/dp_timeout.h cdb1e7797c250b0a7c0449e2df5ce71e42b83432 - nvdisplay/src/common/displayport/inc/dp_merger.h 070b4f6216f19feebb6a67cbb9c3eb22dc60cf74 - nvdisplay/src/common/displayport/inc/dp_buffer.h 02b65d96a7a345eaa87042faf6dd94052235009c - nvdisplay/src/common/displayport/inc/dp_messageheader.h 78595e6262d5ab0e6232392dc0852feaf83c7585 - nvdisplay/src/common/displayport/inc/dp_auxbus.h e27519c72e533a69f7433638a1d292fb9df8772e - nvdisplay/src/common/displayport/inc/dp_crc.h 325818d0a4d1b15447923e2ed92c938d293dc079 - nvdisplay/src/common/displayport/inc/dp_hostimp.h b4d8c44957efc90ba97092987e6e43c48e85ac86 - nvdisplay/src/common/displayport/inc/dp_address.h 36e80dd13c5adc64c3adc9a931d5ebbf922e9502 - nvdisplay/src/common/displayport/inc/dp_groupimpl.h 78c6d7d85b47636fbb21153425ef90c6d0b2d4e2 - nvdisplay/src/common/displayport/inc/dp_configcaps.h 39aece5465100489867001bf57446bcfc4999c24 - nvdisplay/src/common/displayport/inc/dp_evoadapter.h 01f1dd58ed5bb12503fa45be7a6657cde0a857e2 - nvdisplay/src/common/displayport/inc/dp_guid.h cca426d571c6b01f7953180e2e550e55c629f0f4 - nvdisplay/src/common/displayport/inc/dp_auxretry.h 11487c992494f502d1c48ff00982998504336800 - nvdisplay/src/common/displayport/inc/dp_internal.h f6e1b0850f5ed0f23f263d4104523d9290bb8669 - nvdisplay/src/common/displayport/inc/dp_vrr.h 543efa25367763292067245cbc39c1382c35df77 - nvdisplay/src/common/displayport/inc/dp_discovery.h 07d22f84e6a386dad251761278a828dab64b6dd5 - nvdisplay/src/common/displayport/inc/dp_bitstream.h 3eea80c74a22de43b6edad21ea5873c791e093e2 - nvdisplay/src/common/displayport/inc/dp_mainlink.h 7974abf146f1f14cd3e3854ef63ddf52ebbeb222 - 
nvdisplay/src/common/displayport/inc/dp_deviceimpl.h 8f83883126b853c97e5859dafd98847ec54d36ac - nvdisplay/src/common/displayport/inc/dp_splitter.h 5bd3706ceea585df76a75dda7f9581b91ee8f998 - nvdisplay/src/common/displayport/inc/dp_tracing.h 4a098c4d09dedc33b86748d5fe9a30d097675e9f - nvdisplay/src/common/displayport/inc/dp_list.h 7b7d9a137027fbbedfc041465987fa4ed4198ce4 - nvdisplay/src/common/displayport/inc/dp_edid.h 379d3933c90eaf9c35a0bad2bd6af960a321465f - nvdisplay/src/common/displayport/inc/dp_wardatabase.h d876d77caef3541ae05f310857f3d32e642fba04 - nvdisplay/src/common/displayport/inc/dp_auxdefs.h e2075486b392d6b231f2f133922ac096ca4bc095 - nvdisplay/src/common/displayport/inc/dp_ringbuffer.h 5dff32bd1018e2c5c2540ea7fb571dbea596d5b1 - nvdisplay/src/common/displayport/inc/dp_regkeydatabase.h d1e8c84f279cb30978d32c784107c0247afa6e66 - nvdisplay/src/common/displayport/inc/dp_linkconfig.h e02e5621eaea52a2266a86dcd587f4714680caf4 - nvdisplay/src/common/displayport/inc/dp_linkedlist.h 2067e2ca3b86014c3e6dfc51d6574d87ae12d907 - nvdisplay/src/common/displayport/inc/dp_timer.h 62d03d24af041276ba2abb96fa1634ae4f99ea8a - nvdisplay/src/common/displayport/inc/dp_connectorimpl.h aeadcb0bc061b5db0fdf8aa67c1b5703976aa946 - nvdisplay/src/common/displayport/inc/dp_connector.h 6e515f398e9ae1b603e49ec32576ccd0ce5d8828 - nvdisplay/src/common/displayport/inc/dp_messagecodings.h 3b74682e142e94b1c68bf619169f12e5805044bc - nvdisplay/src/common/displayport/inc/dp_watermark.h 020194b85245bad5de4dfe372a7ccb0c247d6ede - nvdisplay/src/common/displayport/inc/dptestutil/dp_testmessage.h 70b155b0da07a92ede884a9cec715f67e6b5c3e8 - nvdisplay/src/common/displayport/src/dp_list.cpp 37eabb1ab51cb38660eb24e294c63c8320750b96 - nvdisplay/src/common/displayport/src/dp_sst_edid.cpp fea946e5320e7de8e9229bca8d4a6a14b9e8db59 - nvdisplay/src/common/displayport/src/dp_crc.cpp c70d946adb4029b3476873887488748162b88b0b - nvdisplay/src/common/displayport/src/dp_messagecodings.cpp 
a5df56b2cf8df9d4d8ab6fa2b3521649ef09384a - nvdisplay/src/common/displayport/src/dp_wardatabase.cpp d63fed0074b22584686ad4d0cdaa4388b42194d6 - nvdisplay/src/common/displayport/src/dp_watermark.cpp e874ffeaeb6deec57605bf91eaa2af116a9762bd - nvdisplay/src/common/displayport/src/dp_bitstream.cpp c62ef84471074a9ed428b4a03e644885989b0b83 - nvdisplay/src/common/displayport/src/dp_evoadapter.cpp 56ee9318a7b51a04baa1d25d7d9a798c733dc1bc - nvdisplay/src/common/displayport/src/dp_vrr.cpp d991afdb694634e9df756184b5951739fc3fd0ab - nvdisplay/src/common/displayport/src/dp_auxretry.cpp 554e6b7dadbb68ac0f3d2e368ca3fd90832ea254 - nvdisplay/src/common/displayport/src/dp_discovery.cpp 66e91795dc65e1bc13c545a84556d200c8eb7bd5 - nvdisplay/src/common/displayport/src/dp_messages.cpp 719d2ddbfb8555636496cb5dd74ee6776059db92 - nvdisplay/src/common/displayport/src/dp_timer.cpp 38fe8122aba8a1bc5745d81192ec7fc75934dd0d - nvdisplay/src/common/displayport/src/dp_deviceimpl.cpp ac08ccd5c2e3fadf10ae53e46e582489d1579ed0 - nvdisplay/src/common/displayport/src/dp_edid.cpp 6fd536d1849ea4cce5d9b72d1dcbc1db9c818b4e - nvdisplay/src/common/displayport/src/dp_groupimpl.cpp 4803cde0fffcf89fed46d6deaeba5c96c669a908 - nvdisplay/src/common/displayport/src/dp_messageheader.cpp 9f31213ab8037d7bb18c96a67d2630d61546544a - nvdisplay/src/common/displayport/src/dp_mst_edid.cpp f56f92e32710b0342805b785d34ba1a9f2a54ed3 - nvdisplay/src/common/displayport/src/dp_guid.cpp 60994cb1131d4d37b2d3fce6cc59dfea5ebb4129 - nvdisplay/src/common/displayport/src/dp_connectorimpl.cpp f83b3c17e9f26651f12c8835a682abdd66aed3a2 - nvdisplay/src/common/displayport/src/dp_splitter.cpp 1543bbaba8f3e149239cf44be3c0d080c624d5ba - nvdisplay/src/common/displayport/src/dp_buffer.cpp fa4f4869d3d63c0180f30ae3736600a6627284c6 - nvdisplay/src/common/displayport/src/dp_merger.cpp a0d24a4bd71f999adbaa876168adef5a7d95f2b8 - nvdisplay/src/common/displayport/src/dp_configcaps.cpp fe8007b3d98dad71b17595ecb67af77b198827a0 - 
nvdisplay/src/common/displayport/src/dptestutil/dp_testmessage.cpp 439ef00ffa340bd1b6506970d154a33ca4b64b4a - nvdisplay/src/common/modeset/timing/nvt_dmt.c 890d8c2898a3277b0fed360301c2dc2688724f47 - nvdisplay/src/common/modeset/timing/nvt_util.c f531475d8b978bca5b79d39d729b0c9986fe7b36 - nvdisplay/src/common/modeset/timing/nvtiming.h b4813a5e854e75fb38f460e0c27dca8e1ce8dc21 - nvdisplay/src/common/modeset/timing/nvt_edid.c 0a04709ebdc4acb12038656c433e10c4e7096518 - nvdisplay/src/common/modeset/timing/nvt_edidext_displayid.c e66a20fc1579b0dd1392033089f97cf170e8cf10 - nvdisplay/src/common/modeset/timing/dpsdp.h b5bd3a58b499216e4fe0e0c9c99525b07ac237dc - nvdisplay/src/common/modeset/timing/nvt_dsc_pps.c f75b1d98895bdccda0db2d8dd8feba53b88180c5 - nvdisplay/src/common/modeset/timing/displayid.h 1997adbf2f6f5be7eb6c7a88e6660391a85d891b - nvdisplay/src/common/modeset/timing/nvt_gtf.c 95dae946088f21339299dae48eeafaab31b97b05 - nvdisplay/src/common/modeset/timing/nvtiming_pvt.h 58b68f1272b069bb7819cbe86fd9e19d8acd0571 - nvdisplay/src/common/modeset/timing/edid.h cfaa569ac3d63484c86e8a8d7a483dd849f96be8 - nvdisplay/src/common/modeset/timing/nvt_edidext_displayid20.c 3023a58fd19d32280607d4027b09fe51fdb7a096 - nvdisplay/src/common/modeset/timing/nvt_dsc_pps.h 4a2ad30f49ed92694b717a99ce7adeeb565e8a37 - nvdisplay/src/common/modeset/timing/nvt_edidext_861.c 28d7b753825d5f4a9402aff14488c125453e95c5 - nvdisplay/src/common/modeset/timing/nvt_tv.c 1290abde75d218ae24f930c3b011042a3f360c2e - nvdisplay/src/common/modeset/timing/displayid20.h 49df9034c1634d0a9588e5588efa832a71750a37 - nvdisplay/src/common/modeset/timing/nvt_cvt.c ba9e382b24f57caa9dcf1c26a60b1f2070b1b9dd - nvdisplay/src/common/modeset/timing/nvt_displayid20.c 443c0a4b17a0019e4de3032c93c5cac258529f01 - nvdisplay/src/common/modeset/hdmipacket/nvhdmipkt_internal.h 15d54c86d78404639c7f151adc672e19472dcf4a - nvdisplay/src/common/modeset/hdmipacket/nvhdmipkt.c f669280a5e86ba51b691e2609fa7d8c223bd85dc - 
nvdisplay/src/common/modeset/hdmipacket/nvhdmipkt_C671.c 7c2fe72426fa304315e169e91dc6c1c58b5422fd - nvdisplay/src/common/modeset/hdmipacket/nvhdmipkt_0073.c d2c79c8a4e914519d653d1f14f706ec4a1f787e8 - nvdisplay/src/common/modeset/hdmipacket/nvhdmipkt_9171.c 54a1b5e5aaf0848a72befc896ed12f1de433ad4f - nvdisplay/src/common/modeset/hdmipacket/nvhdmipkt_9471.c 90e8ce7638a28cd781b5d30df565116dc1cea9e8 - nvdisplay/src/common/modeset/hdmipacket/nvhdmipkt.h 9be7b7be94a35d1d9a04f269ff560dbbb7860a2a - nvdisplay/src/common/modeset/hdmipacket/nvhdmipkt_9571.c 381e1b8aeaa8bd586c51db1f9b37d3634285c16a - nvdisplay/src/common/modeset/hdmipacket/nvhdmipkt_class.h 5e12a290fc91202e4ba9e823b6d8457594ed72d3 - nvdisplay/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h 67db549636b67a32d646fb7fc6c8db2f13689ecc - nvdisplay/src/common/modeset/hdmipacket/nvhdmipkt_9271.c e6d500269128cbd93790fe68fbcad5ba45c2ba7d - nvdisplay/src/common/modeset/hdmipacket/nvhdmipkt_C371.c e3be7ba45506c42d2fca87e9da45db75ced750ca - nvdisplay/src/common/modeset/hdmipacket/nvhdmipkt_common.h b882497ae393bf66a728dae395b64ac53602a1a5 - nvdisplay/src/common/softfloat/nvidia/nv-softfloat.h be9407a273620c0ba619b53ed72d59d52620c3e4 - nvdisplay/src/common/softfloat/nvidia/platform.h 577821f706c7de4ca327c1e2fcc34161c96c89f3 - nvdisplay/src/common/softfloat/source/f64_to_i64_r_minMag.c 21a6232d93734b01692689258a3fdfbbf4ff089d - nvdisplay/src/common/softfloat/source/s_roundToUI32.c 29321080baa7eab86947ac825561fdcff54a0e43 - nvdisplay/src/common/softfloat/source/i32_to_f32.c dafa667ee5dd52c97fc0c3b7144f6b619406c225 - nvdisplay/src/common/softfloat/source/s_mulAddF64.c 108eec2abf1cddb397ce9f652465c2e52f7c143b - nvdisplay/src/common/softfloat/source/f64_roundToInt.c 513a7d1c3053fc119efcd8ae1bcc9652edc45315 - nvdisplay/src/common/softfloat/source/f32_lt.c d19ff7dfece53875f2d6c6f7dd9e7772f7b0b7ec - nvdisplay/src/common/softfloat/source/f32_to_i64_r_minMag.c 2db07bbb8242bc55a24ef483af6d648db0660de0 - 
nvdisplay/src/common/softfloat/source/f32_add.c c951c9dffa123e4f77ed235eca49ef9b67f9f3d2 - nvdisplay/src/common/softfloat/source/s_subMagsF64.c 5c1026617c588bcf5f1e59230bd5bb900600b9ac - nvdisplay/src/common/softfloat/source/f64_mul.c 5c4ee32cc78efc718aaa60ec31d0b00b1bee3c2c - nvdisplay/src/common/softfloat/source/f64_to_ui64_r_minMag.c 6fa7493285fe2f7fdc0ac056a6367e90327905c2 - nvdisplay/src/common/softfloat/source/f32_sub.c da3b3f94a817909a3dc93ca5fa7675805c7979e0 - nvdisplay/src/common/softfloat/source/f64_isSignalingNaN.c d701741d8d6a92bb890e53deda1b795f5787f465 - nvdisplay/src/common/softfloat/source/f64_le.c baa7af4eea226140c26ffe6ab02a863d07f729fb - nvdisplay/src/common/softfloat/source/f64_eq_signaling.c 2e5c29d842a8ebc5fbf987068dc9394cee609cc7 - nvdisplay/src/common/softfloat/source/f32_to_ui64.c 054b23a974fc8d0bab232be433c4e516e6c1250a - nvdisplay/src/common/softfloat/source/f64_lt_quiet.c dde685423af544e5359efdb51b4bf9457c67fa3b - nvdisplay/src/common/softfloat/source/f32_sqrt.c fb062ecbe62a1f5878fd47f0c61490f2bde279dd - nvdisplay/src/common/softfloat/source/s_roundToI32.c 8e58f0258218475616ff4e6317516d40ad475626 - nvdisplay/src/common/softfloat/source/f32_lt_quiet.c ab19c6b50c40b8089cb915226d4553d1aa902b0e - nvdisplay/src/common/softfloat/source/f64_to_i32_r_minMag.c 86fdc2472526375539216461732d1db6a9f85b55 - nvdisplay/src/common/softfloat/source/s_roundPackToF32.c 9266c83f3e50093cc45d7be6ab993a0e72af1685 - nvdisplay/src/common/softfloat/source/s_roundPackToF64.c 2e0fec421f4defd293cf55c5f3af7d91f4b7d2cc - nvdisplay/src/common/softfloat/source/ui64_to_f32.c 68843a93e1f46195243ef1164f611b759cf19d17 - nvdisplay/src/common/softfloat/source/f32_le_quiet.c 00ab2120f71117161d4f6daaa9b90a3036a99841 - nvdisplay/src/common/softfloat/source/f32_to_ui32.c d0f8f08c225b60d88b6358d344404ba9df3038ec - nvdisplay/src/common/softfloat/source/s_normSubnormalF32Sig.c 0108fe6f0d394ad72083aff9bb58507f97a0b669 - nvdisplay/src/common/softfloat/source/ui32_to_f64.c 
7bc81f5bc894118c08bfd52b59e010bc068ed762 - nvdisplay/src/common/softfloat/source/ui32_to_f32.c 0adfa7e174cdb488bb22b06642e14e7fc6f49c67 - nvdisplay/src/common/softfloat/source/s_roundToI64.c c3ce12c227d25bc0de48fbcf914fc208e2448741 - nvdisplay/src/common/softfloat/source/f64_sub.c b9fd15957f7ae5effeccb5d8adaa7434b43f44e1 - nvdisplay/src/common/softfloat/source/s_roundToUI64.c 29396b7c23941024a59d5ea06698d2fbc7e1a6ca - nvdisplay/src/common/softfloat/source/f64_to_i64.c ae25eea499b3ea5bdd96c905fd0542da11083048 - nvdisplay/src/common/softfloat/source/s_normRoundPackToF64.c b22876b0695f58ee56143c9f461f1dde32fefbf3 - nvdisplay/src/common/softfloat/source/f64_to_ui64.c b8c5ccc1e511637d8b2ba2657de4937b80c01c07 - nvdisplay/src/common/softfloat/source/f32_le.c 1ff879eca2a273293b5cd6048419b2d2d8063b93 - nvdisplay/src/common/softfloat/source/f64_mulAdd.c 0e9694d551848d88531f5461a9b3b91611652e9a - nvdisplay/src/common/softfloat/source/f64_to_ui32_r_minMag.c 5a5e0d9f1ee7e8c0d1d4f9fbcf6eba330a5f1792 - nvdisplay/src/common/softfloat/source/f32_isSignalingNaN.c bc992c88f3de09e3a82447cf06dbde7c6604f7f8 - nvdisplay/src/common/softfloat/source/f64_to_f32.c 1a86a6948bf6768bd23a19f1f05d40968c1d2b15 - nvdisplay/src/common/softfloat/source/f64_rem.c 50daf9186bc5d0180d1453c957164b136d5ffc89 - nvdisplay/src/common/softfloat/source/f64_eq.c 09cb0cdb90eb23b53cd9c1a76ba26021084710d1 - nvdisplay/src/common/softfloat/source/s_addMagsF32.c 9f4d355d85fbe998e243fe4c7bbf8ad23062b6e2 - nvdisplay/src/common/softfloat/source/i64_to_f64.c fd40a71c7ebf9d632a384fadf9487cfef4f3ea98 - nvdisplay/src/common/softfloat/source/s_shiftRightJam128.c aaf6ccb77a1a89fa055a0fb63513297b35e2e54b - nvdisplay/src/common/softfloat/source/f64_le_quiet.c 38bd00e9c4d2f1354c611404cca6209a6c417669 - nvdisplay/src/common/softfloat/source/s_countLeadingZeros64.c d9a86343e6cc75714f65f690082dd4b0ba724be9 - nvdisplay/src/common/softfloat/source/s_roundPackToF16.c 0bf499c0e3a54186fa32b38b310cc9d98ccdcfe3 - 
nvdisplay/src/common/softfloat/source/f32_eq.c d4b26dc407a891e9ff5324853f1845a99c5d5cd2 - nvdisplay/src/common/softfloat/source/f32_to_i32.c 296c40b0589536cb9af3231ad3dcd7f2baaa6887 - nvdisplay/src/common/softfloat/source/f64_lt.c 0d8e42636a3409a647291fdb388001c2b11bba07 - nvdisplay/src/common/softfloat/source/f32_to_f16.c ec1a797b11f6e846928a4a49a8756f288bda1dfa - nvdisplay/src/common/softfloat/source/i32_to_f64.c 729e790328168c64d65a1355e990274c249bbb3a - nvdisplay/src/common/softfloat/source/f32_to_i32_r_minMag.c 9a5b93459ace2da23964da98617d6b18006fab86 - nvdisplay/src/common/softfloat/source/s_countLeadingZeros8.c 84b0a01ba2a667eb28b166d45bd91352ead83e69 - nvdisplay/src/common/softfloat/source/i64_to_f32.c 4b37be398b3e73ae59245f03b2ba2394fc902b4d - nvdisplay/src/common/softfloat/source/s_normSubnormalF64Sig.c 6f83fa864007e8227ae09bb36a7fdc18832d4445 - nvdisplay/src/common/softfloat/source/f32_mul.c daeb408588738b3eb4c8b092d7f92ac597cf1fc6 - nvdisplay/src/common/softfloat/source/f32_rem.c a94c8c2bd74633027e52e96f41d24714d8081eb4 - nvdisplay/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c 69dc4cc63b2a9873a6eb636ee7cb704cbd502001 - nvdisplay/src/common/softfloat/source/f64_to_ui32.c 50b3147f8413f0595a4c3d6e6eeab84c1ffecada - nvdisplay/src/common/softfloat/source/s_normRoundPackToF32.c bbc70102b30f152a560eb98e7a1a4b11b9ede85e - nvdisplay/src/common/softfloat/source/f64_sqrt.c 760fd7c257a1f915b61a1089b2acb143c18a082e - nvdisplay/src/common/softfloat/source/s_addMagsF64.c ebb4f674b6213fec29761fc4e05c1e3ddeda6d17 - nvdisplay/src/common/softfloat/source/f32_mulAdd.c 4445b1fbbd507144f038fd939311ff95bc2cf5f1 - nvdisplay/src/common/softfloat/source/ui64_to_f64.c 871cb1a4037d7b4e73cb20ad18390736eea7ae36 - nvdisplay/src/common/softfloat/source/f32_to_ui64_r_minMag.c ce37cdce572a3b02d42120e81c4969b39d1a67b6 - nvdisplay/src/common/softfloat/source/f64_to_i32.c c29536f617d71fe30accac44b2f1df61c98a97dc - nvdisplay/src/common/softfloat/source/f64_div.c 
54cbeb5872a86e822bda852ec15d3dcdad4511ce - nvdisplay/src/common/softfloat/source/f64_add.c e7890082ce426d88b4ec93893da32e306478c0d1 - nvdisplay/src/common/softfloat/source/s_approxRecipSqrt32_1.c 824383b03952c611154bea0a862da2b9e2a43827 - nvdisplay/src/common/softfloat/source/s_subMagsF32.c 00c612847b3bd227a006a4a2697df85866b80315 - nvdisplay/src/common/softfloat/source/s_mulAddF32.c 7c8e5ab3f9bf6b2764ce5fffe80b2674be566a12 - nvdisplay/src/common/softfloat/source/softfloat_state.c e4930e155580a0f5aa7f3694a6205bc9aebfe7aa - nvdisplay/src/common/softfloat/source/f32_to_f64.c 1484fc96d7731695bda674e99947280a86990997 - nvdisplay/src/common/softfloat/source/f32_to_i64.c 2960704c290f29aae36b8fe006884d5c4abcabb4 - nvdisplay/src/common/softfloat/source/f32_div.c 23b76c1d0be64e27a6f7e2ea7b8919f1a45a8e7c - nvdisplay/src/common/softfloat/source/f32_to_ui32_r_minMag.c fe06512577e642b09196d46430d038d027491e9f - nvdisplay/src/common/softfloat/source/f32_eq_signaling.c 5e6f9e120a17cc73297a35e4d57e4b9cbce01780 - nvdisplay/src/common/softfloat/source/s_mul64To128.c e0ad81cfb5d2c0e74dc4ece9518ca15ffc77beaf - nvdisplay/src/common/softfloat/source/f32_roundToInt.c d8b0c55a49c4fa0b040541db6d5ff634d7d103e7 - nvdisplay/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c a6d5c83f6a0542b33ac9c23ac65ef69002cfff9d - nvdisplay/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c 86cda6550cb02bbf595d1667573e4be83702a95e - nvdisplay/src/common/softfloat/source/8086-SSE/specialize.h 3d0dbc0a672d039a6346e1c21ddf87ffc9181978 - nvdisplay/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c d152bc457b655725185bdff42b36bb96d6e6715e - nvdisplay/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c 1dd1b424087d9c872684df0c1b4063b077992d5f - nvdisplay/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c 252c816378fddab616b1f2a61e9fedd549224483 - nvdisplay/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c 21a11759ed2afd746a47c4d78b67640c2d052165 - 
nvdisplay/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c 0cbae7a5abc336331d460cbd3640d2cda02af434 - nvdisplay/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c 4cd1d6cfca3936a39aab9bc0eb622f5c7c848be1 - nvdisplay/src/common/softfloat/source/include/softfloat_types.h 1ded4df85ff5fa904fa54c27d681265425be1658 - nvdisplay/src/common/softfloat/source/include/primitiveTypes.h 9645e179cf888bcd0e3836e8126b204b4b42b315 - nvdisplay/src/common/softfloat/source/include/softfloat.h de09949a0ca5cd2a84b882b5b5c874d01d3ae11a - nvdisplay/src/common/softfloat/source/include/primitives.h f36c896cfa01f1de9f9420189319e4e00c7fc52a - nvdisplay/src/common/softfloat/source/include/internals.h 893c70c95809f463c7af6dc9c814527804fcdf53 - nvdisplay/src/nvidia/Makefile c5f16fdf43ca3d2845d120c219d1da11257072b0 - nvdisplay/src/nvidia/nv-kernel.ld 285ab886f5fad5caf3f6bd0b0c7102bd4c4300bd - nvdisplay/src/nvidia/arch/nvalloc/unix/include/nv-reg.h 4750735d6f3b334499c81d499a06a654a052713d - nvdisplay/src/nvidia/arch/nvalloc/unix/include/nv-caps.h 1b53bbf5f8452b8057ff2dd7828947a047db38d0 - nvdisplay/src/nvidia/arch/nvalloc/unix/include/nv_escape.h fbcbb81ae14e8bfde0d665ad20f9cab9b0bbd9c3 - nvdisplay/src/nvidia/arch/nvalloc/unix/include/nv.h aba0bd796d932fa19e8fad55ed683ae57d68bffb - nvdisplay/src/nvidia/arch/nvalloc/unix/include/nv-priv.h 2f5fec803685c61c13f7955baaed056b5524652c - nvdisplay/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h 1e89b4a52a5cdc6cac511ff148c7448d53cf5d5c - nvdisplay/src/nvidia/arch/nvalloc/unix/include/os_custom.h 499e72dad20bcc283ee307471f8539b315211da4 - nvdisplay/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h e3679844971ecc4447259fb1bdf4fafbbdff2395 - nvdisplay/src/nvidia/arch/nvalloc/unix/include/osapi.h 40cb3c112bbcb6ae83a9186d0c9fa1857cf6a126 - nvdisplay/src/nvidia/arch/nvalloc/unix/include/os-interface.h ddfedb3b81feb09ea9daadf1a7f63f6309ee6e3b - nvdisplay/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h 
9c7b09c55aabbd670c860bdaf8ec9e8ff254b5e9 - nvdisplay/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h 1d8b347e4b92c340a0e9eac77e0f63b9fb4ae977 - nvdisplay/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h 3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - nvdisplay/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h cc3b2163238b2a8acb7e3ca213fb1ae6c5f0a409 - nvdisplay/src/nvidia/arch/nvalloc/unix/include/osfuncs.h 49dc935d4475b572478c63324f0832c972a4277d - nvdisplay/src/nvidia/arch/nvalloc/unix/src/os.c 54b912b640bdcae42f38c41694eb20abcaad61a7 - nvdisplay/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c 21ac9d6932199ce0755dbead297eb03c9900f8c9 - nvdisplay/src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c 9d9035afd7af31f30cdbf2d4c75e5e09180f0981 - nvdisplay/src/nvidia/arch/nvalloc/unix/src/osunix.c b5b409625fde1b640e4e93276e35248f0fccfa4c - nvdisplay/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c 532366fd9a288a812eca78b92b304ba3625f8c0a - nvdisplay/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c 6ebda7ea5b17b7b9bfa9387fc838db9f0c3405a5 - nvdisplay/src/nvidia/arch/nvalloc/unix/src/osinit.c 5940d8e83cd0014e3222952eab29eebaaad19b86 - nvdisplay/src/nvidia/arch/nvalloc/unix/src/osapi.c fb5272f3d0e465aedbc99ddcabb1c6c428837a6e - nvdisplay/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c 690927567b5344c8030e2c52d91f824bb94e956c - nvdisplay/src/nvidia/arch/nvalloc/unix/src/registry.c 006e77a594ae98067059ad3d7e93821316859063 - nvdisplay/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c f134270af5ecd7c5ba91bf5228fe3166b101dd6e - nvdisplay/src/nvidia/arch/nvalloc/unix/src/escape.c d1089d8ee0ffcdbf73a42d7c4edb90769aa79d8c - nvdisplay/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h ffd4f01212709e321d4097e424fe5d32038f5d8b - nvdisplay/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c 73a37ad59b9b13b61eb944748b6c2ba3cad7b630 - nvdisplay/src/nvidia/generated/g_traceable_nvoc.h 213ebb4fdfa3c2f64b5f998e2ad990e448d4a104 - 
nvdisplay/src/nvidia/generated/g_nv_debug_dump_nvoc.h 3d3385445934719abda1fefd4eb0762937be0e61 - nvdisplay/src/nvidia/generated/g_client_nvoc.c 3f5a391895fc900396bae68761fe9b4dcb382ec0 - nvdisplay/src/nvidia/generated/g_event_buffer_nvoc.h d04adc777f547ae6d1369cf4c94963e5abf90b86 - nvdisplay/src/nvidia/generated/g_context_dma_nvoc.c 4fe5357eabd0c5e351fb965ceead308240f68eb1 - nvdisplay/src/nvidia/generated/g_objtmr_nvoc.h 4302502637f5c4146cb963801258444f2d8173e1 - nvdisplay/src/nvidia/generated/g_allclasses.h f4a5684d5a877b90c7ae7b66436117c6feb65f91 - nvdisplay/src/nvidia/generated/g_gpu_mgr_nvoc.h 2239839c8a780a87e786439a49ab63e25d25001a - nvdisplay/src/nvidia/generated/g_rmconfig_util.h 6fd6953e4ae0af707376a40ea0e4f3e70872be7b - nvdisplay/src/nvidia/generated/g_os_desc_mem_nvoc.h 8915f69e67e1f3a809a5479e36280df06ce8dd90 - nvdisplay/src/nvidia/generated/g_system_mem_nvoc.c b5ddae1e6960b13101aa38b2edc0610aed438ede - nvdisplay/src/nvidia/generated/g_gpu_nvoc.c 07fd5f5534a6d751107f582ba187c7a53a139954 - nvdisplay/src/nvidia/generated/g_rs_resource_nvoc.h 46c1a2066ead316ea69c60dc323bdb649bc11c0f - nvdisplay/src/nvidia/generated/g_binary_api_nvoc.c c4fde03d5939b0eef108fde9c2f10661568f22a9 - nvdisplay/src/nvidia/generated/g_mem_nvoc.h fb78615cde6323784f51d33f2acd61fd4030fee0 - nvdisplay/src/nvidia/generated/g_device_nvoc.c d614f90730e2ee78bc3aae47b4e7976500e166e7 - nvdisplay/src/nvidia/generated/g_io_vaspace_nvoc.h f9bdef39159a8475626a0edcbc3a53505a0ff80a - nvdisplay/src/nvidia/generated/g_os_private.h cb03502bf603c88b709ec803b60efd1d6f8e5ee1 - nvdisplay/src/nvidia/generated/g_rpc-structures.h 7c1b36cca9e8bf1fe18284685a6a80620df348cb - nvdisplay/src/nvidia/generated/g_client_nvoc.h f6f40d568bcf2ae89547ad054f9b5357bac366ab - nvdisplay/src/nvidia/generated/g_event_nvoc.h e0b8f64c042dcbb6340552cb3517dabdeb490f1b - nvdisplay/src/nvidia/generated/g_hal_nvoc.h 2c28d729456749f16ae03fb48b1e416706762805 - nvdisplay/src/nvidia/generated/g_resource_fwd_decls_nvoc.h 
cc71518b4151dc2ee0592bbd2866d437043d0e1a - nvdisplay/src/nvidia/generated/g_kernel_head_nvoc.h 59a87763c6abdc54828f2785a7d90e43e607bc87 - nvdisplay/src/nvidia/generated/g_disp_inst_mem_nvoc.c d792fbb20b6ca5f2d62addf6a94b0c5027ae15fe - nvdisplay/src/nvidia/generated/g_subdevice_nvoc.h 6f3fc9676df77fa24c49140331b87ed5988ed57c - nvdisplay/src/nvidia/generated/rmconfig.h f9bdef39159a8475626a0edcbc3a53505a0ff80a - nvdisplay/src/nvidia/generated/g_os_hal.h 285af0d0517cb191387a05ad596f74291ec81737 - nvdisplay/src/nvidia/generated/g_mem_desc_nvoc.h 499a3d9c61a86b667cc77cf8653a71f7fe85078a - nvdisplay/src/nvidia/generated/g_nv_name_released.h 958d9a2cddc91edfafb5c2f3d9622443ac49a6ef - nvdisplay/src/nvidia/generated/g_objtmr_nvoc.c cba2c17804f6f2062dc5d75583e4a03e03016d1d - nvdisplay/src/nvidia/generated/g_disp_capabilities_nvoc.h 53b2c39666e1da206d44d69d54009f20440503bc - nvdisplay/src/nvidia/generated/g_eng_state_nvoc.h 757b3ecf94d0c8914a32c4bd302f8ccfa4027856 - nvdisplay/src/nvidia/generated/g_syncpoint_mem_nvoc.c 14450b18d002d4e1786d4630ef4f1994c07ef188 - nvdisplay/src/nvidia/generated/g_odb.h bfabd5155af3172e1c0a5a0b66721ff830c7b68f - nvdisplay/src/nvidia/generated/g_hypervisor_nvoc.h d405e01478d26ea99cc0012fa2d6e0021bbe6213 - nvdisplay/src/nvidia/generated/g_gpu_db_nvoc.c 07a37ff685e68a703455e0ed7db7940697487ed2 - nvdisplay/src/nvidia/generated/g_system_nvoc.c 7b0201852361118f277ee7cc6dd16212c0192f71 - nvdisplay/src/nvidia/generated/g_gpu_group_nvoc.h 21e3cf689d84b1a28e11f66cc68a0bc6713108b0 - nvdisplay/src/nvidia/generated/g_rs_server_nvoc.c 3a5457a216d197af8f120c660690a55ee44bdd8e - nvdisplay/src/nvidia/generated/g_generic_engine_nvoc.c ceb4dd72148dfe4a0581631147e8d7636abfd61f - nvdisplay/src/nvidia/generated/g_chips2halspec_nvoc.h ad50b3dbe1685eefe51c4fc296f3eade70789dfb - nvdisplay/src/nvidia/generated/g_gpu_resource_nvoc.h 8e0e60f6d30bbed679c43b4997875989314ee88c - nvdisplay/src/nvidia/generated/g_dce_client_nvoc.c dec0f585ca46dc8e1aae49c8ea58db5a415de65c - 
nvdisplay/src/nvidia/generated/g_rpc-message-header.h bd048add5f0781d90b55a5293881a2f59ace3070 - nvdisplay/src/nvidia/generated/g_binary_api_nvoc.h 3f581df19314b273244c4c42ea915ec8ef0d8ce2 - nvdisplay/src/nvidia/generated/g_rs_client_nvoc.h 871fd0260ab9c164b8f6a7d1aba4563af622f1ac - nvdisplay/src/nvidia/generated/g_disp_channel_nvoc.h b86536778197748c707c3e9e4c73c5fbcb037e32 - nvdisplay/src/nvidia/generated/g_generic_engine_nvoc.h cd833a822c1ce96c79135ba7221d24f347ceadb1 - nvdisplay/src/nvidia/generated/g_mem_mgr_nvoc.h 0dae533422e24d91a29c82d7be619160bbb6f6be - nvdisplay/src/nvidia/generated/g_context_dma_nvoc.h 6263c1ceca0797d34a102f9846acd1fdef06fb60 - nvdisplay/src/nvidia/generated/g_resserv_nvoc.h 1bdccdbabf5ae52fd65b829c35079bb7a8734939 - nvdisplay/src/nvidia/generated/g_tmr_nvoc.c 410a759c949904b7ae1eecafb31143fad579c0a1 - nvdisplay/src/nvidia/generated/g_rs_client_nvoc.c edead99d125425ddf8f2fa4e4261b8cc3bf566fc - nvdisplay/src/nvidia/generated/g_standard_mem_nvoc.c 05cb2fed8648f07b54dc2e8bacbafb323ea8262e - nvdisplay/src/nvidia/generated/g_standard_mem_nvoc.h f8e842add67dc070cc011ea103fc56cfd81c8b9a - nvdisplay/src/nvidia/generated/g_chips2halspec_nvoc.c df070e15630a11b2f4b64d52228fa5a6e7ab2aa9 - nvdisplay/src/nvidia/generated/g_gpu_halspec_nvoc.h cc635daf3d7a9a176580951841b82e9eb0d6f5ad - nvdisplay/src/nvidia/generated/g_kernel_head_nvoc.c 42d784e8b478bbf48293a805aa227f0abdf1923b - nvdisplay/src/nvidia/generated/g_event_nvoc.c a016a7d8e07389736c388cb973f3b2a177ea917d - nvdisplay/src/nvidia/generated/g_disp_capabilities_nvoc.c 59c3612a596ad6b996c9d1506f9893bd1b5effee - nvdisplay/src/nvidia/generated/g_gpu_mgr_nvoc.c de97c5afdc34cb9aff23c3ba166e21f660cf1f47 - nvdisplay/src/nvidia/generated/g_hal.h 41784541b2e9ee778b52e686288fe492c0276fec - nvdisplay/src/nvidia/generated/g_hal_mgr_nvoc.c 1d66bab50a7d39faa2b0fec469a4512d2c7610d5 - nvdisplay/src/nvidia/generated/g_rmconfig_util.c fbcbeb92e46ba11ac26c04c9688b3ffcf10f5c53 - 
nvdisplay/src/nvidia/generated/g_prereq_tracker_nvoc.h e50c91a674508b23b072e0dd2edbf743f24b333d - nvdisplay/src/nvidia/generated/g_object_nvoc.c 376572489e0d4211663da22d5b0de7c7e740fb29 - nvdisplay/src/nvidia/generated/g_hal_mgr_nvoc.h 73c598515eb7985c8f4cace0946ec9613960be6c - nvdisplay/src/nvidia/generated/g_gpu_group_nvoc.c e839f8a5ebef5f28818bb5824bd7c52320db9a74 - nvdisplay/src/nvidia/generated/g_disp_sf_user_nvoc.h ab79a1418b65b9d65081456583169f516dd510c9 - nvdisplay/src/nvidia/generated/g_event_buffer_nvoc.c 23d16b4534103f24fac5bb86eb8bab40e5bcba57 - nvdisplay/src/nvidia/generated/g_hda_codec_api_nvoc.c 7523c2ee9228ad0e2fb3566b23b9720d7896afae - nvdisplay/src/nvidia/generated/g_eng_state_nvoc.c ca042cfcdfe8cc8a141f8bb5c9e6c05d8a71b707 - nvdisplay/src/nvidia/generated/g_hda_codec_api_nvoc.h c3b4c6a1b90a1547e229bb2973eb19c01e1d0055 - nvdisplay/src/nvidia/generated/g_dce_client_nvoc.h e48b8b6ba9da5630a7ade526acbb94e50d9b636d - nvdisplay/src/nvidia/generated/g_vaspace_nvoc.h cb02e66e5fc06aa340ab460c977961701e9ba295 - nvdisplay/src/nvidia/generated/g_subdevice_nvoc.c 93f9738c0e8aa715592306ddf023adf6b548dcc4 - nvdisplay/src/nvidia/generated/g_nvh_state.h fa785f8138598af783aefecf10b141d524e6bb42 - nvdisplay/src/nvidia/generated/g_virt_mem_mgr_nvoc.c 162777624d03af2f17dfdc28bc35143e2ec6cdee - nvdisplay/src/nvidia/generated/g_os_nvoc.c 3b0e038829647cfe0d8807579db33416a420d1d2 - nvdisplay/src/nvidia/generated/g_chips2halspec.h b378d336af4d5cb4b1fb13b85042fad1fe02f4cc - nvdisplay/src/nvidia/generated/g_journal_nvoc.h 81a6a28692f50efeebecad125de0585dd711ff36 - nvdisplay/src/nvidia/generated/g_device_nvoc.h 0b15dd4515c5e436a659883a48e62bf3c68bf439 - nvdisplay/src/nvidia/generated/g_gpu_nvoc.h b07c2c5e8df4de2bb9d242fd1606f1a57b8a742d - nvdisplay/src/nvidia/generated/g_io_vaspace_nvoc.c d32d0b65f5f76cb56ca7cd83c0adfe5cb5330924 - nvdisplay/src/nvidia/generated/g_resource_nvoc.h 40c937ca657bda9c0b67bd24c5047d39e596c16c - nvdisplay/src/nvidia/generated/g_disp_channel_nvoc.c 
801eb295d07258ad70b99cb0fe85f3421690e0c4 - nvdisplay/src/nvidia/generated/g_rmconfig_private.h 182602832a033b3e2d5f88d4ba8febe63eeb2f9e - nvdisplay/src/nvidia/generated/g_client_resource_nvoc.c 65d1ace1e68c9b39cce6db61aa8b86ee47a0ae4b - nvdisplay/src/nvidia/generated/g_gpu_halspec_nvoc.c b29061454e7d8daa0cef0787f12726d105faf5c4 - nvdisplay/src/nvidia/generated/g_gpu_resource_nvoc.c c370a103a4c1c9cf2df3763988e77ef8f7bc6afb - nvdisplay/src/nvidia/generated/g_gpu_db_nvoc.h 09597f23d6a5440258656be81e7e6709390128f8 - nvdisplay/src/nvidia/generated/g_hal_private.h 88d336f88c9b72ec2c1352d4ebe00c0831eafbca - nvdisplay/src/nvidia/generated/g_sdk-structures.h 12776c69191b583ffcf0914697cf41802f52ef01 - nvdisplay/src/nvidia/generated/g_hal_archimpl.h a6174ad345cfdf926cbb4c86c7e8eeadfccb0ddf - nvdisplay/src/nvidia/generated/g_gpu_class_list.c 0f3140b5eae77a6055f32a91cb13b026bbb23905 - nvdisplay/src/nvidia/generated/g_kern_disp_nvoc.h 6124890a54e529dff8b9d6ecf8f4bebe1e10a8a2 - nvdisplay/src/nvidia/generated/g_os_nvoc.h e449382e19e4dcfcf0aec0babe5a1c8ce2f4249b - nvdisplay/src/nvidia/generated/g_kern_disp_nvoc.c 2ab6933e07a84c64dfcbeef3b3f4e3f14249d8c8 - nvdisplay/src/nvidia/generated/g_tmr_nvoc.h 9646d1c4d472ad800c7c93eec15cc03dd9201073 - nvdisplay/src/nvidia/generated/g_disp_objs_nvoc.h da3cc08f12ccee23bcb1c0d0c757b8bbcb81e4fd - nvdisplay/src/nvidia/generated/g_rs_server_nvoc.h ddc0ac4e1d8b8aef15e147f1f85f8df37c196763 - nvdisplay/src/nvidia/generated/g_hal_register.h c46cae4a17181c48bafc01237b83537df61c41ae - nvdisplay/src/nvidia/generated/g_hal_nvoc.c 4b9f2ee66b59181f226e1af5087db6ea80f1ee27 - nvdisplay/src/nvidia/generated/g_virt_mem_mgr_nvoc.h 7bb406aa863430507bdf07b5f3e519c0d756220a - nvdisplay/src/nvidia/generated/g_rs_resource_nvoc.c fa5e1c6001e60f77415d0a8f87c8b548b12e1217 - nvdisplay/src/nvidia/generated/g_mem_mgr_nvoc.c 47b7744ddd01b821bf2fd25fdb25c8d6d55ee01d - nvdisplay/src/nvidia/generated/g_prereq_tracker_nvoc.c f42bfa3b5a801358d30f852625d8456290550f46 - 
nvdisplay/src/nvidia/generated/g_disp_inst_mem_nvoc.h 76b1f545e3712a2f8e7c31b101acd9dd682c52f8 - nvdisplay/src/nvidia/generated/g_traceable_nvoc.c 0269da77a8db8efde1debc8236f2b3de2cd2597e - nvdisplay/src/nvidia/generated/g_eng_desc_nvoc.h abda8536d885be1422810c184b936bbc880972eb - nvdisplay/src/nvidia/generated/g_os_desc_mem_nvoc.c ac842d9de5eae74ef02b0a75259fb016b80c6eac - nvdisplay/src/nvidia/generated/g_disp_objs_nvoc.c 205490d6651110f28009e752fa286f818bed22fb - nvdisplay/src/nvidia/generated/g_syncpoint_mem_nvoc.h e3c4822ac998ab5c7946919c85011f6172dc35ee - nvdisplay/src/nvidia/generated/g_mem_nvoc.c b82e5db65ad41764f456d6f924c89d76c165e48d - nvdisplay/src/nvidia/generated/g_system_nvoc.h 133e94f73c781709f407b03d8cdfdd8865c39b4b - nvdisplay/src/nvidia/generated/g_disp_sf_user_nvoc.c e0988b45cf712f1a7662b6f822eaed3ffd9938f3 - nvdisplay/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h 079ac6d2a90bd2fc9413e092a729202dbc5f724a - nvdisplay/src/nvidia/generated/g_system_mem_nvoc.h 4f4acfdefc7b9a0cdfe2d5840cc18c9c33366053 - nvdisplay/src/nvidia/generated/g_object_nvoc.h 2b49950ba8f540ed4231c3334810edbb212bb859 - nvdisplay/src/nvidia/generated/g_client_resource_nvoc.h 87a5ae8e07103074020ba052ca45ab39e918d3bd - nvdisplay/src/nvidia/generated/g_resource_nvoc.c ac3965eea078f1998c3a3041f14212578682e599 - nvdisplay/src/nvidia/generated/g_vaspace_nvoc.c 63e9d0416d5ca1fdf547b5fba9ec76e54690c9dc - nvdisplay/src/nvidia/generated/g_ref_count_nvoc.h fff3ebc8527b34f8c463daad4d20ee5e33321344 - nvdisplay/src/nvidia/inc/lib/ref_count.h 04dba2b7a6a360f3e855a7d6a7484ddcdfb90c19 - nvdisplay/src/nvidia/inc/lib/base_utils.h f8d9eb5f6a6883de962b63b4b7de35c01b20182f - nvdisplay/src/nvidia/inc/lib/protobuf/prb.h 601edb7333b87349d791d430f1cac84fb6fbb919 - nvdisplay/src/nvidia/inc/lib/zlib/inflate.h 9255fff39d7422ca4a56ba5ab60866779201d3e8 - nvdisplay/src/nvidia/inc/libraries/poolalloc.h 8dd7f2d9956278ed036bbc288bff4dde86a9b509 - nvdisplay/src/nvidia/inc/libraries/eventbufferproducer.h 
1b28bd0ee2e560ca2854a73a3ee5fb1cf713d013 - nvdisplay/src/nvidia/inc/libraries/nvoc/utility.h 5cadc87ba685991c7d4c6d453dcc9a2cca4398bf - nvdisplay/src/nvidia/inc/libraries/nvoc/prelude.h 62a18f19f79512ebccdf286068e0b557c7926e13 - nvdisplay/src/nvidia/inc/libraries/nvoc/runtime.h 00433b51c4d6254fd4dfc3dcd9b4ad59e485e7c0 - nvdisplay/src/nvidia/inc/libraries/nvoc/object.h 664ff0e10e893923b70425fa49c9c48ed0735573 - nvdisplay/src/nvidia/inc/libraries/nvoc/rtti.h 56b8bae7756ed36d0831f76f95033f74eaab01db - nvdisplay/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h b795f5cb77ecd2cc407102900b63977cfb34bbfd - nvdisplay/src/nvidia/inc/libraries/ioaccess/ioaccess.h ba3c81e9eae32eefbf81818b48fdf6ccd7e73163 - nvdisplay/src/nvidia/inc/libraries/utils/nvmacro.h e53d5fc9b66dbec4c947224050866cec30b2f537 - nvdisplay/src/nvidia/inc/libraries/utils/nvrange.h 167f49cccc912430bb6b3cb77395f665a32cc8be - nvdisplay/src/nvidia/inc/libraries/utils/nvbitvector.h 398e4cd63852a18da6e42b920eacd927a2c38bc0 - nvdisplay/src/nvidia/inc/libraries/utils/nv_enum.h 18321894aa7631b491ea39edc2d45d1028cdc9c6 - nvdisplay/src/nvidia/inc/libraries/utils/nvprintf.h 1ed5d8ae82f37112b163187fa48d2720957e6bdf - nvdisplay/src/nvidia/inc/libraries/utils/nvassert.h 65a237b66732aafe39bc4a14d87debd2b094fb83 - nvdisplay/src/nvidia/inc/libraries/containers/map.h a28ab42de95e4878fb46e19d7b965c23f92b3213 - nvdisplay/src/nvidia/inc/libraries/containers/btree.h 4cd6b110470da3aee29e999e096ca582104fab21 - nvdisplay/src/nvidia/inc/libraries/containers/queue.h 3924b67e6d63e9a15876331c695daaf679454b05 - nvdisplay/src/nvidia/inc/libraries/containers/list.h 1dacc1c1efc757c12e4c64eac171474a798b86fd - nvdisplay/src/nvidia/inc/libraries/containers/eheap_old.h c9e75f7b02241ededa5328a4f559e70dec60d159 - nvdisplay/src/nvidia/inc/libraries/containers/type_safety.h 969cbac56935a80fafd7cceff157b27e623f9429 - nvdisplay/src/nvidia/inc/libraries/containers/multimap.h 7239704e6fe88b9d75984fb5e9f4b5706502d7f3 - 
nvdisplay/src/nvidia/inc/libraries/nvlog/nvlog_printf.h e08146f5de1596f5337c49cfbe180e30e880dedb - nvdisplay/src/nvidia/inc/libraries/nvlog/nvlog.h d2c035e67e295b8f33f0fc52d9c30e43c5d7c2ba - nvdisplay/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h bdb558ee8f782e6be06fc262820f6bd9ce75bd51 - nvdisplay/src/nvidia/inc/libraries/tls/tls.h 3dcee4e110f4c571e7f49fae2f2d0630d008a906 - nvdisplay/src/nvidia/inc/libraries/nvport/nvport.h 2487ffc1eb1e50b27ba07e0581da543d80bdaa72 - nvdisplay/src/nvidia/inc/libraries/nvport/safe.h b93c2532babf176f7b91735682e7d7cdc41f96f8 - nvdisplay/src/nvidia/inc/libraries/nvport/debug.h 147d47ef4bd860394d1d8ae82c68d97887e2898b - nvdisplay/src/nvidia/inc/libraries/nvport/core.h 6d698ca4fc5e48c525f214a57e1de0cc4aa9e36b - nvdisplay/src/nvidia/inc/libraries/nvport/thread.h 3ac7ddf3d402f3fd20cffe9d4e93f457de319605 - nvdisplay/src/nvidia/inc/libraries/nvport/sync.h a1d93b6ec8ff01a3c2651e772a826ee11a7781d7 - nvdisplay/src/nvidia/inc/libraries/nvport/util.h 3e656d5ed1f5df898ec444921ce77a40ead66b28 - nvdisplay/src/nvidia/inc/libraries/nvport/atomic.h 46345715dde843be2890b33f191b2f3b69385e0d - nvdisplay/src/nvidia/inc/libraries/nvport/memory.h 6ad1beaa2783a57330240d47b373930cd36ca5d0 - nvdisplay/src/nvidia/inc/libraries/nvport/crypto.h 22420ad669a9809602f111385b7840556e58ecff - nvdisplay/src/nvidia/inc/libraries/nvport/cpu.h 2805fad632acad045044e0b8417de88032177300 - nvdisplay/src/nvidia/inc/libraries/nvport/string.h 23afbd04f4e4b3301edcfdec003c8e936d898e38 - nvdisplay/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h 2a76929dc6b0e8624d02002600bc454cc851dee4 - nvdisplay/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h a8c9b83169aceb5f97d9f7a411db449496dc18f6 - nvdisplay/src/nvidia/inc/libraries/nvport/inline/util_generic.h 600ad8781585e87df49ab1aaa39a07c8e8de74f5 - nvdisplay/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h 31f2042e852f074970644903335af5ffa2b59c38 - 
nvdisplay/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h 1d6a239ed6c8dab1397f056a81ff456141ec7f9c - nvdisplay/src/nvidia/inc/libraries/nvport/inline/util_valist.h aafca30178f49676f640be9c6d34f623a3e3a9a4 - nvdisplay/src/nvidia/inc/libraries/nvport/inline/safe_generic.h 0747ee16c7e6c726f568867d0fbbad411c8795c8 - nvdisplay/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h eedda5c4b0611c3b95f726b0a2db4b0a23b7b1cf - nvdisplay/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h 2dec1c73507f66736674d203cc4a00813ccb11bc - nvdisplay/src/nvidia/inc/libraries/resserv/rs_domain.h 89ece4711626bf1e4197c69bd5754e2798214d76 - nvdisplay/src/nvidia/inc/libraries/resserv/resserv.h a0d3d164eb92280353cdc4458d2561aae8a68c1d - nvdisplay/src/nvidia/inc/libraries/resserv/rs_server.h bacdb2c1a1dbf182a0a3be15efa0a5f83365118f - nvdisplay/src/nvidia/inc/libraries/resserv/rs_resource.h 841ddca998b570feb1d59b50d644c8f2b59ae8e9 - nvdisplay/src/nvidia/inc/libraries/resserv/rs_client.h cd033fe116a41285a979e629a2ee7b11ec99369f - nvdisplay/src/nvidia/inc/libraries/resserv/rs_access_rights.h df174d6b4f718ef699ca6f38c16aaeffa111ad3c - nvdisplay/src/nvidia/inc/libraries/resserv/rs_access_map.h 5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - nvdisplay/src/nvidia/inc/os/dce_rm_client_ipc.h c6efd51b8b8447829a0867cd7fb7a5a5a2fb1e3d - nvdisplay/src/nvidia/inc/kernel/diagnostics/traceable.h fd780f85cb1cd0fd3914fa31d1bd4933437b791d - nvdisplay/src/nvidia/inc/kernel/diagnostics/tracer.h 7e75b5d99376fba058b31996d49449f8fe62d3f0 - nvdisplay/src/nvidia/inc/kernel/diagnostics/profiler.h 7615ac3a83d0ad23b2160ff8ad90bec9eb1f3c6c - nvdisplay/src/nvidia/inc/kernel/diagnostics/journal.h b259f23312abe56d34a8f0da36ef549ef60ba5b0 - nvdisplay/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h 3a28bf1692efb34d2161907c3781401951cc2d4f - nvdisplay/src/nvidia/inc/kernel/diagnostics/journal_structs.h 8ef620afdf720259cead00d20fae73d31e59c2f7 - nvdisplay/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h 
e5b881419bc00d925eba9f8493f6b36cf3ce7ca7 - nvdisplay/src/nvidia/inc/kernel/os/os_stub.h 408c0340350b813c3cba17fd36171075e156df72 - nvdisplay/src/nvidia/inc/kernel/os/os.h af25180a08db4d5d20afd09f948b15d8c4d2d738 - nvdisplay/src/nvidia/inc/kernel/os/os_fixed_mode_timings_props.h c8496199cd808ed4c79d8e149961e721ad96714e - nvdisplay/src/nvidia/inc/kernel/os/capability.h cda75171ca7d8bf920aab6d56ef9aadec16fd15d - nvdisplay/src/nvidia/inc/kernel/os/nv_memory_type.h 497492340cea19a93b62da69ca2000b811c8f5d6 - nvdisplay/src/nvidia/inc/kernel/rmapi/event_buffer.h 5f60ac544252b894ac7ecc0c6dc4446e6275eae5 - nvdisplay/src/nvidia/inc/kernel/rmapi/rmapi.h b4bae9ea958b4d014908459e08c93319784c47dd - nvdisplay/src/nvidia/inc/kernel/rmapi/event.h 99a27d87c7f1487f8df5781d284c2e9a83525892 - nvdisplay/src/nvidia/inc/kernel/rmapi/binary_api.h 61e3704cd51161c9804cb168d5ce4553b7311973 - nvdisplay/src/nvidia/inc/kernel/rmapi/resource.h 2baec15f4c68a9c59dd107a0db288e39914e6737 - nvdisplay/src/nvidia/inc/kernel/rmapi/client.h ac9288d75555180c1d5dd6dd7e0e11fb57a967f2 - nvdisplay/src/nvidia/inc/kernel/rmapi/exports.h f19dad1746e639d866c700c2f871fcc0144f2e5e - nvdisplay/src/nvidia/inc/kernel/rmapi/control.h c9cb08c7c73c0bdd75a320640d16bf4b4defe873 - nvdisplay/src/nvidia/inc/kernel/rmapi/mapping_list.h 4453fe6463e3155063f2bdbf36f44697606a80a5 - nvdisplay/src/nvidia/inc/kernel/rmapi/client_resource.h f3028fbcafe73212a94d295951122b532ff5445b - nvdisplay/src/nvidia/inc/kernel/rmapi/rs_utils.h 255c28b9bd27098382bace05af3ad7f195d12895 - nvdisplay/src/nvidia/inc/kernel/rmapi/rmapi_utils.h a92dbf2870fe0df245ea8967f2f6a68f5075ecaf - nvdisplay/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h f1713ecc0b3e58e46c346409dbf4630aa6f7f3ed - nvdisplay/src/nvidia/inc/kernel/rmapi/param_copy.h 2b23f2dbd8f3f63a17a1b63ebb40a2fd7fd8801a - nvdisplay/src/nvidia/inc/kernel/rmapi/alloc_size.h 5e9928552086947b10092792db4a8c4c57a84adf - nvdisplay/src/nvidia/inc/kernel/platform/acpi_common.h 
2f05394872ffa95d700b7822489fa59f74ad5819 - nvdisplay/src/nvidia/inc/kernel/platform/sli/sli.h f929d43974893cd155ab2f5f77606f0040fe3e39 - nvdisplay/src/nvidia/inc/kernel/core/locks.h bdc4ab675c6f6c4bd77c3aaf08aa5c865b186802 - nvdisplay/src/nvidia/inc/kernel/core/hal.h cbfff1f06eecc99fb5a1c82d43397043058f02fc - nvdisplay/src/nvidia/inc/kernel/core/printf.h 457c02092adfc1587d6e3cd866e28c567acbc43a - nvdisplay/src/nvidia/inc/kernel/core/info_block.h bffae4da6a1f9b7dc7c879587fd674b49b46dac1 - nvdisplay/src/nvidia/inc/kernel/core/core.h 37f267155ddfc3db38f110dbb0397f0463d055ff - nvdisplay/src/nvidia/inc/kernel/core/strict.h b00302aec7e4f4e3b89a2f699f8b1f18fc17b1ba - nvdisplay/src/nvidia/inc/kernel/core/hal_mgr.h ed496ab6e8b64d3398f929146e908c5a453a03d9 - nvdisplay/src/nvidia/inc/kernel/core/prelude.h b319914c97f9978488e8fb049d39c72ed64fd4d2 - nvdisplay/src/nvidia/inc/kernel/core/thread_state.h b5859c7862fb3eeb266f7213845885789801194a - nvdisplay/src/nvidia/inc/kernel/core/system.h ce3302c1890e2f7990434f7335cb619b12dee854 - nvdisplay/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h cf3d1427394c425c543e253adf443192ca613762 - nvdisplay/src/nvidia/inc/kernel/gpu/gpu_access.h 1938fd2511213c8003864d879cf1c41ae1169a5f - nvdisplay/src/nvidia/inc/kernel/gpu/gpu_uuid.h bf894a769c46d5d173e3875cd9667bb3fe82feb9 - nvdisplay/src/nvidia/inc/kernel/gpu/gpu_timeout.h f17b704f2489ffedcc057d4a6da77c42ece42923 - nvdisplay/src/nvidia/inc/kernel/gpu/gpu_resource.h 6b27c9edf93f29a31787d9acaaefb2cefc31e7d4 - nvdisplay/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h 426c6ab6cecc3b1ba540b01309d1603301a86db1 - nvdisplay/src/nvidia/inc/kernel/gpu/eng_desc.h c33ab6494c9423c327707fce2bcb771328984a3c - nvdisplay/src/nvidia/inc/kernel/gpu/gpu_halspec.h 61c7d3ac2dc61ee81abd743a6536a439592ee162 - nvdisplay/src/nvidia/inc/kernel/gpu/gpu_child_list.h 0e8353854e837f0ef0fbf0d5ff5d7a25aa1eef7c - nvdisplay/src/nvidia/inc/kernel/gpu/eng_state.h 76b24227c65570898c19e16bf35b2cad143f3d05 - 
nvdisplay/src/nvidia/inc/kernel/gpu/gpu.h efc50bb2ff6ccf1b7715fd413ca680034920758e - nvdisplay/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h 24d01769b39a6dd62574a95fad64443b05872151 - nvdisplay/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h 576216219d27aa887beeccefc22bcead4d1234d7 - nvdisplay/src/nvidia/inc/kernel/gpu/disp/kern_disp.h 277a2719f8c063037c6a9ed55ade2b1cb17f48ae - nvdisplay/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h 51a209575d3e3fe8feb7269ece7df0846e18ca2a - nvdisplay/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h 61711ed293ee6974a6ed9a8a3732ae5fedcdc666 - nvdisplay/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h be7da8d1106ee14ff808d86abffb86794299b2df - nvdisplay/src/nvidia/inc/kernel/gpu/disp/disp_objs.h 74bc902cd00b17da3a1dfa7fd3ebc058de439b76 - nvdisplay/src/nvidia/inc/kernel/gpu/disp/disp_channel.h b39826404d84e0850aa3385691d8dde6e30d70d4 - nvdisplay/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h f758ea5f9cbd23a678290ef0b8d98d470e3499e0 - nvdisplay/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h 9a33a37c6cea9bad513aa14c942c689f28f7c0d8 - nvdisplay/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h 5179f01acf7e9e251552dc17c0dcd84f7d341d82 - nvdisplay/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h ccca322d29ae171ee81c95d58e31f1c109429ae7 - nvdisplay/src/nvidia/inc/kernel/gpu/gsp/message_queue.h 1e3bebe46b7f2f542eedace554a4156b3afb51f1 - nvdisplay/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h 97d0a067e89251672f191788abe81cf26dcb335f - nvdisplay/src/nvidia/inc/kernel/gpu/device/device.h 889ba18a43cc2b5c5e970a90ddcb770ce873b785 - nvdisplay/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h 6756126ddd616d6393037bebf371fceacaf3a9f1 - nvdisplay/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h a29f55d5fbc90dade83df3ef3263018633675284 - nvdisplay/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h 20416f7239833dcaa743bbf988702610e9251289 - nvdisplay/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h 
b52e6a0499640e651aa4200b2c8a1653df04a420 - nvdisplay/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h 82abc2458910250c1a912e023f37e87c1c9bbb9e - nvdisplay/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h ce4e0f7177f46f4fc507a68b635e5395a3f7dde6 - nvdisplay/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h 2c48d7335bdb0b7ea88b78216c0aeab2e11e00c1 - nvdisplay/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h 5b151d0d97b83c9fb76b76c476947f9e15e774ad - nvdisplay/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h e188d9f2d042ffe029b96d8fbb16c79a0fc0fb01 - nvdisplay/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h ea32018e3464bb1ac792e39227badf482fa2dc67 - nvdisplay/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h 02d6a37ef1bb057604cb98a905fa02429f200c96 - nvdisplay/src/nvidia/inc/kernel/mem_mgr/mem.h a5f49a031db4171228a27482d091283e84632ace - nvdisplay/src/nvidia/inc/kernel/mem_mgr/system_mem.h d15991bc770c5ab41fe746995294c5213efa056b - nvdisplay/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h 5ae08b2077506cbc41e40e1b3672e615ce9d910f - nvdisplay/src/nvidia/inc/kernel/mem_mgr/vaspace.h 0ce5d6370c086d2944b2e8d31ff72a510d98dc8f - nvdisplay/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h 4c386104eaead66c66df11258c3f1182b46e96ee - nvdisplay/src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h 1a08e83fd6f0a072d6887c60c529e29211bcd007 - nvdisplay/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h 2d4afabd63699feec3aea5e89601db009fc51a08 - nvdisplay/src/nvidia/inc/kernel/mem_mgr/standard_mem.h 7df66a87c9498ae73c986e60fcb9cb1cbcd19e19 - nvdisplay/src/nvidia/kernel/inc/objrpc.h 1feab39692ea8796ac7675f4780dfd51e6e16326 - nvdisplay/src/nvidia/kernel/inc/objtmr.h 0cff83f4fdcc8d025cd68e0a12faaeead09fa03b - nvdisplay/src/nvidia/kernel/inc/tmr.h 961ed81de50e67eadf163a3a8008ce1fde1d880c - nvdisplay/src/nvidia/kernel/inc/vgpu/rpc_hal_stubs.h 4db7387cc1ce08ccc62404b80b19c7f1b685e746 - nvdisplay/src/nvidia/kernel/inc/vgpu/rpc.h e4d88af4eb51d32288f913d90e490e329884970b - nvdisplay/src/nvidia/kernel/inc/vgpu/rpc_global_enums.h 
28d6a6ae495d9bc032c084980ebf5d94448bcf29 - nvdisplay/src/nvidia/kernel/inc/vgpu/rpc_headers.h 31deee778df2651d3d21b4d9c8ab180b8dc1ff14 - nvdisplay/src/nvidia/kernel/inc/vgpu/rpc_vgpu.h 78cbb6428372c25eba0ccf8c08e7d36d18e4bae8 - nvdisplay/src/nvidia/src/lib/base_utils.c 6d5915924b4e26a5e7592427e34b77596162d0fe - nvdisplay/src/nvidia/src/lib/zlib/inflate.c 8a4e2aec6fc01ce1133cfc7ef80b6363c5394208 - nvdisplay/src/nvidia/src/libraries/nvoc/src/runtime.c d3e5f13be70c8e458401ec9bdad007dfadedcc11 - nvdisplay/src/nvidia/src/libraries/nvbitvector/nvbitvector.c 206dda159ecbc0340ac9329250302c76a504e5a8 - nvdisplay/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c 9c40bfebe2c57b972683e45dc15f358aaa2280f8 - nvdisplay/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c 836ba8b401fb6b6fcf4ccde1b644ebaefc3d8ee1 - nvdisplay/src/nvidia/src/libraries/ioaccess/ioaccess.c cade0f7049cdb2ab423a073887ed20ba1abdb17e - nvdisplay/src/nvidia/src/libraries/utils/nvassert.c 595a6238b9f04887dd418be43ff31f3e7ca6b121 - nvdisplay/src/nvidia/src/libraries/containers/map.c 057ad074f6252f7809a88f918986d7d5aacff568 - nvdisplay/src/nvidia/src/libraries/containers/queue.c 2389c9dd3b13fd2ff26d2d1342c515579079bc71 - nvdisplay/src/nvidia/src/libraries/containers/multimap.c 4418c0344b64740050ff8ef6ee085f0687a323d4 - nvdisplay/src/nvidia/src/libraries/containers/list.c 2975e5cecee2c1fd5f69a8ffc20a49016e83025c - nvdisplay/src/nvidia/src/libraries/containers/btree/btree.c f0ce913eb568f85e6e1c1b8965f2cd2b98e81928 - nvdisplay/src/nvidia/src/libraries/containers/eheap/eheap_old.c 8ed5171254e51e59fc5586e729793831165b8c0c - nvdisplay/src/nvidia/src/libraries/tls/tls.c a045a19d750d48387640ab659bb30f724c34b8c8 - nvdisplay/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c 87ac95cf569bb550adb3577c6a6658d094c59999 - nvdisplay/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c f0c486c1ad0f7d9516b13a02d52b4d857d8865b1 - nvdisplay/src/nvidia/src/libraries/nvport/util/util_compiler_switch.c 
9b69fbf3efea6ba58f9ba7cb0189c9264c994657 - nvdisplay/src/nvidia/src/libraries/nvport/sync/sync_common.h 6f6c83e9ee6d91fc8700e5015440f2bc72e6600b - nvdisplay/src/nvidia/src/libraries/nvport/sync/sync_rwlock.c b55b7b59f35d848d5a3b43d63da4d2f7b0af5d3e - nvdisplay/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c 6dd0c5f2384610ea075642d8e403ddd8c8db371a - nvdisplay/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h 7416712aa964befcf8fede86e5a604871a2d00b8 - nvdisplay/src/nvidia/src/libraries/nvport/sync/inc/sync_rwlock_def.h b528ef8e238dd2c22c6549057b54fe33039c6473 - nvdisplay/src/nvidia/src/libraries/nvport/memory/memory_tracking.c b6d6074ca77856fc5fe4ff1534c08c023ee592a4 - nvdisplay/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c caff00b37e7f58fde886abcc2737c08526fa089e - nvdisplay/src/nvidia/src/libraries/nvport/memory/memory_generic.h 8f41e7127a65102f0035c03536c701b7ecdaa909 - nvdisplay/src/nvidia/src/libraries/nvport/string/string_generic.c 2fa76d2d5ba7212f826b656aa683223a470e484c - nvdisplay/src/nvidia/src/libraries/nvport/core/core.c 66e79047600e0a40c50e709c6c82402d9b205ad0 - nvdisplay/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c 7cdc50ee31b9cde14c0ce6fcd390c5d4564e433d - nvdisplay/src/nvidia/src/libraries/nvport/cpu/cpu_common.c a305654bafc883ad28a134a04e83bbd409e0fc06 - nvdisplay/src/nvidia/src/libraries/nvport/cpu/cpu_common.h da86b765702196eb0011ac9d14873fbc1589d48b - nvdisplay/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c 1f2e9d09e658474b36d0b0ecd9380d0d2bcc86b2 - nvdisplay/src/nvidia/src/libraries/resserv/src/rs_domain.c 883ad1cf4ed1714eb74d44d3b9a41d6a4723b650 - nvdisplay/src/nvidia/src/libraries/resserv/src/rs_server.c dac54d97b38ad722198ec918668f175dc5122e4e - nvdisplay/src/nvidia/src/libraries/resserv/src/rs_access_map.c d48d51a880fced52ad6e323d984e872ccf9ef3bd - nvdisplay/src/nvidia/src/libraries/resserv/src/rs_client.c d0ae6d7a363db3fdf54ae1a760630b52a2019637 - 
nvdisplay/src/nvidia/src/libraries/resserv/src/rs_resource.c 0c9581aa68a77cb9977a7fbcfd2077ccb618206e - nvdisplay/src/nvidia/src/libraries/resserv/src/rs_access_rights.c ea7be8a55a3310aa1c3926ed69c86a6491925e08 - nvdisplay/src/nvidia/src/kernel/diagnostics/nvlog.c b3a29311cc22e2dae686f8ed2df6bc828aa826cf - nvdisplay/src/nvidia/src/kernel/diagnostics/profiler.c 70507a8d43797eb3cdc13408ae8635f4a2eebce0 - nvdisplay/src/nvidia/src/kernel/diagnostics/nvlog_printf.c 8e5af753de1725dd919185c29d03ccb0934fab6e - nvdisplay/src/nvidia/src/kernel/os/os_init.c af4ffa4b423e07cf40eb863c11dbf515c7104874 - nvdisplay/src/nvidia/src/kernel/os/os_timer.c 63e5e17280d865ace8cdd8eb8a2598d3d7830ad7 - nvdisplay/src/nvidia/src/kernel/os/os_sanity.c 1793e056a0afcc5e1f5bb58b207b49c5f1556eca - nvdisplay/src/nvidia/src/kernel/os/os_stubs.c ddaf2b8e424df9147a4e2fecf3942b64b1d2b001 - nvdisplay/src/nvidia/src/kernel/rmapi/entry_points.c 820b6e63c2b11b0764305c483142f626b6f72038 - nvdisplay/src/nvidia/src/kernel/rmapi/rpc_common.c c3820fa4bb1192a9317ca834aeee3434c7eb8059 - nvdisplay/src/nvidia/src/kernel/rmapi/rmapi.c ac6a5b3adf15eac4a7bd9ae24981f6f5fc727097 - nvdisplay/src/nvidia/src/kernel/rmapi/deprecated_context.h 2279fd14aab9b5f20b8fc21f04dd0fca41e418c9 - nvdisplay/src/nvidia/src/kernel/rmapi/event_notification.c 11a547cbfdbce000a6e5edf48492f5b930ddbdca - nvdisplay/src/nvidia/src/kernel/rmapi/rs_utils.c f89e982b0e31a1898e1e4749c9a8ae9f0bb59a0c - nvdisplay/src/nvidia/src/kernel/rmapi/deprecated_context.c 3a0f999e390d93b0db8272f55fbec56f6b055fe4 - nvdisplay/src/nvidia/src/kernel/rmapi/rmapi_utils.c 569f56831cde7bdc528ac2e543eea485025ec6f0 - nvdisplay/src/nvidia/src/kernel/rmapi/client.c 78f1e379c3d1df9e34baba77f78f48b8585bdc74 - nvdisplay/src/nvidia/src/kernel/rmapi/event_buffer.c 38d0205b68ea2c82709b42eb7e8b9cf92cec8828 - nvdisplay/src/nvidia/src/kernel/rmapi/rmapi_stubs.c 6f46dd43e4b3f2ad803a4c9492cb927aebffc1f0 - nvdisplay/src/nvidia/src/kernel/rmapi/client_resource.c 
05669e008dfd89e5c81381e6c60230c1fe17a876 - nvdisplay/src/nvidia/src/kernel/rmapi/resource_desc.c 81f66675295315cfc52be225c2e9ee912b56fbac - nvdisplay/src/nvidia/src/kernel/rmapi/sharing.c 5f194ba056b018a8194c16b0bbb6e49c1b80a996 - nvdisplay/src/nvidia/src/kernel/rmapi/param_copy.c 59d42b6a123b062237b3b6ca382211e35057ef1e - nvdisplay/src/nvidia/src/kernel/rmapi/resource_list.h 46aa43b18480d2eb7519b2dcd0fe6a68c79b8881 - nvdisplay/src/nvidia/src/kernel/rmapi/resource.c f2c7d77e4183994d7ee414e2a87745fcd23d995e - nvdisplay/src/nvidia/src/kernel/rmapi/mapping_cpu.c e40f6742084cd04252f3ec8b8499a26547b478bc - nvdisplay/src/nvidia/src/kernel/rmapi/mapping.c 68cc7b258f934097e9dc31a38e7e3bf2ce2fe5d1 - nvdisplay/src/nvidia/src/kernel/rmapi/event.c 277441b3da96fc01199f1d2f5102490e2e6cd830 - nvdisplay/src/nvidia/src/kernel/rmapi/control.c 8e40d2f35828468f34cf6863f9bf99c20dbfc827 - nvdisplay/src/nvidia/src/kernel/rmapi/rmapi_cache.c 2f89b9059467e7f67a6a52c46aecae5cb0364ab6 - nvdisplay/src/nvidia/src/kernel/rmapi/binary_api.c d92267a3394ded5d7d218530fd16ce00a920b1d6 - nvdisplay/src/nvidia/src/kernel/rmapi/alloc_free.c bc83726df04c30d02a1852a10a22c77fdb3ef7a7 - nvdisplay/src/nvidia/src/kernel/rmapi/resource_desc.h b441ee824e9c15c82956254704949317024ceb41 - nvdisplay/src/nvidia/src/kernel/rmapi/entry_points.h 37000b419d23a8b052fc1218f09815fafb1d89c9 - nvdisplay/src/nvidia/src/kernel/core/hal_mgr.c c0822891f614e6ec847acb971e68aad8847e0cd7 - nvdisplay/src/nvidia/src/kernel/core/locks_common.c fe91b43c37b64472450cc25329d2dea74d2a9fcf - nvdisplay/src/nvidia/src/kernel/core/locks_minimal.c c68f2c96bfc6fce483a332a5824656d72986a145 - nvdisplay/src/nvidia/src/kernel/core/system.c 7b9c95f912b203c68b6ba1f62470dffee4b4efe3 - nvdisplay/src/nvidia/src/kernel/core/thread_state.c 677c655b0b8e86bdab13cdd4044de38647b00eec - nvdisplay/src/nvidia/src/kernel/core/hal/hal.c b9eabee9140c62385d070628948af0dcda3b0b1a - nvdisplay/src/nvidia/src/kernel/core/hal/hals_all.c 
8eac3ea49f9a53063f7106211e5236372d87bdaf - nvdisplay/src/nvidia/src/kernel/core/hal/info_block.c fa854efc5cdf4d167dee13302ee8377191624d95 - nvdisplay/src/nvidia/src/kernel/gpu/device.c 671286de97aa63201a363fd7a22c92ee8afe4c7c - nvdisplay/src/nvidia/src/kernel/gpu/eng_state.c 1653c7b99cfc86db6692d9d8d6de19f1b24b9071 - nvdisplay/src/nvidia/src/kernel/gpu/gpu_uuid.c 9515ea68cdac85989e4d53d4c1251115291708dd - nvdisplay/src/nvidia/src/kernel/gpu/gpu.c 5a97d4f8ce101908f1a67ffe9cc8ed00b6bf43b2 - nvdisplay/src/nvidia/src/kernel/gpu/gpu_resource.c 77573c8518ac7622211c4bdd16524d369cc14b96 - nvdisplay/src/nvidia/src/kernel/gpu/device_ctrl.c f6b4e40b638faf9770b632b404170e1ceb949be5 - nvdisplay/src/nvidia/src/kernel/gpu/gpu_gspclient.c 6fa4ba2da905692cd39ec09054f2bd6621aa2a7a - nvdisplay/src/nvidia/src/kernel/gpu/gpu_resource_desc.c 4e1be780ac696a61f056933e5550040a2d42c6bd - nvdisplay/src/nvidia/src/kernel/gpu/gpu_device_mapping.c db44a803d81d42bfaf84f7ea1e09dc53c662acef - nvdisplay/src/nvidia/src/kernel/gpu/gpu_timeout.c 0824d200569def5bf480f2a5127911ed0ea881e6 - nvdisplay/src/nvidia/src/kernel/gpu/device_share.c caf2b80fa0f01b9a3efcd8326bf6375455f2e1b9 - nvdisplay/src/nvidia/src/kernel/gpu/gpu_access.c 89543f7085fbc2ca01b5a8baae33b5de921c79e9 - nvdisplay/src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c 08be13ced6566aced2f3446bb657dae8efb41fbe - nvdisplay/src/nvidia/src/kernel/gpu/gpu_rmapi.c 56be7a21457145c3c6b2df7beb4c828b7bd1a3b4 - nvdisplay/src/nvidia/src/kernel/gpu/subdevice/subdevice.c a64c51c515eb76208a822f1f623d11e2edd8d7ac - nvdisplay/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c 086e9a51757c3989dfe0bf89ca6c0b9c7734104a - nvdisplay/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c a54628e9d2733c6d0470e1e73bca1573e6486ab3 - nvdisplay/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c 5be208cc0e1eae1f85f00bb0b502fdba74d6656c - nvdisplay/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c 
0e4c2d88b61a0cf63045fe70e5ba2c81c44e37af - nvdisplay/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c ed25b1e99b860468bbf22c10177e0ba99c73894f - nvdisplay/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c 0918cada217ca1883527fe805fc30babf7b8038d - nvdisplay/src/nvidia/src/kernel/gpu/disp/disp_channel.c 19447ad30b3fc2ee308bcc45e3409bafa5defe0d - nvdisplay/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c 3abbef0a6fc95d6f7c7c5a16cbbbb51aaa457cc0 - nvdisplay/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c 8cd12c2da71acede5046c772f14aff7cbd88af12 - nvdisplay/src/nvidia/src/kernel/gpu/disp/kern_disp.c e1a6dfb38025abeb5adfda929f61eb6ee44b5c84 - nvdisplay/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c dd0bd914c6c7bfeabdd9fe87fb984702e0765624 - nvdisplay/src/nvidia/src/kernel/gpu/disp/disp_objs.c e26ade846573c08f7494f17a233b8a9e14685329 - nvdisplay/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c 8a418dce9fbeb99d5d6e175ed8c88811866f3450 - nvdisplay/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c b41502d73d7781496845377cebd0d445b8ca9dc6 - nvdisplay/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c 01e8b56f7677f5cb7f950d9aa9bd37d04153085b - nvdisplay/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c 629566bf98be863b12e6dc6aab53d8f5ea13988c - nvdisplay/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c acb2a62fb60e08eb6d16518c43c974783139813b - nvdisplay/src/nvidia/src/kernel/gpu/timer/timer.c 834efbfff64c0d01272e49a08bd6196e341985a8 - nvdisplay/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c 1f4d15f959df38f4f6ea48c7b10fc859c6e04b12 - nvdisplay/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c bc2b57acc8fa8644615168e3ddbaf7ac161a7a04 - nvdisplay/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c d6e1bd038fa0eff5d3684a5a2c766fdac77f1198 - nvdisplay/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c d4a07d1c6beb7ddb229ed6e5374343b6ce916d84 - nvdisplay/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c 
2bb921b462c4b50d1f42b39b4728374c7433c8cb - nvdisplay/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c ef2a3848e0302c09869a34eba1333d19a17acc56 - nvdisplay/src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c 2c66e086bb149fb1b9ca8f860566a3f5e391b2f3 - nvdisplay/src/nvidia/src/kernel/gpu/dce_client/dce_client.c ed24c0406c85dc27f0fca1bac8b0dcb7a60dca2d - nvdisplay/src/nvidia/src/kernel/gpu_mgr/gpu_group.c a5a31b9b62e6d19b934411995c315d4fdac71ca0 - nvdisplay/src/nvidia/src/kernel/gpu_mgr/gpu_db.c 37d1e3dd86e6409b8e461f90386e013194c9e4d1 - nvdisplay/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c 003e3012e87b8f8f655749db88141d74660e8d8e - nvdisplay/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c 64bd2007101cbf718beb707898e85f40071ae405 - nvdisplay/src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c 9d9fcd87d784a758659b6cc8a522eaf9beac4b6c - nvdisplay/src/nvidia/src/kernel/mem_mgr/standard_mem.c 6aa752ae480e883d077de842f02444151947f82f - nvdisplay/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c 94acdcebee0cdcbf359b15803ec841e5284e1ff2 - nvdisplay/src/nvidia/src/kernel/mem_mgr/vaspace.c 5b9048e62581a3fbb0227d1a46c4ee8d8397bf5b - nvdisplay/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h 079893039c2802e1b0e6fcab5d0ee0e4dc608c84 - nvdisplay/src/nvidia/src/kernel/mem_mgr/io_vaspace.c 15f3290908931a9e4d74b0c0ec9e460956e39089 - nvdisplay/src/nvidia/src/kernel/mem_mgr/system_mem.c 623dad3ec0172ed7b3818caece0db5687d587ff3 - nvdisplay/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c 956b7871a267b7d381d1cd7d4689ef1aec1da415 - nvdisplay/src/nvidia/src/kernel/mem_mgr/mem.c db0dc6915302888de06e3aa094d961cfe25e0059 - nvdisplay/src/nvidia/interface/nvrm_registry.h df4d313c66e75fa9f4a1ff8ea2c389a6ecd6eb3d - nvdisplay/src/nvidia/interface/acpigenfuncs.h 35da37c070544f565d0f1de82abc7569b5df06af - nvdisplay/src/nvidia/interface/nv_firmware_types.h bff92c9767308a13df1d0858d5f9c82af155679a - nvdisplay/src/nvidia/interface/nvacpitypes.h 059c1ab76a5f097593f0f8a79203e14a9cec6287 - 
nvdisplay/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c d50ff73efaf5bc7e9cb3f67ed07ede01e8fad6f6 - nvdisplay/src/nvidia/interface/deprecated/rmapi_deprecated.h 8c43da4fae8a0aeb374ce46ce19eb8c38b552ae4 - nvdisplay/src/nvidia-modeset/Makefile a0cc9f36fdd73c99ad8f264efa58043d42353b0a - nvdisplay/src/nvidia-modeset/lib/nvkms-sync.c 381fba24abae75d98b3ada184ed0cd57335819a9 - nvdisplay/src/nvidia-modeset/lib/nvkms-format.c 9c90df1fa1b6dd33a7e330c47e94b5b9194ad419 - nvdisplay/src/nvidia-modeset/include/nvkms-3dvision.h ebafc51b2b274cd1818e471850a5efa9618eb17d - nvdisplay/src/nvidia-modeset/include/nvkms-prealloc.h 33dbf734c9757c2c40adb2fb185e964870217743 - nvdisplay/src/nvidia-modeset/include/nvkms-flip-workarea.h 8f1994f3f8d100ddcf8b23f5b24872bed939d885 - nvdisplay/src/nvidia-modeset/include/nvkms-vrr.h 4020b2a0d4f177c143db40b33d122017416dfa2e - nvdisplay/src/nvidia-modeset/include/nvkms-evo1.h 182a47c12496b8b7da1c4fe7035d6b36d7316322 - nvdisplay/src/nvidia-modeset/include/nvkms-prealloc-types.h c1c7047929aafc849a924c7fa9f8bc206b8e7524 - nvdisplay/src/nvidia-modeset/include/g_nvkms-evo-states.h 412d8028a548e67e9ef85cb7d3f88385e70c56f9 - nvdisplay/src/nvidia-modeset/include/nvkms-console-restore.h be3a1682574426c1bf75fcdf88278c18f2783c3f - nvdisplay/src/nvidia-modeset/include/nvkms-dpy.h 20213d53bb52bf9f38400e35d7963d0f4db22f96 - nvdisplay/src/nvidia-modeset/include/nvkms-evo-states.h 70d9251f331bbf28f5c5bbdf939ebad94db9362d - nvdisplay/src/nvidia-modeset/include/nvkms-softfloat.h d7861e2373ac04ffaf6c15caeba887f727aa41fb - nvdisplay/src/nvidia-modeset/include/nvkms-dma.h 4f5d723c80f607a0e5f797835d561795dbe40ada - nvdisplay/src/nvidia-modeset/include/nvkms-cursor.h 64af1df50d2a5b827c1c829a303844de20527522 - nvdisplay/src/nvidia-modeset/include/nvkms-rm.h c1904d38785649d2614563d0cd7de28a15ce4486 - nvdisplay/src/nvidia-modeset/include/nvkms-modeset.h be6e0e97c1e7ffc0daa2f14ef7b05b9f9c11dc16 - nvdisplay/src/nvidia-modeset/include/nvkms-attributes.h 
853d9005ec695cb5a1c7966a1f93fe0c9c8278cf - nvdisplay/src/nvidia-modeset/include/nvkms-hdmi.h 6b21a68e254becdd2641bc456f194f54c23abe51 - nvdisplay/src/nvidia-modeset/include/nvkms-framelock.h 16a2e187afedf93bade7967816b0723708544e0d - nvdisplay/src/nvidia-modeset/include/nvkms-modeset-workarea.h ef78e73ec9c0b8341bd83306d1f3b2c35e20c43a - nvdisplay/src/nvidia-modeset/include/nvkms-utils.h f5f3b11c78a8b0eef40c09e1751615a47f516edb - nvdisplay/src/nvidia-modeset/include/nvkms-hal.h d4889d903bf4de06d85e55b005206ed57f28af69 - nvdisplay/src/nvidia-modeset/include/nvkms-lut.h 9dd131355ed1e25a7cee7bfef00501cf6427ae92 - nvdisplay/src/nvidia-modeset/include/nvkms-private.h 8a6f26ccf2e563b78f6e189c999ba470ed35271d - nvdisplay/src/nvidia-modeset/include/nvkms-evo.h 75e8a8747795fad89b4d2b662477e5454863dcc7 - nvdisplay/src/nvidia-modeset/include/nvkms-flip.h d3f5bc85b538a3a1d4c2389c81001be91205ec9f - nvdisplay/src/nvidia-modeset/include/nvkms-modeset-types.h 260b6ef87c755e55a803adad4ce49f2d57315f9a - nvdisplay/src/nvidia-modeset/include/nvkms-event.h 867e3091a945d3d43b2f28393b40edeb9d27597b - nvdisplay/src/nvidia-modeset/include/nvkms-rmapi.h 118d0ea84ff81de16fbdc2c7daf249ee5c82ed6e - nvdisplay/src/nvidia-modeset/include/nvkms-modepool.h c8f714e80dd4bb60ceab0c0c7e6a5b3304940946 - nvdisplay/src/nvidia-modeset/include/nvkms-types.h 71e8c5d3c4dfec6f2261654c3fc91210bff78da9 - nvdisplay/src/nvidia-modeset/include/nvkms-surface.h 52b6c19cce320677bd3a4dfcf1698b236f29e59e - nvdisplay/src/nvidia-modeset/include/dp/nvdp-device.h 4625828efd425e1b29835ab91fcc3d2d85e92389 - nvdisplay/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h a8fbb7a071c0e7b326f384fed7547e7b6ec81c3e - nvdisplay/src/nvidia-modeset/include/dp/nvdp-timer.h 17f6fbbd5e0a75faec21347b691f44dcb65c01aa - nvdisplay/src/nvidia-modeset/include/dp/nvdp-connector.h 727bd77cfbc9ac4989c2ab7eec171ceb516510aa - nvdisplay/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h e48c2ec8145a6f2099dddb24d2900e3ae94ec02e - 
nvdisplay/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h f6875ef0da055900ef6ef1da5dc94cba2837e4d0 - nvdisplay/src/nvidia-modeset/kapi/src/nvkms-kapi-channelevent.c 394ea31caa5957cfb2c8bb8c3cc0e4703213fe7f - nvdisplay/src/nvidia-modeset/kapi/src/nvkms-kapi.c 01d943d6edb0c647c2b8dbc44460948665b03e7a - nvdisplay/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c fb242aa7a53983118ee019415076033e596374af - nvdisplay/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h 009cd8e2b7ee8c0aeb05dac44cc84fc8f6f37c06 - nvdisplay/src/nvidia-modeset/kapi/interface/nvkms-kapi.h f8bdd07a27296ef6aab86cc9dbccf8df811fff24 - nvdisplay/src/nvidia-modeset/src/nvkms-modeset.c 21c8184de2c9150c21ac5d6fba24e79e513a0a69 - nvdisplay/src/nvidia-modeset/src/nvkms-evo.c 1918ca3aa611cd9dfc79d46d038ab22706f0b1ed - nvdisplay/src/nvidia-modeset/src/nvkms-cursor3.c fc8182cc1f3af77125dbfa328996bcfe0387cc41 - nvdisplay/src/nvidia-modeset/src/nvkms-rm.c 45230e56d29c98ea0f10f87c1b16ba70c96f24d5 - nvdisplay/src/nvidia-modeset/src/nvkms-evo3.c 05548338a73ade1b3c2ad1cebf1ab5eb16ef6c9b - nvdisplay/src/nvidia-modeset/src/nvkms-flip.c 673ad86616f9863766bfec0e118c918297d32010 - nvdisplay/src/nvidia-modeset/src/g_nvkms-evo-states.c 85ddb19f89833ca57fd2deff2e2b4566e162a56c - nvdisplay/src/nvidia-modeset/src/nvkms-hal.c 5acf19920d56793d96c80e8461b0d0213c871b34 - nvdisplay/src/nvidia-modeset/src/nvkms-surface.c 8f22c278a5839d36f74f85469b2d927d9265cb80 - nvdisplay/src/nvidia-modeset/src/nvkms-utils.c 94f4736acf7981cebfd74302a21f19cdbafa8d71 - nvdisplay/src/nvidia-modeset/src/nvkms-hdmi.c 24156462f25922c8de5b5d2558db36b2e68b28ed - nvdisplay/src/nvidia-modeset/src/nvkms-dpy.c eb09642e8b5d9333699f817caaf20483c840b376 - nvdisplay/src/nvidia-modeset/src/nvkms.c ab17e5b4cafa92aa03691a0c187ef8c9ae53fa59 - nvdisplay/src/nvidia-modeset/src/nvkms-cursor.c b55665d7bceaad04bbf29a68f44536518302c3d6 - nvdisplay/src/nvidia-modeset/src/nvkms-evo2.c 8415bcd6ab34e356374659e965790a0715ed7971 - 
nvdisplay/src/nvidia-modeset/src/nvkms-prealloc.c 07c2f10473e2fbe921b2781cc107b5e56e6373e3 - nvdisplay/src/nvidia-modeset/src/nvkms-attributes.c f27f52dc428a6adeb936c8cf99e1fc2d8b0ad667 - nvdisplay/src/nvidia-modeset/src/nvkms-dma.c da726d20eea99a96af4c10aace88f419e8ee2a34 - nvdisplay/src/nvidia-modeset/src/nvkms-event.c c98f76bcfc7c654a619762ebc3a2599f9aa89f8d - nvdisplay/src/nvidia-modeset/src/nvkms-3dvision.c c2870190ca4c4d5b3a439386583d0a7c193d6263 - nvdisplay/src/nvidia-modeset/src/nvkms-hw-states.c c799d52bdc792efc377fb5cd307b0eb445c44d6a - nvdisplay/src/nvidia-modeset/src/nvkms-cursor2.c c2d0e6bef0c4929a3ca4adfd74bd6168fa4aa000 - nvdisplay/src/nvidia-modeset/src/nvkms-framelock.c e9626eee225e58ec2d5be756c5015775ca5e54b9 - nvdisplay/src/nvidia-modeset/src/nvkms-vrr.c 5c79c271609ebcc739f8d73d7d47f0b376298438 - nvdisplay/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c 86da3c7c09354d2c49d95562aba15cbedb543d9b - nvdisplay/src/nvidia-modeset/src/nvkms-evo1.c 574b1268ff83e4e5ed4da15609247a5c0ec8f51b - nvdisplay/src/nvidia-modeset/src/nvkms-console-restore.c ec97ab37cdf2cec0283657c2c04a139a1a168337 - nvdisplay/src/nvidia-modeset/src/nvkms-modepool.c 5fb73f35841c41e7376531732cb12303224e61ad - nvdisplay/src/nvidia-modeset/src/nvkms-lut.c f96cd982b4c05351faa31d04ac30d6fa7c866bcb - nvdisplay/src/nvidia-modeset/src/dp/nvdp-timer.cpp 6b985fc50b5040ce1a81418bed73a60edb5d3289 - nvdisplay/src/nvidia-modeset/src/dp/nvdp-timer.hpp 8af6062034d464f778969e26d3bf5a9b4cdaccf0 - nvdisplay/src/nvidia-modeset/src/dp/nvdp-connector.cpp f6c3e8bd4ee13970737e96f9d9a3e4d8afdf9695 - nvdisplay/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp f2a05c29383bfc8631ad31909f31a8351501eb27 - nvdisplay/src/nvidia-modeset/src/dp/nvdp-device.cpp 31767fd551f3c89e5b00f54147b6a8e8fa3320e3 - nvdisplay/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp 51af3c1ee6b74ee0c9add3fb7d50cbc502980789 - nvdisplay/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp 110ac212ee8832c3fa3c4f45d6d33eed0301e992 - 
nvdisplay/src/nvidia-modeset/src/dp/nvdp-host.cpp 69fed95ab3954dd5cb26590d02cd8ba09cdff1ac - nvdisplay/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp 7d108165b4a7b6a44ac21460ea3bf4381fb48c5b - nvdisplay/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h 17855f638fd09abfec7d188e49b396793a9f6106 - nvdisplay/src/nvidia-modeset/os-interface/include/nvkms.h 445a409950ab8f36cfa24d1dc73e59718d335263 - nvdisplay/src/nvidia-modeset/interface/nvkms-api.h 5c4c05e5a638888babb5a8af2f0a61c94ecd150b - nvdisplay/src/nvidia-modeset/interface/nvkms-format.h 2ea1436104463c5e3d177e8574c3b4298976d37e - nvdisplay/src/nvidia-modeset/interface/nvkms-ioctl.h 910255a4d92e002463175a28e38c3f24716fb654 - nvdisplay/src/nvidia-modeset/interface/nvkms-api-types.h 281fdc23f82d8bdb94b26d0093b444eb0c056f51 - nvdisplay/src/nvidia-modeset/interface/nvkms-sync.h Change-Id: I7bb776aa8b86d8401aba1cbe9e6c56713750eba2
5641 lines
136 KiB
C
/*
|
|
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
* SPDX-License-Identifier: MIT
|
|
*
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
* to deal in the Software without restriction, including without limitation
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
*
|
|
* The above copyright notice and this permission notice shall be included in
|
|
* all copies or substantial portions of the Software.
|
|
*
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
* DEALINGS IN THE SOFTWARE.
|
|
*/
|
|
|
|
#include "nvmisc.h"
|
|
#include "os-interface.h"
|
|
#include "nv-linux.h"
|
|
#include "nv-p2p.h"
|
|
#include "nv-reg.h"
|
|
#include "nv-msi.h"
|
|
#include "nv-pci-table.h"
|
|
|
|
#if defined(NV_UVM_ENABLE)
|
|
#include "nv_uvm_interface.h"
|
|
#endif
|
|
|
|
#if defined(NV_VGPU_KVM_BUILD)
|
|
#include "nv-vgpu-vfio-interface.h"
|
|
#endif
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#include "nv-frontend.h"
|
|
#include "nv-hypervisor.h"
|
|
#include "nv-ibmnpu.h"
|
|
#include "nv-rsync.h"
|
|
#include "nv-kthread-q.h"
|
|
#include "nv-pat.h"
|
|
#include "nv-dmabuf.h"
|
|
|
|
#if !defined(CONFIG_RETPOLINE)
|
|
#include "nv-retpoline.h"
|
|
#endif
|
|
|
|
#include <linux/firmware.h>
|
|
|
|
#include <sound/core.h> /* HDA struct snd_card */
|
|
|
|
#include <asm/cache.h>
|
|
|
|
#if defined(NV_SOUND_HDAUDIO_H_PRESENT)
|
|
#include "sound/hdaudio.h"
|
|
#endif
|
|
|
|
#if defined(NV_SOUND_HDA_CODEC_H_PRESENT)
|
|
#include <sound/core.h>
|
|
#include <sound/hda_codec.h>
|
|
#include <sound/hda_verbs.h>
|
|
#endif
|
|
|
|
#if defined(NV_SEQ_READ_ITER_PRESENT)
|
|
#include <linux/uio.h>
|
|
#include <linux/seq_file.h>
|
|
#include <linux/kernfs.h>
|
|
#endif
|
|
|
|
#include <linux/dmi.h> /* System DMI info */
|
|
|
|
#include <linux/ioport.h>
|
|
|
|
#include "conftest/patches.h"
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Interrupt-count thresholds and timeout used by the driver's interrupt
 * handling (consumers are outside this chunk — verify against the ISR code).
 * NOTE(review): "UNAHNDLED" is a misspelling of "UNHANDLED"; the name is
 * kept as-is because other translation units may reference it verbatim.
 */
#define RM_THRESHOLD_TOTAL_IRQ_COUNT 100000
#define RM_THRESHOLD_UNAHNDLED_IRQ_COUNT 99900
#define RM_UNHANDLED_TIMEOUT_US 100000
|
|
|
|
const NvBool nv_is_rm_firmware_supported_os = NV_TRUE;
|
|
|
|
// Deprecated, use NV_REG_ENABLE_GPU_FIRMWARE instead
|
|
char *rm_firmware_active = NULL;
|
|
NV_MODULE_STRING_PARAMETER(rm_firmware_active);
|
|
|
|
#define NV_FIRMWARE_GSP_FILENAME "nvidia/" NV_VERSION_STRING "/gsp.bin"
|
|
#define NV_FIRMWARE_GSP_LOG_FILENAME "nvidia/" NV_VERSION_STRING "/gsp_log.bin"
|
|
|
|
MODULE_FIRMWARE(NV_FIRMWARE_GSP_FILENAME);
|
|
|
|
/*
|
|
* Global NVIDIA capability state, for GPU driver
|
|
*/
|
|
nv_cap_t *nvidia_caps_root = NULL;
|
|
|
|
/*
|
|
* our global state; one per device
|
|
*/
|
|
NvU32 num_nv_devices = 0;
|
|
NvU32 num_probed_nv_devices = 0;
|
|
|
|
nv_linux_state_t *nv_linux_devices;
|
|
|
|
/*
|
|
* And one for the control device
|
|
*/
|
|
nv_linux_state_t nv_ctl_device = { { 0 } };
|
|
extern NvU32 nv_dma_remap_peer_mmio;
|
|
|
|
nv_kthread_q_t nv_kthread_q;
|
|
nv_kthread_q_t nv_deferred_close_kthread_q;
|
|
|
|
struct rw_semaphore nv_system_pm_lock;
|
|
|
|
#if defined(CONFIG_PM)
|
|
static nv_power_state_t nv_system_power_state;
|
|
static nv_pm_action_depth_t nv_system_pm_action_depth;
|
|
struct semaphore nv_system_power_state_lock;
|
|
#endif
|
|
|
|
void *nvidia_p2p_page_t_cache;
|
|
static void *nvidia_pte_t_cache;
|
|
void *nvidia_stack_t_cache;
|
|
static nvidia_stack_t *__nv_init_sp;
|
|
|
|
static int nv_tce_bypass_mode = NV_TCE_BYPASS_MODE_DEFAULT;
|
|
|
|
struct semaphore nv_linux_devices_lock;
|
|
|
|
static NvTristate nv_chipset_is_io_coherent = NV_TRISTATE_INDETERMINATE;
|
|
|
|
// True if all the successfully probed devices support ATS
|
|
// Assigned at device probe (module init) time
|
|
NvBool nv_ats_supported = NVCPU_IS_PPC64LE
|
|
|
|
|
|
|
|
|
|
|
|
;
|
|
|
|
// allow an easy way to convert all debug printfs related to events
|
|
// back and forth between 'info' and 'errors'
|
|
#if defined(NV_DBG_EVENTS)
|
|
#define NV_DBG_EVENTINFO NV_DBG_ERRORS
|
|
#else
|
|
#define NV_DBG_EVENTINFO NV_DBG_INFO
|
|
#endif
|
|
|
|
#if defined(HDA_MAX_CODECS)
|
|
#define NV_HDA_MAX_CODECS HDA_MAX_CODECS
|
|
#else
|
|
#define NV_HDA_MAX_CODECS 8
|
|
#endif
|
|
|
|
/***
|
|
*** STATIC functions, only in this file
|
|
***/
|
|
|
|
/* nvos_ functions.. do not take a state device parameter */
|
|
static int nvos_count_devices(void);
|
|
|
|
static nv_alloc_t *nvos_create_alloc(struct device *, int);
|
|
static int nvos_free_alloc(nv_alloc_t *);
|
|
|
|
/***
|
|
*** EXPORTS to Linux Kernel
|
|
***/
|
|
|
|
static irqreturn_t nvidia_isr_common_bh (void *);
|
|
static void nvidia_isr_bh_unlocked (void *);
|
|
static int nvidia_ctl_open (struct inode *, struct file *);
|
|
static int nvidia_ctl_close (struct inode *, struct file *);
|
|
|
|
const char *nv_device_name = MODULE_NAME;
|
|
static const char *nvidia_stack_cache_name = MODULE_NAME "_stack_cache";
|
|
static const char *nvidia_pte_cache_name = MODULE_NAME "_pte_cache";
|
|
static const char *nvidia_p2p_page_cache_name = MODULE_NAME "_p2p_page_cache";
|
|
|
|
static int nvidia_open (struct inode *, struct file *);
|
|
static int nvidia_close (struct inode *, struct file *);
|
|
static unsigned int nvidia_poll (struct file *, poll_table *);
|
|
static int nvidia_ioctl (struct inode *, struct file *, unsigned int, unsigned long);
|
|
|
|
/*
 * Character device entry points; registered with the kernel through
 * nvidia_register_module() in nv_drivers_init().
 */
nvidia_module_t nv_fops = {
    .owner       = THIS_MODULE,
    .module_name = MODULE_NAME,
    .instance    = MODULE_INSTANCE_NUMBER,
    .open        = nvidia_open,
    .close       = nvidia_close,
    .ioctl       = nvidia_ioctl,
    .mmap        = nvidia_mmap,
    .poll        = nvidia_poll,
};
|
|
|
|
#if defined(CONFIG_PM)
|
|
static int nv_pmops_suspend (struct device *dev);
|
|
static int nv_pmops_resume (struct device *dev);
|
|
static int nv_pmops_freeze (struct device *dev);
|
|
static int nv_pmops_thaw (struct device *dev);
|
|
static int nv_pmops_restore (struct device *dev);
|
|
static int nv_pmops_poweroff (struct device *dev);
|
|
static int nv_pmops_runtime_suspend (struct device *dev);
|
|
static int nv_pmops_runtime_resume (struct device *dev);
|
|
|
|
struct dev_pm_ops nv_pm_ops = {
|
|
.suspend = nv_pmops_suspend,
|
|
.resume = nv_pmops_resume,
|
|
.freeze = nv_pmops_freeze,
|
|
.thaw = nv_pmops_thaw,
|
|
.poweroff = nv_pmops_poweroff,
|
|
.restore = nv_pmops_restore,
|
|
.runtime_suspend = nv_pmops_runtime_suspend,
|
|
.runtime_resume = nv_pmops_runtime_resume,
|
|
};
|
|
#endif
|
|
|
|
/***
|
|
*** see nv.h for functions exported to other parts of resman
|
|
***/
|
|
|
|
/***
|
|
*** STATIC functions
|
|
***/
|
|
|
|
#if defined(NVCPU_X86_64)
|
|
#define NV_AMD_SEV_BIT BIT(1)
|
|
|
|
static
|
|
NvBool nv_is_sev_supported(
|
|
void
|
|
)
|
|
{
|
|
unsigned int eax, ebx, ecx, edx;
|
|
|
|
/* Check for the SME/SEV support leaf */
|
|
eax = 0x80000000;
|
|
ecx = 0;
|
|
native_cpuid(&eax, &ebx, &ecx, &edx);
|
|
if (eax < 0x8000001f)
|
|
return NV_FALSE;
|
|
|
|
eax = 0x8000001f;
|
|
ecx = 0;
|
|
native_cpuid(&eax, &ebx, &ecx, &edx);
|
|
/* Check whether SEV is supported */
|
|
if (!(eax & NV_AMD_SEV_BIT))
|
|
return NV_FALSE;
|
|
|
|
return NV_TRUE;
|
|
}
|
|
#endif
|
|
|
|
/*
 * On x86-64 kernels that define MSR_AMD64_SEV, read the SEV status MSR
 * and cache it in the globals os_sev_status / os_sev_enabled for later
 * use by the driver. A no-op when SEV is unsupported or the kernel does
 * not expose the MSR definitions.
 */
static
void nv_sev_init(
    void
)
{
#if defined(MSR_AMD64_SEV) && defined(NVCPU_X86_64)
    NvU32 lo_val, hi_val;

    if (!nv_is_sev_supported())
        return;

    rdmsr(MSR_AMD64_SEV, lo_val, hi_val);

    /* Only the low 32 bits of the MSR are consumed here. */
    os_sev_status = lo_val;
#if defined(MSR_AMD64_SEV_ENABLED)
    os_sev_enabled = (os_sev_status & MSR_AMD64_SEV_ENABLED);
#endif
#endif
}
|
|
|
|
static
|
|
nv_alloc_t *nvos_create_alloc(
|
|
struct device *dev,
|
|
int num_pages
|
|
)
|
|
{
|
|
nv_alloc_t *at;
|
|
unsigned int pt_size, i;
|
|
|
|
NV_KMALLOC(at, sizeof(nv_alloc_t));
|
|
if (at == NULL)
|
|
{
|
|
nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate alloc info\n");
|
|
return NULL;
|
|
}
|
|
|
|
memset(at, 0, sizeof(nv_alloc_t));
|
|
|
|
at->dev = dev;
|
|
pt_size = num_pages * sizeof(nvidia_pte_t *);
|
|
if (os_alloc_mem((void **)&at->page_table, pt_size) != NV_OK)
|
|
{
|
|
nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate page table\n");
|
|
NV_KFREE(at, sizeof(nv_alloc_t));
|
|
return NULL;
|
|
}
|
|
|
|
memset(at->page_table, 0, pt_size);
|
|
at->num_pages = num_pages;
|
|
NV_ATOMIC_SET(at->usage_count, 0);
|
|
|
|
for (i = 0; i < at->num_pages; i++)
|
|
{
|
|
at->page_table[i] = NV_KMEM_CACHE_ALLOC(nvidia_pte_t_cache);
|
|
if (at->page_table[i] == NULL)
|
|
{
|
|
nv_printf(NV_DBG_ERRORS,
|
|
"NVRM: failed to allocate page table entry\n");
|
|
nvos_free_alloc(at);
|
|
return NULL;
|
|
}
|
|
memset(at->page_table[i], 0, sizeof(nvidia_pte_t));
|
|
}
|
|
|
|
at->pid = os_get_current_process();
|
|
|
|
return at;
|
|
}
|
|
|
|
/*
 * Release an nv_alloc_t created by nvos_create_alloc().
 *
 * Returns -1 when at is NULL, 1 when the allocation is still referenced
 * (usage_count non-zero; nothing is freed), 0 on success.
 */
static
int nvos_free_alloc(
    nv_alloc_t *at
)
{
    unsigned int i;

    if (at == NULL)
        return -1;

    /* Refuse to free while someone still holds a reference. */
    if (NV_ATOMIC_READ(at->usage_count))
        return 1;

    for (i = 0; i < at->num_pages; i++)
    {
        /* Entries can be NULL when nvos_create_alloc() failed part-way. */
        if (at->page_table[i] != NULL)
            NV_KMEM_CACHE_FREE(at->page_table[i], nvidia_pte_t_cache);
    }
    os_free_mem(at->page_table);

    NV_KFREE(at, sizeof(nv_alloc_t));

    return 0;
}
|
|
|
|
/*
 * Free the alloc stack and destroy the module-wide slab caches —
 * reverse of nv_module_resources_init().
 */
static void
nv_module_resources_exit(nv_stack_t *sp)
{
    nv_kmem_cache_free_stack(sp);

    NV_KMEM_CACHE_DESTROY(nvidia_p2p_page_t_cache);
    NV_KMEM_CACHE_DESTROY(nvidia_pte_t_cache);
    NV_KMEM_CACHE_DESTROY(nvidia_stack_t_cache);
}
|
|
|
|
/*
 * Create the module-wide slab caches (stack, PTE, p2p page) and allocate
 * the initial alloc stack into *sp.
 *
 * Returns 0 on success or a negative errno. On any failure, every
 * resource created so far is destroyed before returning.
 */
static int __init
nv_module_resources_init(nv_stack_t **sp)
{
    int rc = -ENOMEM;

    nvidia_stack_t_cache = NV_KMEM_CACHE_CREATE(nvidia_stack_cache_name,
                                                nvidia_stack_t);
    if (nvidia_stack_t_cache == NULL)
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: nvidia_stack_t cache allocation failed.\n");
        goto exit;
    }

    nvidia_pte_t_cache = NV_KMEM_CACHE_CREATE(nvidia_pte_cache_name,
                                              nvidia_pte_t);
    if (nvidia_pte_t_cache == NULL)
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: nvidia_pte_t cache allocation failed.\n");
        goto exit;
    }

    nvidia_p2p_page_t_cache = NV_KMEM_CACHE_CREATE(nvidia_p2p_page_cache_name,
                                                   nvidia_p2p_page_t);
    if (nvidia_p2p_page_t_cache == NULL)
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: nvidia_p2p_page_t cache allocation failed.\n");
        goto exit;
    }

    rc = nv_kmem_cache_alloc_stack(sp);
    if (rc < 0)
    {
        goto exit;
    }

exit:
    if (rc < 0)
    {
        /* NOTE(review): early failure paths reach this before any stack
         * was allocated, so nv_kmem_cache_free_stack() / the destroy
         * macros presumably tolerate NULL — confirm against nv-linux.h. */
        nv_kmem_cache_free_stack(*sp);

        NV_KMEM_CACHE_DESTROY(nvidia_p2p_page_t_cache);
        NV_KMEM_CACHE_DESTROY(nvidia_pte_t_cache);
        NV_KMEM_CACHE_DESTROY(nvidia_stack_t_cache);
    }

    return rc;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Undo nv_module_state_init(): tear down PAT support, stop the two
 * worker kthread queues, and destroy the control device's locks.
 */
static void
nv_module_state_exit(nv_stack_t *sp)
{
    nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device);

    nv_teardown_pat_support();

    nv_kthread_q_stop(&nv_deferred_close_kthread_q);
    nv_kthread_q_stop(&nv_kthread_q);

    nv_lock_destroy_locks(sp, nv);
}
|
|
|
|
/*
 * Initialize module-wide state hosted on the control device: its locks,
 * the general and deferred-close kthread queues, PAT support, the global
 * device list and its mutex, the system PM rwsem, and (under CONFIG_PM)
 * the power-state bookkeeping.
 *
 * Returns 0 on success or a negative errno; on failure everything set up
 * so far is unwound before returning.
 */
static int
nv_module_state_init(nv_stack_t *sp)
{
    int rc;
    nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device);

    nv->os_state = (void *)&nv_ctl_device;

    if (!nv_lock_init_locks(sp, nv))
    {
        return -ENOMEM;
    }

    rc = nv_kthread_q_init(&nv_kthread_q, "nv_queue");
    if (rc != 0)
    {
        goto exit;
    }

    rc = nv_kthread_q_init(&nv_deferred_close_kthread_q, "nv_queue");
    if (rc != 0)
    {
        /* Unwind the first queue before bailing out. */
        nv_kthread_q_stop(&nv_kthread_q);
        goto exit;
    }

    rc = nv_init_pat_support(sp);
    if (rc < 0)
    {
        nv_kthread_q_stop(&nv_deferred_close_kthread_q);
        nv_kthread_q_stop(&nv_kthread_q);
        goto exit;
    }

    nv_linux_devices = NULL;
    NV_INIT_MUTEX(&nv_linux_devices_lock);
    init_rwsem(&nv_system_pm_lock);

#if defined(CONFIG_PM)
    NV_INIT_MUTEX(&nv_system_power_state_lock);
    nv_system_power_state = NV_POWER_STATE_RUNNING;
    nv_system_pm_action_depth = NV_PM_ACTION_DEPTH_DEFAULT;
#endif

    NV_SPIN_LOCK_INIT(&nv_ctl_device.snapshot_timer_lock);

exit:
    /* The success path also falls through here with rc == 0; locks are
     * only destroyed when one of the steps above failed. */
    if (rc < 0)
    {
        nv_lock_destroy_locks(sp, nv);
    }

    return rc;
}
|
|
|
|
/*
 * Read registry keys that must be known before (or independent of)
 * device probe: the PPC64LE TCE bypass mode, the user-mode NUMA
 * onlining key, and the peer-MMIO DMA remap setting.
 */
static void __init
nv_registry_keys_init(nv_stack_t *sp)
{
    NV_STATUS status;
    nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device);
    NvU32 data;

    /*
     * Determine the TCE bypass mode here so it can be used during
     * device probe. Also determine whether we should allow
     * user-mode NUMA onlining of device memory.
     */
    if (NVCPU_IS_PPC64LE)
    {
        status = rm_read_registry_dword(sp, nv,
                                        NV_REG_TCE_BYPASS_MODE,
                                        &data);
        /* Only override the compiled-in default when the key differs. */
        if ((status == NV_OK) && ((int)data != NV_TCE_BYPASS_MODE_DEFAULT))
        {
            nv_tce_bypass_mode = data;
        }

        if (NVreg_EnableUserNUMAManagement)
        {
            /* Force on the core RM registry key to match. */
            status = rm_write_registry_dword(sp, nv, "RMNumaOnlining", 1);
            WARN_ON(status != NV_OK);
        }
    }

    status = rm_read_registry_dword(sp, nv, NV_DMA_REMAP_PEER_MMIO, &data);
    if (status == NV_OK)
    {
        nv_dma_remap_peer_mmio = data;
    }
}
|
|
|
|
static void __init
|
|
nv_report_applied_patches(void)
|
|
{
|
|
unsigned i;
|
|
|
|
for (i = 0; __nv_patches[i].short_description; i++)
|
|
{
|
|
if (i == 0)
|
|
{
|
|
nv_printf(NV_DBG_ERRORS, "NVRM: Applied patches:\n");
|
|
}
|
|
|
|
nv_printf(NV_DBG_ERRORS,
|
|
"NVRM: Patch #%d: %s\n", i + 1, __nv_patches[i].short_description);
|
|
}
|
|
}
|
|
|
|
/*
 * Unregister the platform driver, the PCI driver, and the character
 * device — reverse of nv_drivers_init().
 */
static void
nv_drivers_exit(void)
{
    nv_platform_unregister_driver();

    nv_pci_unregister_driver();

    nvidia_unregister_module(&nv_fops);
}
|
|
|
|
/*
 * Register the character device, then the PCI driver, then the SOC
 * platform driver.
 *
 * Returns 0 on success or a negative errno; anything registered before
 * the failing step is unregistered again.
 */
static int __init
nv_drivers_init(void)
{
    int rc;

    rc = nvidia_register_module(&nv_fops);
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: failed to register character device.\n");
        return rc;
    }

    rc = nv_pci_register_driver();
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA PCI devices found.\n");
        rc = -ENODEV;
        goto exit;
    }

    rc = nv_platform_register_driver();
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: SOC driver registration failed!\n");
        /* Unwind the PCI driver here; the character device is unwound by
         * the common exit path below. */
        nv_pci_unregister_driver();
        rc = -ENODEV;
    }

exit:
    if (rc < 0)
    {
        nvidia_unregister_module(&nv_fops);
    }

    return rc;
}
|
|
|
|
/*
 * Full reverse of nv_module_init(): module state, the RM core, rsync
 * info, the capability driver, and finally the slab caches / stack.
 */
static void
nv_module_exit(nv_stack_t *sp)
{
    nv_module_state_exit(sp);

    rm_shutdown_rm(sp);

    nv_destroy_rsync_info();

    nv_cap_drv_exit();

    nv_module_resources_exit(sp);
}
|
|
|
|
/*
 * Bring up the module core: slab caches and stack, the capability
 * driver, rsync info, SEV detection, the RM core (rm_init_rm), and the
 * module state.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * initialized so far is torn down via the labels below (each label
 * unwinds its step and falls through to the earlier ones).
 */
static int __init
nv_module_init(nv_stack_t **sp)
{
    int rc;

    rc = nv_module_resources_init(sp);
    if (rc < 0)
    {
        return rc;
    }

    rc = nv_cap_drv_init();
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: nv-cap-drv init failed.\n");
        goto cap_drv_exit;
    }

    nv_init_rsync_info();
    nv_sev_init();

    if (!rm_init_rm(*sp))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: rm_init_rm() failed!\n");
        rc = -EIO;
        goto nvlink_exit;
    }

    rc = nv_module_state_init(*sp);
    if (rc < 0)
    {
        goto init_rm_exit;
    }

    return rc;

init_rm_exit:
    rm_shutdown_rm(*sp);

nvlink_exit:
    nv_destroy_rsync_info();

cap_drv_exit:
    nv_cap_drv_exit();
    nv_module_resources_exit(*sp);

    return rc;
}
|
|
|
|
/*
 * In this function we check for the cases where GPU exclusion is not
 * honored, and issue a warning.
 *
 * Only GPUs that support a mechanism to query UUID prior to
 * initializing the GPU can be excluded, so that we can detect and
 * exclude them during device probe. This function checks that an
 * initialized GPU was not specified in the exclusion list, and issues a
 * warning if so.
 */
static void
nv_assert_not_in_gpu_exclusion_list(
    nvidia_stack_t *sp,
    nv_state_t *nv
)
{
    /* The UUID string is allocated by RM and freed with os_free_mem(). */
    char *uuid = rm_get_gpu_uuid(sp, nv);

    if (uuid == NULL)
    {
        NV_DEV_PRINTF(NV_DBG_INFO, nv, "Unable to read UUID");
        return;
    }

    if (nv_is_uuid_in_gpu_exclusion_list(uuid))
    {
        NV_DEV_PRINTF(NV_DBG_WARNINGS, nv,
                      "Could not exclude GPU %s because PBI is not supported\n",
                      uuid);
        WARN_ON(1);
    }

    os_free_mem(uuid);

    return;
}
|
|
|
|
/*
 * Create the root capability entry ("driver/<module name>") for the
 * driver. Returns 0 on success, -ENOENT when creation fails.
 */
static int __init nv_caps_root_init(void)
{
    nvidia_caps_root = os_nv_cap_init("driver/" MODULE_NAME);

    if (nvidia_caps_root == NULL)
    {
        return -ENOENT;
    }

    return 0;
}
|
|
|
|
/* Destroy the root capability entry and clear the global pointer. */
static void nv_caps_root_exit(void)
{
    os_nv_cap_destroy_entry(nvidia_caps_root);
    nvidia_caps_root = NULL;
}
|
|
|
|
/*
 * Module load entry point: initialize memory debugging, procfs, the
 * capability root, the module core, and the device drivers; sanity-check
 * how many devices were counted vs. probed vs. initialized; then read
 * early registry keys, report applied patches, and (when built with UVM)
 * initialize UVM.
 *
 * Returns 0 on success or a negative errno; on failure everything is
 * unwound via the fall-through labels at the bottom.
 */
int __init nvidia_init_module(void)
{
    int rc;
    NvU32 count;
    nvidia_stack_t *sp = NULL;
    const NvBool is_nvswitch_present = os_is_nvswitch_present();

    nv_memdbg_init();

    rc = nv_procfs_init();
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to initialize procfs.\n");
        return rc;
    }

    rc = nv_caps_root_init();
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to initialize capabilities.\n");
        goto procfs_exit;
    }

    rc = nv_module_init(&sp);
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to initialize module.\n");
        goto caps_root_exit;
    }

    /* With no GPU and no NVSwitch present there is nothing to drive. */
    count = nvos_count_devices();
    if ((count == 0) && (!is_nvswitch_present))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA GPU found.\n");
        rc = -ENODEV;
        goto module_exit;
    }

    rc = nv_drivers_init();
    if (rc < 0)
    {
        goto module_exit;
    }

    /* Fewer probes than counted devices usually means another driver
     * (e.g. nouveau) already owns some of the hardware — warn loudly. */
    if (num_probed_nv_devices != count)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: The NVIDIA probe routine was not called for %d device(s).\n",
            count - num_probed_nv_devices);
        nv_printf(NV_DBG_ERRORS,
            "NVRM: This can occur when a driver such as: \n"
            "NVRM: nouveau, rivafb, nvidiafb or rivatv "
            "\nNVRM: was loaded and obtained ownership of the NVIDIA device(s).\n");
        nv_printf(NV_DBG_ERRORS,
            "NVRM: Try unloading the conflicting kernel module (and/or\n"
            "NVRM: reconfigure your kernel without the conflicting\n"
            "NVRM: driver(s)), then try loading the NVIDIA kernel module\n"
            "NVRM: again.\n");
    }

    if ((num_probed_nv_devices == 0) && (!is_nvswitch_present))
    {
        rc = -ENODEV;
        nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA devices probed.\n");
        goto drivers_exit;
    }

    if (num_probed_nv_devices != num_nv_devices)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: The NVIDIA probe routine failed for %d device(s).\n",
            num_probed_nv_devices - num_nv_devices);
    }

    if ((num_nv_devices == 0) && (!is_nvswitch_present))
    {
        rc = -ENODEV;
        nv_printf(NV_DBG_ERRORS,
            "NVRM: None of the NVIDIA devices were initialized.\n");
        goto drivers_exit;
    }

    /*
     * Initialize registry keys after PCI driver registration has
     * completed successfully to support per-device module
     * parameters.
     */
    nv_registry_keys_init(sp);

    nv_report_applied_patches();

    nv_printf(NV_DBG_ERRORS, "NVRM: loading %s\n", pNVRM_ID);

#if defined(NV_UVM_ENABLE)
    rc = nv_uvm_init();
    if (rc != 0)
    {
        goto drivers_exit;
    }
#endif

    /* Save the init-time stack for the matching nvidia_exit_module(). */
    __nv_init_sp = sp;

    return 0;

drivers_exit:
    nv_drivers_exit();

module_exit:
    nv_module_exit(sp);

caps_root_exit:
    nv_caps_root_exit();

procfs_exit:
    nv_procfs_exit();

    return rc;
}
|
|
|
|
/*
 * Module unload entry point: tear everything down in reverse of
 * nvidia_init_module(), reusing the stack saved in __nv_init_sp.
 */
void nvidia_exit_module(void)
{
    nvidia_stack_t *sp = __nv_init_sp;

#if defined(NV_UVM_ENABLE)
    nv_uvm_exit();
#endif

    nv_drivers_exit();

    nv_module_exit(sp);

    nv_caps_root_exit();

    nv_procfs_exit();

    nv_memdbg_exit();
}
|
|
|
|
/*
 * Allocate and zero-initialize the per-open-file private state,
 * including its per-index fops stack mutexes, event wait queue, and
 * spinlock.
 *
 * Returns the new nv_linux_file_private_t (as void *), or NULL on
 * allocation failure.
 */
static void *nv_alloc_file_private(void)
{
    nv_linux_file_private_t *nvlfp;
    unsigned int i;

    NV_KMALLOC(nvlfp, sizeof(nv_linux_file_private_t));
    if (!nvlfp)
        return NULL;

    memset(nvlfp, 0, sizeof(nv_linux_file_private_t));

    for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i)
    {
        NV_INIT_MUTEX(&nvlfp->fops_sp_lock[i]);
    }
    init_waitqueue_head(&nvlfp->waitqueue);
    NV_SPIN_LOCK_INIT(&nvlfp->fp_lock);

    return nvlfp;
}
|
|
|
|
/*
 * Release a per-open-file private structure: drain any events still queued
 * on it, free the mmap page array if one was allocated, then free the
 * structure itself. Safe to call with NULL.
 */
static void nv_free_file_private(nv_linux_file_private_t *nvlfp)
{
    if (nvlfp == NULL)
        return;

    while (nvlfp->event_data_head != NULL)
    {
        nvidia_event_t *nvet = nvlfp->event_data_head;

        nvlfp->event_data_head = nvet->next;
        NV_KFREE(nvet, sizeof(nvidia_event_t));
    }

    if (nvlfp->mmap_context.page_array != NULL)
    {
        os_free_mem(nvlfp->mmap_context.page_array);
    }

    NV_KFREE(nvlfp, sizeof(nv_linux_file_private_t));
}
|
|
|
|
|
|
static int nv_is_control_device(
|
|
struct inode *inode
|
|
)
|
|
{
|
|
return (minor((inode)->i_rdev) == NV_CONTROL_DEVICE_MINOR);
|
|
}
|
|
|
|
/*
|
|
* Search the global list of nv devices for the one with the given minor device
|
|
* number. If found, nvl is returned with nvl->ldata_lock taken.
|
|
*/
|
|
static nv_linux_state_t *find_minor(NvU32 minor)
|
|
{
|
|
nv_linux_state_t *nvl;
|
|
|
|
LOCK_NV_LINUX_DEVICES();
|
|
nvl = nv_linux_devices;
|
|
while (nvl != NULL)
|
|
{
|
|
if (nvl->minor_num == minor)
|
|
{
|
|
down(&nvl->ldata_lock);
|
|
break;
|
|
}
|
|
nvl = nvl->next;
|
|
}
|
|
|
|
UNLOCK_NV_LINUX_DEVICES();
|
|
return nvl;
|
|
}
|
|
|
|
/*
|
|
* Search the global list of nv devices for the one with the given gpu_id.
|
|
* If found, nvl is returned with nvl->ldata_lock taken.
|
|
*/
|
|
static nv_linux_state_t *find_gpu_id(NvU32 gpu_id)
|
|
{
|
|
nv_linux_state_t *nvl;
|
|
|
|
LOCK_NV_LINUX_DEVICES();
|
|
nvl = nv_linux_devices;
|
|
while (nvl != NULL)
|
|
{
|
|
nv_state_t *nv = NV_STATE_PTR(nvl);
|
|
if (nv->gpu_id == gpu_id)
|
|
{
|
|
down(&nvl->ldata_lock);
|
|
break;
|
|
}
|
|
nvl = nvl->next;
|
|
}
|
|
|
|
UNLOCK_NV_LINUX_DEVICES();
|
|
return nvl;
|
|
}
|
|
|
|
/*
|
|
* Search the global list of nv devices for the one with the given UUID. Devices
|
|
* with missing UUID information are ignored. If found, nvl is returned with
|
|
* nvl->ldata_lock taken.
|
|
*/
|
|
nv_linux_state_t *find_uuid(const NvU8 *uuid)
|
|
{
|
|
nv_linux_state_t *nvl = NULL;
|
|
nv_state_t *nv;
|
|
const NvU8 *dev_uuid;
|
|
|
|
LOCK_NV_LINUX_DEVICES();
|
|
|
|
for (nvl = nv_linux_devices; nvl; nvl = nvl->next)
|
|
{
|
|
nv = NV_STATE_PTR(nvl);
|
|
down(&nvl->ldata_lock);
|
|
dev_uuid = nv_get_cached_uuid(nv);
|
|
if (dev_uuid && memcmp(dev_uuid, uuid, GPU_UUID_LEN) == 0)
|
|
goto out;
|
|
up(&nvl->ldata_lock);
|
|
}
|
|
|
|
out:
|
|
UNLOCK_NV_LINUX_DEVICES();
|
|
return nvl;
|
|
}
|
|
|
|
/*
 * Search the global list of nv devices. The search logic is:
 *
 * 1) If any device has the given UUID, return it
 *
 * 2) If no device has the given UUID but at least one device is missing
 *    its UUID (for example because rm_init_adapter has not run on it yet),
 *    return that device.
 *
 * 3) If no device has the given UUID and all UUIDs are present, return NULL.
 *
 * In cases 1 and 2, nvl is returned with nvl->ldata_lock taken.
 *
 * The reason for this weird logic is because UUIDs aren't always available. See
 * bug 1642200.
 */
static nv_linux_state_t *find_uuid_candidate(const NvU8 *uuid)
{
    nv_linux_state_t *nvl = NULL;
    nv_state_t *nv;
    const NvU8 *dev_uuid;
    int use_missing;
    int has_missing = 0;

    LOCK_NV_LINUX_DEVICES();

    /*
     * Take two passes through the list. The first pass just looks for the UUID.
     * The second looks for the target or missing UUIDs. It would be nice if
     * this could be done in a single pass by remembering which nvls are missing
     * UUIDs, but we have to hold the nvl lock after we check for the UUID.
     */
    for (use_missing = 0; use_missing <= 1; use_missing++)
    {
        for (nvl = nv_linux_devices; nvl; nvl = nvl->next)
        {
            nv = NV_STATE_PTR(nvl);
            /* Taken before the UUID check; released below unless we return
             * this device (goto out keeps the lock held for the caller). */
            down(&nvl->ldata_lock);
            dev_uuid = nv_get_cached_uuid(nv);
            if (dev_uuid)
            {
                /* Case 1: If a device has the given UUID, return it */
                if (memcmp(dev_uuid, uuid, GPU_UUID_LEN) == 0)
                    goto out;
            }
            else
            {
                /* Case 2: If no device has the given UUID but at least one
                 * device is missing its UUID, return that device. */
                if (use_missing)
                    goto out;
                has_missing = 1;
            }
            up(&nvl->ldata_lock);
        }

        /* Case 3: If no device has the given UUID and all UUIDs are present,
         * return NULL. (nvl is naturally NULL when the inner loop exhausts.) */
        if (!has_missing)
            break;
    }

out:
    UNLOCK_NV_LINUX_DEVICES();
    return nvl;
}
|
|
|
|
/*
 * Free every per-device stack allocated by nv_dev_alloc_stacks(), clearing
 * each slot. Slots that were never allocated (NULL) are skipped, so this is
 * safe to call on a partially-populated array.
 */
void nv_dev_free_stacks(nv_linux_state_t *nvl)
{
    NvU32 idx;

    for (idx = 0; idx < NV_DEV_STACK_COUNT; idx++)
    {
        if (nvl->sp[idx] == NULL)
            continue;

        nv_kmem_cache_free_stack(nvl->sp[idx]);
        nvl->sp[idx] = NULL;
    }
}
|
|
|
|
/*
 * Allocate all NV_DEV_STACK_COUNT per-device stacks. On any failure, every
 * stack allocated so far is released and the allocator's error code is
 * returned; on success returns 0.
 */
static int nv_dev_alloc_stacks(nv_linux_state_t *nvl)
{
    NvU32 idx;
    int status;

    for (idx = 0; idx < NV_DEV_STACK_COUNT; idx++)
    {
        status = nv_kmem_cache_alloc_stack(&nvl->sp[idx]);
        if (status != 0)
        {
            /* Unwind the slots already filled before reporting failure. */
            nv_dev_free_stacks(nvl);
            return status;
        }
    }

    return 0;
}
|
|
|
|
/*
 * Validate NUMA-related state before device start.
 *
 * When NUMA is not disabled for this device, the control device must have a
 * nonzero memblock size recorded; if so, it is copied to the per-device
 * state so the two stay consistent. Returns 0 on success, -EINVAL when the
 * control device's memblock size is zero.
 *
 * Fix: the error message previously lacked a trailing newline, unlike every
 * other nv_printf() in this file, which would leave the log line unterminated.
 */
static int validate_numa_start_state(nv_linux_state_t *nvl)
{
    int rc = 0;
    int numa_status = nv_get_numa_status(nvl);

    if (numa_status != NV_IOCTL_NUMA_STATUS_DISABLED)
    {
        if (nv_ctl_device.numa_memblock_size == 0)
        {
            nv_printf(NV_DBG_ERRORS, "NVRM: numa memblock size of zero "
                      "found during device start\n");
            rc = -EINVAL;
        }
        else
        {
            /* Keep the individual devices consistent with the control device */
            nvl->numa_memblock_size = nv_ctl_device.numa_memblock_size;
        }
    }

    return rc;
}
|
|
|
|
/*
 * Report the number of DPAUX instances recorded in the nv state.
 * Always succeeds.
 */
NV_STATUS NV_API_CALL nv_get_num_dpaux_instances(nv_state_t *nv, NvU32 *num_instances)
{
    const NvU32 count = nv->num_dpaux_instance;

    *num_instances = count;
    return NV_OK;
}
|
|
|
|
/*
 * Forward an interrupt notification for this device to UVM, identified by
 * the device's cached UUID. Compiles to a no-op when UVM support is not
 * built in.
 */
void NV_API_CALL
nv_schedule_uvm_isr(nv_state_t *nv)
{
#if defined(NV_UVM_ENABLE)
    nv_uvm_event_interrupt(nv_get_cached_uuid(nv));
#endif
}
|
|
|
|
/*
 * Brings up the device on the first file open. Assumes nvl->ldata_lock is held.
 *
 * Sequence: acquire rsync info, validate NUMA state, take a dynamic-power
 * reference, init ibmnpu devices, allocate per-device stacks, configure
 * MSI/MSI-X, register the interrupt handler, start the bottom-half kthread
 * queues, and finally run rm_init_adapter(). On any failure the `failed`
 * label unwinds everything acquired so far and returns a negative errno.
 */
static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
    NvU32 msi_config = 0;
#endif
    int rc = 0;
    NvBool kthread_init = NV_FALSE;  /* bottom_half_q initialized (for unwind) */
    NvBool power_ref = NV_FALSE;     /* dynamic-power ref taken (for unwind) */

    rc = nv_get_rsync_info();
    if (rc != 0)
    {
        /* Nothing acquired yet; return directly rather than goto failed. */
        return rc;
    }

    rc = validate_numa_start_state(nvl);
    if (rc != 0)
    {
        goto failed;
    }

    /* A PCI device with a zero device ID has no backing hardware. */
    if (nv_dev_is_pci(nvl->dev) && (nv->pci_info.device_id == 0))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: open of non-existent GPU with minor number %d\n", nvl->minor_num);
        rc = -ENXIO;
        goto failed;
    }

    /*
     * Keep the GPU powered through bring-up: COARSE ref for a normal open,
     * FINE ref when persistent SW state is kept across opens. The FINE ref
     * is dropped at the end of this function once RM init is done.
     */
    if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE))
    {
        if (rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE) != NV_OK)
        {
            rc = -EINVAL;
            goto failed;
        }
        power_ref = NV_TRUE;
    }
    else
    {
        if (rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE) != NV_OK)
        {
            rc = -EINVAL;
            goto failed;
        }
        power_ref = NV_TRUE;
    }

    rc = nv_init_ibmnpu_devices(nv);
    if (rc != 0)
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: failed to initialize ibmnpu devices attached to GPU with minor number %d\n",
                  nvl->minor_num);
        goto failed;
    }

    if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE))
    {
        rc = nv_dev_alloc_stacks(nvl);
        if (rc != 0)
            goto failed;
    }

#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
    /* MSI/MSI-X setup, gated on the NV_REG_ENABLE_MSI registry key.
     * MSI-X is preferred; plain MSI is used only if MSI-X was not enabled. */
    if (nv_dev_is_pci(nvl->dev))
    {
        if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE))
        {
            rm_read_registry_dword(sp, nv, NV_REG_ENABLE_MSI, &msi_config);
            if (msi_config == 1)
            {
                if (pci_find_capability(nvl->pci_dev, PCI_CAP_ID_MSIX))
                {
                    nv_init_msix(nv);
                }
                if (pci_find_capability(nvl->pci_dev, PCI_CAP_ID_MSI) &&
                    !(nv->flags & NV_FLAG_USES_MSIX))
                {
                    nv_init_msi(nv);
                }
            }
        }
    }
#endif

    /* No MSI, no MSI-X, no legacy IRQ line, and not a SOC device:
     * there is no way to receive interrupts at all. */
    if (((!(nv->flags & NV_FLAG_USES_MSI)) && (!(nv->flags & NV_FLAG_USES_MSIX)))
        && (nv->interrupt_line == 0) && !(nv->flags & NV_FLAG_SOC_DISPLAY)
        && !(nv->flags & NV_FLAG_SOC_IGPU))
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                      "No interrupts of any type are available. Cannot use this GPU.\n");
        rc = -EIO;
        goto failed;
    }

    /* Register the appropriate interrupt handler for the device type. */
    rc = 0;
    if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE))
    {
        if (nv->flags & NV_FLAG_SOC_DISPLAY)
        {
            rc = nv_soc_register_irqs(nv);
        }
        else if (!(nv->flags & NV_FLAG_USES_MSIX))
        {
            rc = request_threaded_irq(nv->interrupt_line, nvidia_isr,
                                      nvidia_isr_kthread_bh, nv_default_irq_flags(nv),
                                      nv_device_name, (void *)nvl);
        }
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
        else
        {
            rc = nv_request_msix_irq(nvl);
        }
#endif
    }
    if (rc != 0)
    {
        if ((nv->interrupt_line != 0) && (rc == -EBUSY))
        {
            NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                          "Tried to get IRQ %d, but another driver\n",
                          (unsigned int) nv->interrupt_line);
            nv_printf(NV_DBG_ERRORS, "NVRM: has it and is not sharing it.\n");
            nv_printf(NV_DBG_ERRORS, "NVRM: You may want to verify that no audio driver");
            nv_printf(NV_DBG_ERRORS, " is using the IRQ.\n");
        }
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "request_irq() failed (%d)\n", rc);
        goto failed;
    }

    /* Bottom-half infrastructure: unlocked-ISR mutex and kthread queues. */
    if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE))
    {
        rc = os_alloc_mutex(&nvl->isr_bh_unlocked_mutex);
        if (rc != 0)
            goto failed;
        nv_kthread_q_item_init(&nvl->bottom_half_q_item, nvidia_isr_bh_unlocked, (void *)nv);
        rc = nv_kthread_q_init(&nvl->bottom_half_q, nv_device_name);
        if (rc != 0)
            goto failed;
        kthread_init = NV_TRUE;

        rc = nv_kthread_q_init(&nvl->queue.nvk, "nv_queue");
        if (rc)
            goto failed;
        nv->queue = &nvl->queue;
    }

    if (!rm_init_adapter(sp, nv))
    {
        /* RM init failed: release whichever interrupt path was registered
         * above before taking the common unwind path. */
        if (!(nv->flags & NV_FLAG_USES_MSIX) &&
            !(nv->flags & NV_FLAG_SOC_DISPLAY) &&
            !(nv->flags & NV_FLAG_SOC_IGPU))
        {
            free_irq(nv->interrupt_line, (void *) nvl);
        }
        else if (nv->flags & NV_FLAG_SOC_DISPLAY)
        {
            nv_soc_free_irqs(nv);
        }
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
        else
        {
            nv_free_msix_irq(nvl);
        }
#endif
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                      "rm_init_adapter failed, device minor number %d\n",
                      nvl->minor_num);
        rc = -EIO;
        goto failed;
    }

    {
        const NvU8 *uuid = rm_get_gpu_uuid_raw(sp, nv);

        if (uuid != NULL)
        {
#if defined(NV_UVM_ENABLE)
            nv_uvm_notify_start_device(uuid);
#endif
        }
    }

    if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE))
    {
        nv_acpi_register_notifier(nvl);
    }

    nv->flags |= NV_FLAG_OPEN;

    /*
     * Now that RM init is done, allow dynamic power to control the GPU in FINE
     * mode, if enabled. (If the mode is COARSE, this unref will do nothing
     * which will cause the GPU to remain powered up.)
     * This is balanced by a FINE ref increment at the beginning of
     * nv_stop_device().
     */
    rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE);

    return 0;

failed:
    /* Unwind in reverse order of acquisition. Each step checks whether its
     * resource was actually acquired, so the label is safe from any point. */
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
    if (nv->flags & NV_FLAG_USES_MSI)
    {
        nv->flags &= ~NV_FLAG_USES_MSI;
        NV_PCI_DISABLE_MSI(nvl->pci_dev);
        if(nvl->irq_count)
            NV_KFREE(nvl->irq_count, nvl->num_intr * sizeof(nv_irq_count_info_t));
    }
    if (nv->flags & NV_FLAG_USES_MSIX)
    {
        nv->flags &= ~NV_FLAG_USES_MSIX;
        pci_disable_msix(nvl->pci_dev);
        NV_KFREE(nvl->irq_count, nvl->num_intr*sizeof(nv_irq_count_info_t));
        NV_KFREE(nvl->msix_entries, nvl->num_intr*sizeof(struct msix_entry));
    }

    if (nvl->msix_bh_mutex)
    {
        os_free_mutex(nvl->msix_bh_mutex);
        nvl->msix_bh_mutex = NULL;
    }
#endif

    if (nv->queue && !(nv->flags & NV_FLAG_PERSISTENT_SW_STATE))
    {
        nv->queue = NULL;
        nv_kthread_q_stop(&nvl->queue.nvk);
    }

    if (kthread_init && !(nv->flags & NV_FLAG_PERSISTENT_SW_STATE))
        nv_kthread_q_stop(&nvl->bottom_half_q);

    if (nvl->isr_bh_unlocked_mutex)
    {
        os_free_mutex(nvl->isr_bh_unlocked_mutex);
        nvl->isr_bh_unlocked_mutex = NULL;
    }

    nv_dev_free_stacks(nvl);

    nv_unregister_ibmnpu_devices(nv);

    if (power_ref)
    {
        rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE);
    }

    nv_put_rsync_info();

    return rc;
}
|
|
|
|
/*
 * Makes sure the device is ready for operations and increases nvl->usage_count.
 * Assumes nvl->ldata_lock is held.
 *
 * On the first open (NV_FLAG_OPEN clear) the device is brought up via
 * nv_start_device(). Subsequent opens only bump the usage count, unless
 * RM reports the device as sequestered. Returns 0 on success or a
 * negative errno.
 */
static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int rc;
    NV_STATUS status;

    if (os_is_vgx_hyper())
    {
        /* fail open if GPU is being unbound */
        if (nv->flags & NV_FLAG_UNBIND_LOCK)
        {
            NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                          "Open failed as GPU is locked for unbind operation\n");
            return -ENODEV;
        }
    }

    NV_DEV_PRINTF(NV_DBG_INFO, nv, "Opening GPU with minor number %d\n",
                  nvl->minor_num);

    /* Reject opens while the device is being surprise-removed. */
    status = nv_check_gpu_state(nv);
    if (status == NV_ERR_GPU_IS_LOST)
    {
        NV_DEV_PRINTF(NV_DBG_INFO, nv, "Device in removal process\n");
        return -ENODEV;
    }

    if ( ! (nv->flags & NV_FLAG_OPEN))
    {
        /* Sanity check: !NV_FLAG_OPEN requires usage_count == 0 */
        if (NV_ATOMIC_READ(nvl->usage_count) != 0)
        {
            NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                          "Minor device %u is referenced without being open!\n",
                          nvl->minor_num);
            WARN_ON(1);
            return -EBUSY;
        }

        /* First open: full device bring-up. */
        rc = nv_start_device(nv, sp);
        if (rc != 0)
            return rc;
    }
    else if (rm_is_device_sequestered(sp, nv))
    {
        /* Do not increment the usage count of sequestered devices. */
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Device is currently unavailable\n");
        return -EBUSY;
    }

    NV_ATOMIC_INC(nvl->usage_count);
    return 0;
}
|
|
|
|
/*
 * Prepare this open file for mapping revocation: give it a private
 * struct address_space (so unmap_mapping_range() can later tear down its
 * mappings) and register it on the device's open-files list. All of this
 * is done under nvl->mmap_lock.
 */
static void nv_init_mapping_revocation(nv_linux_state_t *nvl,
                                       struct file *file,
                                       nv_linux_file_private_t *nvlfp,
                                       struct inode *inode)
{
    down(&nvl->mmap_lock);

    /* Set up struct address_space for use with unmap_mapping_range() */
    nv_address_space_init_once(&nvlfp->mapping);
    nvlfp->mapping.host = inode;
    nvlfp->mapping.a_ops = inode->i_mapping->a_ops;
#if defined(NV_ADDRESS_SPACE_HAS_BACKING_DEV_INFO)
    nvlfp->mapping.backing_dev_info = inode->i_mapping->backing_dev_info;
#endif
    /* Future mmaps on this fd resolve through the private mapping. */
    file->f_mapping = &nvlfp->mapping;

    /* Add nvlfp to list of open files in nvl for mapping revocation */
    list_add(&nvlfp->entry, &nvl->open_files);

    up(&nvl->mmap_lock);
}
|
|
|
|
/*
** nvidia_open
**
** nv driver open entry point. Sessions are created here.
**
** Allocates the per-fd private data and stacks, then either dispatches to
** the control-device open routine or locates the target GPU by minor
** number and opens it. On any failure all per-fd resources are released
** and the fd's private pointer is cleared.
*/
int
nvidia_open(
    struct inode *inode,
    struct file *file
)
{
    nv_state_t *nv = NULL;
    nv_linux_state_t *nvl = NULL;
    int rc = 0;
    nv_linux_file_private_t *nvlfp = NULL;
    nvidia_stack_t *sp = NULL;
    unsigned int i;
    unsigned int k;

    nv_printf(NV_DBG_INFO, "NVRM: nvidia_open...\n");

    nvlfp = nv_alloc_file_private();
    if (nvlfp == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate file private!\n");
        return -ENOMEM;
    }

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        nv_free_file_private(nvlfp);
        return rc;
    }

    /* One stack per file-operation class; unwind on partial failure. */
    for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i)
    {
        rc = nv_kmem_cache_alloc_stack(&nvlfp->fops_sp[i]);
        if (rc != 0)
        {
            nv_kmem_cache_free_stack(sp);
            for (k = 0; k < i; ++k)
            {
                nv_kmem_cache_free_stack(nvlfp->fops_sp[k]);
            }
            nv_free_file_private(nvlfp);
            return rc;
        }
    }

    NV_SET_FILE_PRIVATE(file, nvlfp);
    nvlfp->sp = sp;

    /* for control device, just jump to its open routine */
    /* after setting up the private data */
    if (nv_is_control_device(inode))
    {
        rc = nvidia_ctl_open(inode, file);
        if (rc != 0)
            goto failed;
        return rc;
    }

    /* Block opens during system power transitions. */
    rc = nv_down_read_interruptible(&nv_system_pm_lock);
    if (rc < 0)
        goto failed;

    /* Takes nvl->ldata_lock */
    nvl = find_minor(NV_DEVICE_MINOR_NUMBER(inode));
    if (!nvl)
    {
        rc = -ENODEV;
        up_read(&nv_system_pm_lock);
        goto failed;
    }

    nvlfp->nvptr = nvl;
    nv = NV_STATE_PTR(nvl);

    /* Excluded GPUs may not be opened at all. */
    if ((nv->flags & NV_FLAG_EXCLUDE) != 0)
    {
        char *uuid = rm_get_gpu_uuid(sp, nv);
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                      "open() not permitted for excluded %s\n",
                      (uuid != NULL) ? uuid : "GPU");
        if (uuid != NULL)
            os_free_mem(uuid);
        rc = -EPERM;
        goto failed1;
    }

    rc = nv_open_device(nv, sp);
    /* Fall-through on error */

    nv_assert_not_in_gpu_exclusion_list(sp, nv);

failed1:
    /* Release the per-device lock taken by find_minor(). */
    up(&nvl->ldata_lock);

    up_read(&nv_system_pm_lock);
failed:
    if (rc != 0)
    {
        if (nvlfp != NULL)
        {
            nv_kmem_cache_free_stack(sp);
            for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i)
            {
                nv_kmem_cache_free_stack(nvlfp->fops_sp[i]);
            }
            nv_free_file_private(nvlfp);
            NV_SET_FILE_PRIVATE(file, NULL);
        }
    }
    else
    {
        /* Success: wire this fd up for later mapping revocation. */
        nv_init_mapping_revocation(nvl, file, nvlfp, inode);
    }

    return rc;
}
|
|
|
|
/*
 * Warn (once per call site semantics of WARN_ON) if the device is being
 * shut down while its NUMA status is neither OFFLINE nor DISABLED.
 */
static void validate_numa_shutdown_state(nv_linux_state_t *nvl)
{
    const int numa_status = nv_get_numa_status(nvl);
    const int acceptable = (numa_status == NV_IOCTL_NUMA_STATUS_OFFLINE) ||
                           (numa_status == NV_IOCTL_NUMA_STATUS_DISABLED);

    WARN_ON(!acceptable);
}
|
|
|
|
/*
 * Full adapter shutdown: disable RM, stop the bottom-half kthread queues,
 * release the interrupt path that nv_start_device() registered (legacy IRQ,
 * SOC IRQs, or MSI-X), free the ISR mutexes, and finally let RM tear down
 * its per-adapter state.
 */
void nv_shutdown_adapter(nvidia_stack_t *sp,
                         nv_state_t *nv,
                         nv_linux_state_t *nvl)
{
    validate_numa_shutdown_state(nvl);

    rm_disable_adapter(sp, nv);

    // It's safe to call nv_kthread_q_stop even if queue is not initialized
    nv_kthread_q_stop(&nvl->bottom_half_q);

    if (nv->queue != NULL)
    {
        nv->queue = NULL;
        nv_kthread_q_stop(&nvl->queue.nvk);
    }

    if (nvl->isr_bh_unlocked_mutex)
    {
        os_free_mutex(nvl->isr_bh_unlocked_mutex);
        nvl->isr_bh_unlocked_mutex = NULL;
    }

    /* Same three-way split as the registration in nv_start_device():
     * legacy/MSI IRQ line, SOC display IRQs, or MSI-X vectors. */
    if (!(nv->flags & NV_FLAG_USES_MSIX) &&
        !(nv->flags & NV_FLAG_SOC_DISPLAY) &&
        !(nv->flags & NV_FLAG_SOC_IGPU))
    {
        free_irq(nv->interrupt_line, (void *)nvl);
        if (nv->flags & NV_FLAG_USES_MSI)
        {
            NV_PCI_DISABLE_MSI(nvl->pci_dev);
            if(nvl->irq_count)
                NV_KFREE(nvl->irq_count, nvl->num_intr * sizeof(nv_irq_count_info_t));
        }
    }
    else if (nv->flags & NV_FLAG_SOC_DISPLAY)
    {
        nv_soc_free_irqs(nv);
    }
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
    else
    {
        nv_free_msix_irq(nvl);
        pci_disable_msix(nvl->pci_dev);
        nv->flags &= ~NV_FLAG_USES_MSIX;
        NV_KFREE(nvl->msix_entries, nvl->num_intr*sizeof(struct msix_entry));
        NV_KFREE(nvl->irq_count, nvl->num_intr*sizeof(nv_irq_count_info_t));
    }
#endif

    if (nvl->msix_bh_mutex)
    {
        os_free_mutex(nvl->msix_bh_mutex);
        nvl->msix_bh_mutex = NULL;
    }

    rm_shutdown_adapter(sp, nv);
}
|
|
|
|
/*
 * Tears down the device on the last file close. Assumes nvl->ldata_lock is
 * held.
 *
 * Order matters: take a FINE power ref so the GPU stays up for teardown,
 * notify UVM, shut down (or merely disable, in persistence mode) the
 * adapter, free stacks, clear NV_FLAG_OPEN, then drop the power reference
 * that nv_start_device() took.
 */
static void nv_stop_device(nv_state_t *nv, nvidia_stack_t *sp)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    /* Logged at most once per driver load. */
    static int persistence_mode_notice_logged;

    /*
     * The GPU needs to be powered on to go through the teardown sequence.
     * This balances the FINE unref at the end of nv_start_device().
     */
    rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE);

#if defined(NV_UVM_ENABLE)
    {
        const NvU8* uuid;
        // Inform UVM before disabling adapter. Use cached copy
        uuid = nv_get_cached_uuid(nv);
        if (uuid != NULL)
        {
            // this function cannot fail
            nv_uvm_notify_stop_device(uuid);
        }
    }
#endif
    /* Adapter is already shutdown as part of nvidia_pci_remove */
    if (!nv->removed)
    {
        if (nv->flags & NV_FLAG_PERSISTENT_SW_STATE)
        {
            /* Persistence mode: keep SW state, only disable the adapter. */
            rm_disable_adapter(sp, nv);
        }
        else
        {
            nv_acpi_unregister_notifier(nvl);
            nv_shutdown_adapter(sp, nv, nvl);
        }
    }

    if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE))
    {
        nv_dev_free_stacks(nvl);
    }

    if ((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) &&
        (!persistence_mode_notice_logged) && (!os_is_vgx_hyper()))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Persistence mode is deprecated and"
                  " will be removed in a future release. Please use"
                  " nvidia-persistenced instead.\n");
        persistence_mode_notice_logged = 1;
    }

    /* leave INIT flag alone so we don't reinit every time */
    nv->flags &= ~NV_FLAG_OPEN;

    nv_unregister_ibmnpu_devices(nv);

    if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE))
    {
        rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE);
    }
    else
    {
        /* If in legacy persistence mode, only unref FINE refcount. */
        rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE);
    }

    nv_put_rsync_info();
}
|
|
|
|
/*
 * Decreases nvl->usage_count, stopping the device when it reaches 0. Assumes
 * nvl->ldata_lock is held.
 */
static void nv_close_device(nv_state_t *nv, nvidia_stack_t *sp)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    /* Closing a device that was never opened indicates refcount corruption;
     * warn loudly and bail rather than underflowing the counter. */
    if (NV_ATOMIC_READ(nvl->usage_count) == 0)
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: Attempting to close unopened minor device %u!\n",
                  nvl->minor_num);
        WARN_ON(1);
        return;
    }

    /* Last reference gone: tear the device down. */
    if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count))
        nv_stop_device(nv, sp);
}
|
|
|
|
/*
** nvidia_close
**
** Primary driver close entry point.
*/

/*
 * Common close work, run either directly from nvidia_close() or deferred
 * on a kthread queue: clean up the RM file private, unlink the fd from the
 * device's open-files list, drop a usage count, and — if this was the last
 * reference to a surprise-removed device — free the linux state itself.
 */
static void
nvidia_close_callback(
    nv_linux_file_private_t *nvlfp
)
{
    nv_linux_state_t *nvl = nvlfp->nvptr;
    nv_state_t *nv = NV_STATE_PTR(nvl);
    nvidia_stack_t *sp = nvlfp->sp;
    unsigned int i;
    NvBool bRemove = NV_FALSE;

    rm_cleanup_file_private(sp, nv, &nvlfp->nvfp);

    /* Remove this fd from the mapping-revocation list. */
    down(&nvl->mmap_lock);
    list_del(&nvlfp->entry);
    up(&nvl->mmap_lock);

    down(&nvl->ldata_lock);
    nv_close_device(nv, sp);

    /* Decide whether RM wants the PCI device removed once idle. */
    bRemove = (!NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv)) &&
              (NV_ATOMIC_READ(nvl->usage_count) == 0) &&
              rm_get_device_remove_flag(sp, nv->gpu_id);

    for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i)
    {
        nv_kmem_cache_free_stack(nvlfp->fops_sp[i]);
    }

    nv_free_file_private(nvlfp);

    /*
     * In case of surprise removal of device, we have 2 cases as below:
     *
     * 1> When nvidia_pci_remove is scheduled prior to nvidia_close.
     * nvidia_pci_remove will not destroy linux layer locks & nv linux state
     * struct but will set variable nv->removed for nvidia_close.
     * Once all the clients are closed, last nvidia_close will clean up linux
     * layer locks and nv linux state struct.
     *
     * 2> When nvidia_close is scheduled prior to nvidia_pci_remove.
     * This will be treated as normal working case. nvidia_close will not do
     * any cleanup related to linux layer locks and nv linux state struct.
     * nvidia_pci_remove when scheduled will do necessary cleanup.
     */
    if ((NV_ATOMIC_READ(nvl->usage_count) == 0) && nv->removed)
    {
        /* Case 1: last close after surprise removal frees nvl outright
         * (ldata_lock is destroyed with the rest of the locks, not released). */
        nvidia_frontend_remove_device((void *)&nv_fops, nvl);
        nv_lock_destroy_locks(sp, nv);
        NV_KFREE(nvl, sizeof(nv_linux_state_t));
    }
    else
    {
        up(&nvl->ldata_lock);

#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE)
        if (bRemove)
        {
            NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(nvl->pci_dev);
        }
#endif
    }

    nv_kmem_cache_free_stack(sp);
}
|
|
|
|
static void nvidia_close_deferred(void *data)
|
|
{
|
|
nv_linux_file_private_t *nvlfp = data;
|
|
|
|
down_read(&nv_system_pm_lock);
|
|
|
|
nvidia_close_callback(nvlfp);
|
|
|
|
up_read(&nv_system_pm_lock);
|
|
}
|
|
|
|
/*
 * Driver release() entry point. Control-device fds are routed to
 * nvidia_ctl_close(); GPU fds run nvidia_close_callback() under the system
 * PM lock, or — if taking that lock is interrupted by a signal — defer the
 * close to a kthread so the work still happens. Always returns 0.
 */
int
nvidia_close(
    struct inode *inode,
    struct file *file
)
{
    int rc;
    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file);
    nv_linux_state_t *nvl = nvlfp->nvptr;
    nv_state_t *nv = NV_STATE_PTR(nvl);

    NV_DEV_PRINTF(NV_DBG_INFO, nv, "nvidia_close on GPU with minor number %d\n", NV_DEVICE_MINOR_NUMBER(inode));

    if (nv_is_control_device(inode))
    {
        return nvidia_ctl_close(inode, file);
    }

    NV_SET_FILE_PRIVATE(file, NULL);

    rc = nv_down_read_interruptible(&nv_system_pm_lock);
    if (rc == 0)
    {
        nvidia_close_callback(nvlfp);
        up_read(&nv_system_pm_lock);
    }
    else
    {
        /* Interrupted by a signal: hand the close work to the deferred
         * close queue so it still runs to completion. */
        nv_kthread_q_item_init(&nvlfp->deferred_close_q_item,
                               nvidia_close_deferred,
                               nvlfp);
        rc = nv_kthread_q_schedule_q_item(&nv_deferred_close_kthread_q,
                                          &nvlfp->deferred_close_q_item);
        /* Scheduling should always succeed (the item was just initialized
         * and cannot already be pending). */
        WARN_ON(rc == 0);
    }

    return 0;
}
|
|
|
|
/*
 * poll() entry point. Reports (POLLPRI | POLLIN) when this fd has queued
 * event data or a pending dataless event (which is consumed here), POLLHUP
 * when the GPU has been lost, and 0 otherwise. Blocking callers are parked
 * on the fd's wait queue.
 */
unsigned int
nvidia_poll(
    struct file *file,
    poll_table *wait
)
{
    unsigned int mask = 0;
    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file);
    unsigned long eflags;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file);
    nv_state_t *nv = NV_STATE_PTR(nvl);
    NV_STATUS status;

    status = nv_check_gpu_state(nv);
    if (status == NV_ERR_GPU_IS_LOST)
    {
        NV_DEV_PRINTF(NV_DBG_INFO, nv, "GPU is lost, skipping nvidia_poll\n");
        return POLLHUP;
    }

    /* Only register with the wait queue for blocking fds. */
    if ((file->f_flags & O_NONBLOCK) == 0)
        poll_wait(file, &nvlfp->waitqueue, wait);

    /* fp_lock serializes against the event producers. */
    NV_SPIN_LOCK_IRQSAVE(&nvlfp->fp_lock, eflags);

    if ((nvlfp->event_data_head != NULL) || nvlfp->dataless_event_pending)
    {
        mask = (POLLPRI | POLLIN);
        /* A dataless event is one-shot: clear it once reported. */
        nvlfp->dataless_event_pending = NV_FALSE;
    }

    NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags);

    return mask;
}
|
|
|
|
/*
 * Restrict an ioctl case to the control device: if NV_FLAG_CONTROL is not
 * set on (nv), set `status` to -EINVAL and jump to the `done` label. Both
 * `status` and `done` must exist in the enclosing scope.
 */
#define NV_CTL_DEVICE_ONLY(nv)                    \
{                                                 \
    if (((nv)->flags & NV_FLAG_CONTROL) == 0)     \
    {                                             \
        status = -EINVAL;                         \
        goto done;                                \
    }                                             \
}

/*
 * Opposite restriction: only allow actual (non-control) devices. Same
 * `status`/`done` contract as NV_CTL_DEVICE_ONLY.
 */
#define NV_ACTUAL_DEVICE_ONLY(nv)                 \
{                                                 \
    if (((nv)->flags & NV_FLAG_CONTROL) != 0)     \
    {                                             \
        status = -EINVAL;                         \
        goto done;                                \
    }                                             \
}
|
|
|
|
/*
|
|
* Fills the ci array with the state of num_entries devices. Returns -EINVAL if
|
|
* num_entries isn't big enough to hold all available devices.
|
|
*/
|
|
static int nvidia_read_card_info(nv_ioctl_card_info_t *ci, size_t num_entries)
|
|
{
|
|
nv_state_t *nv;
|
|
nv_linux_state_t *nvl;
|
|
size_t i = 0;
|
|
int rc = 0;
|
|
|
|
/* Clear each card's flags field the lazy way */
|
|
memset(ci, 0, num_entries * sizeof(ci[0]));
|
|
|
|
LOCK_NV_LINUX_DEVICES();
|
|
|
|
if (num_entries < num_nv_devices)
|
|
{
|
|
rc = -EINVAL;
|
|
goto out;
|
|
}
|
|
|
|
for (nvl = nv_linux_devices; nvl && i < num_entries; nvl = nvl->next)
|
|
{
|
|
nv = NV_STATE_PTR(nvl);
|
|
|
|
/* We do not include excluded GPUs in the list... */
|
|
if ((nv->flags & NV_FLAG_EXCLUDE) != 0)
|
|
continue;
|
|
|
|
ci[i].valid = NV_TRUE;
|
|
ci[i].pci_info.domain = nv->pci_info.domain;
|
|
ci[i].pci_info.bus = nv->pci_info.bus;
|
|
ci[i].pci_info.slot = nv->pci_info.slot;
|
|
ci[i].pci_info.vendor_id = nv->pci_info.vendor_id;
|
|
ci[i].pci_info.device_id = nv->pci_info.device_id;
|
|
ci[i].gpu_id = nv->gpu_id;
|
|
ci[i].interrupt_line = nv->interrupt_line;
|
|
ci[i].reg_address = nv->regs->cpu_address;
|
|
ci[i].reg_size = nv->regs->size;
|
|
ci[i].minor_number = nvl->minor_num;
|
|
if (nv_dev_is_pci(nvl->dev))
|
|
{
|
|
ci[i].fb_address = nv->fb->cpu_address;
|
|
ci[i].fb_size = nv->fb->size;
|
|
}
|
|
i++;
|
|
}
|
|
|
|
out:
|
|
UNLOCK_NV_LINUX_DEVICES();
|
|
return rc;
|
|
}
|
|
|
|
int
|
|
nvidia_ioctl(
|
|
struct inode *inode,
|
|
struct file *file,
|
|
unsigned int cmd,
|
|
unsigned long i_arg)
|
|
{
|
|
NV_STATUS rmStatus;
|
|
int status = 0;
|
|
nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file);
|
|
nv_state_t *nv = NV_STATE_PTR(nvl);
|
|
nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file);
|
|
nvidia_stack_t *sp = NULL;
|
|
nv_ioctl_xfer_t ioc_xfer;
|
|
void *arg_ptr = (void *) i_arg;
|
|
void *arg_copy = NULL;
|
|
size_t arg_size = 0;
|
|
int arg_cmd;
|
|
|
|
nv_printf(NV_DBG_INFO, "NVRM: ioctl(0x%x, 0x%x, 0x%x)\n",
|
|
_IOC_NR(cmd), (unsigned int) i_arg, _IOC_SIZE(cmd));
|
|
|
|
status = nv_down_read_interruptible(&nv_system_pm_lock);
|
|
if (status < 0)
|
|
return status;
|
|
|
|
down(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_IOCTL]);
|
|
sp = nvlfp->fops_sp[NV_FOPS_STACK_INDEX_IOCTL];
|
|
|
|
rmStatus = nv_check_gpu_state(nv);
|
|
if (rmStatus == NV_ERR_GPU_IS_LOST)
|
|
{
|
|
nv_printf(NV_DBG_INFO, "NVRM: GPU is lost, skipping nvidia_ioctl\n");
|
|
status = -EINVAL;
|
|
goto done;
|
|
}
|
|
|
|
arg_size = _IOC_SIZE(cmd);
|
|
arg_cmd = _IOC_NR(cmd);
|
|
|
|
if (arg_cmd == NV_ESC_IOCTL_XFER_CMD)
|
|
{
|
|
if (arg_size != sizeof(nv_ioctl_xfer_t))
|
|
{
|
|
nv_printf(NV_DBG_ERRORS,
|
|
"NVRM: invalid ioctl XFER structure size!\n");
|
|
status = -EINVAL;
|
|
goto done;
|
|
}
|
|
|
|
if (NV_COPY_FROM_USER(&ioc_xfer, arg_ptr, sizeof(ioc_xfer)))
|
|
{
|
|
nv_printf(NV_DBG_ERRORS,
|
|
"NVRM: failed to copy in ioctl XFER data!\n");
|
|
status = -EFAULT;
|
|
goto done;
|
|
}
|
|
|
|
arg_cmd = ioc_xfer.cmd;
|
|
arg_size = ioc_xfer.size;
|
|
arg_ptr = NvP64_VALUE(ioc_xfer.ptr);
|
|
|
|
if (arg_size > NV_ABSOLUTE_MAX_IOCTL_SIZE)
|
|
{
|
|
nv_printf(NV_DBG_ERRORS, "NVRM: invalid ioctl XFER size!\n");
|
|
status = -EINVAL;
|
|
goto done;
|
|
}
|
|
}
|
|
|
|
NV_KMALLOC(arg_copy, arg_size);
|
|
if (arg_copy == NULL)
|
|
{
|
|
nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate ioctl memory\n");
|
|
status = -ENOMEM;
|
|
goto done;
|
|
}
|
|
|
|
if (NV_COPY_FROM_USER(arg_copy, arg_ptr, arg_size))
|
|
{
|
|
nv_printf(NV_DBG_ERRORS, "NVRM: failed to copy in ioctl data!\n");
|
|
status = -EFAULT;
|
|
goto done;
|
|
}
|
|
|
|
switch (arg_cmd)
|
|
{
|
|
case NV_ESC_QUERY_DEVICE_INTR:
|
|
{
|
|
nv_ioctl_query_device_intr *query_intr = arg_copy;
|
|
|
|
NV_ACTUAL_DEVICE_ONLY(nv);
|
|
|
|
if ((arg_size < sizeof(*query_intr)) ||
|
|
(!nv->regs->map))
|
|
{
|
|
status = -EINVAL;
|
|
goto done;
|
|
}
|
|
|
|
query_intr->intrStatus =
|
|
*(nv->regs->map + (NV_RM_DEVICE_INTR_ADDRESS >> 2));
|
|
query_intr->status = NV_OK;
|
|
break;
|
|
}
|
|
|
|
/* pass out info about the card */
|
|
case NV_ESC_CARD_INFO:
|
|
{
|
|
size_t num_arg_devices = arg_size / sizeof(nv_ioctl_card_info_t);
|
|
|
|
NV_CTL_DEVICE_ONLY(nv);
|
|
|
|
status = nvidia_read_card_info(arg_copy, num_arg_devices);
|
|
break;
|
|
}
|
|
|
|
case NV_ESC_ATTACH_GPUS_TO_FD:
|
|
{
|
|
size_t num_arg_gpus = arg_size / sizeof(NvU32);
|
|
size_t i;
|
|
|
|
NV_CTL_DEVICE_ONLY(nv);
|
|
|
|
if (num_arg_gpus == 0 || nvlfp->num_attached_gpus != 0 ||
|
|
arg_size % sizeof(NvU32) != 0)
|
|
{
|
|
status = -EINVAL;
|
|
goto done;
|
|
}
|
|
|
|
NV_KMALLOC(nvlfp->attached_gpus, arg_size);
|
|
if (nvlfp->attached_gpus == NULL)
|
|
{
|
|
status = -ENOMEM;
|
|
goto done;
|
|
}
|
|
memcpy(nvlfp->attached_gpus, arg_copy, arg_size);
|
|
nvlfp->num_attached_gpus = num_arg_gpus;
|
|
|
|
for (i = 0; i < nvlfp->num_attached_gpus; i++)
|
|
{
|
|
if (nvlfp->attached_gpus[i] == 0)
|
|
{
|
|
continue;
|
|
}
|
|
|
|
if (nvidia_dev_get(nvlfp->attached_gpus[i], sp))
|
|
{
|
|
while (i--)
|
|
{
|
|
if (nvlfp->attached_gpus[i] != 0)
|
|
nvidia_dev_put(nvlfp->attached_gpus[i], sp);
|
|
}
|
|
NV_KFREE(nvlfp->attached_gpus, arg_size);
|
|
nvlfp->num_attached_gpus = 0;
|
|
|
|
status = -EINVAL;
|
|
break;
|
|
}
|
|
}
|
|
|
|
break;
|
|
}
|
|
|
|
case NV_ESC_CHECK_VERSION_STR:
|
|
{
|
|
NV_CTL_DEVICE_ONLY(nv);
|
|
|
|
rmStatus = rm_perform_version_check(sp, arg_copy, arg_size);
|
|
status = ((rmStatus == NV_OK) ? 0 : -EINVAL);
|
|
break;
|
|
}
|
|
|
|
case NV_ESC_SYS_PARAMS:
|
|
{
|
|
nv_ioctl_sys_params_t *api = arg_copy;
|
|
|
|
NV_CTL_DEVICE_ONLY(nv);
|
|
|
|
if (arg_size != sizeof(nv_ioctl_sys_params_t))
|
|
{
|
|
status = -EINVAL;
|
|
goto done;
|
|
}
|
|
|
|
/* numa_memblock_size should only be set once */
|
|
if (nvl->numa_memblock_size == 0)
|
|
{
|
|
nvl->numa_memblock_size = api->memblock_size;
|
|
}
|
|
else
|
|
{
|
|
status = (nvl->numa_memblock_size == api->memblock_size) ?
|
|
0 : -EBUSY;
|
|
goto done;
|
|
}
|
|
break;
|
|
}
|
|
|
|
case NV_ESC_NUMA_INFO:
|
|
{
|
|
nv_ioctl_numa_info_t *api = arg_copy;
|
|
rmStatus = NV_OK;
|
|
|
|
NV_ACTUAL_DEVICE_ONLY(nv);
|
|
|
|
if (arg_size != sizeof(nv_ioctl_numa_info_t))
|
|
{
|
|
status = -EINVAL;
|
|
goto done;
|
|
}
|
|
|
|
api->offline_addresses.numEntries =
|
|
ARRAY_SIZE(api->offline_addresses.addresses),
|
|
|
|
rmStatus = rm_get_gpu_numa_info(sp, nv,
|
|
&(api->nid),
|
|
&(api->numa_mem_addr),
|
|
&(api->numa_mem_size),
|
|
(api->offline_addresses.addresses),
|
|
&(api->offline_addresses.numEntries));
|
|
if (rmStatus != NV_OK)
|
|
{
|
|
status = -EBUSY;
|
|
goto done;
|
|
}
|
|
|
|
api->status = nv_get_numa_status(nvl);
|
|
api->memblock_size = nv_ctl_device.numa_memblock_size;
|
|
break;
|
|
}
|
|
|
|
case NV_ESC_SET_NUMA_STATUS:
|
|
{
|
|
nv_ioctl_set_numa_status_t *api = arg_copy;
|
|
rmStatus = NV_OK;
|
|
|
|
if (!NV_IS_SUSER())
|
|
{
|
|
status = -EACCES;
|
|
goto done;
|
|
}
|
|
|
|
NV_ACTUAL_DEVICE_ONLY(nv);
|
|
|
|
if (arg_size != sizeof(nv_ioctl_set_numa_status_t))
|
|
{
|
|
status = -EINVAL;
|
|
goto done;
|
|
}
|
|
|
|
/*
|
|
* The nv_linux_state_t for the device needs to be locked
|
|
* in order to prevent additional open()/close() calls from
|
|
* manipulating the usage count for the device while we
|
|
* determine if NUMA state can be changed.
|
|
*/
|
|
down(&nvl->ldata_lock);
|
|
|
|
if (nv_get_numa_status(nvl) != api->status)
|
|
{
|
|
if (api->status == NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS)
|
|
{
|
|
/*
|
|
* Only the current client should have an open file
|
|
* descriptor for the device, to allow safe offlining.
|
|
*/
|
|
if (NV_ATOMIC_READ(nvl->usage_count) > 1)
|
|
{
|
|
status = -EBUSY;
|
|
goto unlock;
|
|
}
|
|
else
|
|
{
|
|
/*
|
|
* If this call fails, it indicates that RM
|
|
* is not ready to offline memory, and we should keep
|
|
* the current NUMA status of ONLINE.
|
|
*/
|
|
rmStatus = rm_gpu_numa_offline(sp, nv);
|
|
if (rmStatus != NV_OK)
|
|
{
|
|
status = -EBUSY;
|
|
goto unlock;
|
|
}
|
|
}
|
|
}
|
|
|
|
status = nv_set_numa_status(nvl, api->status);
|
|
if (status < 0)
|
|
{
|
|
if (api->status == NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS)
|
|
(void) rm_gpu_numa_online(sp, nv);
|
|
goto unlock;
|
|
}
|
|
|
|
if (api->status == NV_IOCTL_NUMA_STATUS_ONLINE)
|
|
{
|
|
rmStatus = rm_gpu_numa_online(sp, nv);
|
|
if (rmStatus != NV_OK)
|
|
{
|
|
status = -EBUSY;
|
|
goto unlock;
|
|
}
|
|
}
|
|
}
|
|
|
|
unlock:
|
|
up(&nvl->ldata_lock);
|
|
|
|
break;
|
|
}
|
|
|
|
case NV_ESC_EXPORT_TO_DMABUF_FD:
|
|
{
|
|
nv_ioctl_export_to_dma_buf_fd_t *params = arg_copy;
|
|
|
|
if (arg_size != sizeof(nv_ioctl_export_to_dma_buf_fd_t))
|
|
{
|
|
status = -EINVAL;
|
|
goto done;
|
|
}
|
|
|
|
NV_ACTUAL_DEVICE_ONLY(nv);
|
|
|
|
params->status = nv_dma_buf_export(nv, params);
|
|
|
|
break;
|
|
}
|
|
|
|
default:
|
|
rmStatus = rm_ioctl(sp, nv, &nvlfp->nvfp, arg_cmd, arg_copy, arg_size);
|
|
status = ((rmStatus == NV_OK) ? 0 : -EINVAL);
|
|
break;
|
|
}
|
|
|
|
done:
|
|
up(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_IOCTL]);
|
|
|
|
up_read(&nv_system_pm_lock);
|
|
|
|
if (arg_copy != NULL)
|
|
{
|
|
if (status != -EFAULT)
|
|
{
|
|
if (NV_COPY_TO_USER(arg_ptr, arg_copy, arg_size))
|
|
{
|
|
nv_printf(NV_DBG_ERRORS, "NVRM: failed to copy out ioctl data\n");
|
|
status = -EFAULT;
|
|
}
|
|
}
|
|
NV_KFREE(arg_copy, arg_size);
|
|
}
|
|
|
|
return status;
|
|
}
|
|
|
|
irqreturn_t
|
|
nvidia_isr_msix(
|
|
int irq,
|
|
void *arg
|
|
)
|
|
{
|
|
irqreturn_t ret;
|
|
nv_linux_state_t *nvl = (void *) arg;
|
|
|
|
// nvidia_isr_msix() is called for each of the MSI-X vectors and they can
|
|
// run in parallel on different CPUs (cores), but this is not currently
|
|
// supported by nvidia_isr() and its children. As a big hammer fix just
|
|
// spinlock around the nvidia_isr() call to serialize them.
|
|
//
|
|
// At this point interrupts are disabled on the CPU running our ISR (see
|
|
// comments for nv_default_irq_flags()) so a plain spinlock is enough.
|
|
NV_SPIN_LOCK(&nvl->msix_isr_lock);
|
|
|
|
ret = nvidia_isr(irq, arg);
|
|
|
|
NV_SPIN_UNLOCK(&nvl->msix_isr_lock);
|
|
|
|
return ret;
|
|
}
|
|
|
|
/*
 * driver receives an interrupt
 * if someone waiting, then hand it off.
 *
 * Top-half interrupt handler shared by the INTA/MSI path and (via
 * nvidia_isr_msix) the MSI-X path.  Dispatches to UVM when enabled, runs
 * the RM ISR, tracks unhandled-interrupt rates for MSI/MSI-X, and decides
 * whether a bottom half must run (threaded IRQ or kthread queue item).
 */
irqreturn_t
nvidia_isr(
    int irq,
    void *arg
)
{
    nv_linux_state_t *nvl = (void *) arg;
    nv_state_t *nv = NV_STATE_PTR(nvl);
    NvU32 need_to_run_bottom_half_gpu_lock_held = 0;
    NvBool rm_handled = NV_FALSE, uvm_handled = NV_FALSE, rm_fault_handling_needed = NV_FALSE;
    NvU32 rm_serviceable_fault_cnt = 0;
    NvU32 sec, usec;
    NvU16 index = 0;
    NvU64 currentTime = 0;
    NvBool found_irq = NV_FALSE;

    /* Count MMU faults RM can service; a nonzero count forces a
     * GPU-lock-taking kthread bottom half to be scheduled below. */
    rm_gpu_handle_mmu_faults(nvl->sp[NV_DEV_STACK_ISR], nv, &rm_serviceable_fault_cnt);
    rm_fault_handling_needed = (rm_serviceable_fault_cnt != 0);

#if defined (NV_UVM_ENABLE)
    //
    // Returns NV_OK if the UVM driver handled the interrupt
    //
    // Returns NV_ERR_NO_INTR_PENDING if the interrupt is not for
    // the UVM driver.
    //
    // Returns NV_WARN_MORE_PROCESSING_REQUIRED if the UVM top-half ISR was
    // unable to get its lock(s), due to other (UVM) threads holding them.
    //
    // RM can normally treat NV_WARN_MORE_PROCESSING_REQUIRED the same as
    // NV_ERR_NO_INTR_PENDING, but in some cases the extra information may
    // be helpful.
    //
    if (nv_uvm_event_interrupt(nv_get_cached_uuid(nv)) == NV_OK)
        uvm_handled = NV_TRUE;
#endif

    rm_handled = rm_isr(nvl->sp[NV_DEV_STACK_ISR], nv,
                        &need_to_run_bottom_half_gpu_lock_held);

    /* Replicating the logic in linux kernel to track unhandled interrupt crossing a threshold */
    if ((nv->flags & NV_FLAG_USES_MSI) || (nv->flags & NV_FLAG_USES_MSIX))
    {
        if (nvl->irq_count != NULL)
        {
            /* Look for an existing tracking slot for this IRQ number. */
            for (index = 0; index < nvl->current_num_irq_tracked; index++)
            {
                if (nvl->irq_count[index].irq == irq)
                {
                    found_irq = NV_TRUE;
                    break;
                }

                found_irq = NV_FALSE;
            }

            /* Not tracked yet: claim the next free slot, if one remains. */
            if (!found_irq && nvl->current_num_irq_tracked < nvl->num_intr)
            {
                index = nvl->current_num_irq_tracked;
                nvl->irq_count[index].irq = irq;
                nvl->current_num_irq_tracked++;
                found_irq = NV_TRUE;
            }

            if (found_irq)
            {
                nvl->irq_count[index].total++;

                if(rm_handled == NV_FALSE)
                {
                    os_get_current_time(&sec, &usec);
                    /* Wall-clock time in microseconds. */
                    currentTime = ((NvU64)sec) * 1000000 + (NvU64)usec;

                    /* Reset unhandled count if it's been more than 0.1 seconds since the last unhandled IRQ */
                    if ((currentTime - nvl->irq_count[index].last_unhandled) > RM_UNHANDLED_TIMEOUT_US)
                        nvl->irq_count[index].unhandled = 1;
                    else
                        nvl->irq_count[index].unhandled++;

                    nvl->irq_count[index].last_unhandled = currentTime;
                    /* NOTE(review): the IRQ is reported to the kernel as
                     * handled even though RM did not handle it — presumably
                     * so the kernel's spurious-IRQ detector does not disable
                     * the line while this driver applies its own threshold
                     * (below).  Confirm against the kernel's note_interrupt()
                     * behavior. */
                    rm_handled = NV_TRUE;
                }

                /* Once per RM_THRESHOLD_TOTAL_IRQ_COUNT interrupts, report
                 * an excessive unhandled rate and restart the counters. */
                if (nvl->irq_count[index].total >= RM_THRESHOLD_TOTAL_IRQ_COUNT)
                {
                    if (nvl->irq_count[index].unhandled > RM_THRESHOLD_UNAHNDLED_IRQ_COUNT)
                        nv_printf(NV_DBG_ERRORS,"NVRM: Going over RM unhandled interrupt threshold for irq %d\n", irq);

                    nvl->irq_count[index].total = 0;
                    nvl->irq_count[index].unhandled = 0;
                    nvl->irq_count[index].last_unhandled = 0;
                }
            }
            else
                nv_printf(NV_DBG_ERRORS,"NVRM: IRQ number out of valid range\n");
        }
    }

    /* RM asked for the GPU-lock-held bottom half: let the kernel run the
     * threaded-IRQ handler. */
    if (need_to_run_bottom_half_gpu_lock_held)
    {
        return IRQ_WAKE_THREAD;
    }
    else
    {
        //
        // If rm_isr does not need to run a bottom half and mmu_faults_copied
        // indicates that bottom half is needed, then we enqueue a kthread based
        // bottom half, as this specific bottom_half will acquire the GPU lock
        //
        if (rm_fault_handling_needed)
            nv_kthread_q_schedule_q_item(&nvl->bottom_half_q, &nvl->bottom_half_q_item);
    }

    return IRQ_RETVAL(rm_handled || uvm_handled || rm_fault_handling_needed);
}
|
|
|
|
/*
 * Threaded-IRQ bottom half for the non-MSI-X interrupt path.  All work is
 * delegated to the common bottom-half handler; the irq argument is unused.
 */
irqreturn_t
nvidia_isr_kthread_bh(
    int irq,
    void *data
)
{
    return nvidia_isr_common_bh(data);
}
|
|
|
|
/*
 * Threaded-IRQ bottom half for the MSI-X interrupt path.  Serializes the
 * per-vector bottom-half kthreads (they share one alt-stack) and then
 * runs the common bottom-half handler.
 */
irqreturn_t
nvidia_isr_msix_kthread_bh(
    int irq,
    void *data
)
{
    NV_STATUS status;
    irqreturn_t ret;
    nv_state_t *nv = (nv_state_t *) data;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    //
    // Synchronize kthreads servicing bottom halves for different MSI-X vectors
    // as they share same pre-allocated alt-stack.
    //
    status = os_acquire_mutex(nvl->msix_bh_mutex);
    // os_acquire_mutex can only fail if we cannot sleep and we can
    WARN_ON(status != NV_OK);

    ret = nvidia_isr_common_bh(data);

    os_release_mutex(nvl->msix_bh_mutex);

    return ret;
}
|
|
|
|
static irqreturn_t
|
|
nvidia_isr_common_bh(
|
|
void *data
|
|
)
|
|
{
|
|
nv_state_t *nv = (nv_state_t *) data;
|
|
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
|
|
nvidia_stack_t *sp = nvl->sp[NV_DEV_STACK_ISR_BH];
|
|
NV_STATUS status;
|
|
|
|
status = nv_check_gpu_state(nv);
|
|
if (status == NV_ERR_GPU_IS_LOST)
|
|
{
|
|
nv_printf(NV_DBG_INFO, "NVRM: GPU is lost, skipping ISR bottom half\n");
|
|
}
|
|
else
|
|
{
|
|
rm_isr_bh(sp, nv);
|
|
}
|
|
|
|
return IRQ_HANDLED;
|
|
}
|
|
|
|
/*
 * Bottom half that runs in kthread context without holding the GPU lock.
 * Serializes against other users of the shared alt-stack via
 * isr_bh_unlocked_mutex, then services RM unless the GPU is lost.
 */
static void
nvidia_isr_bh_unlocked(
    void * args
)
{
    nv_state_t *nv = (nv_state_t *) args;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    nvidia_stack_t *sp;
    NV_STATUS status;

    //
    // Synchronize kthreads servicing unlocked bottom half as they
    // share same pre-allocated stack for alt-stack
    //
    status = os_acquire_mutex(nvl->isr_bh_unlocked_mutex);
    if (status != NV_OK)
    {
        /* NOTE(review): on acquisition failure the function warns but still
         * proceeds and calls os_release_mutex() below on a mutex it never
         * acquired — confirm this is safe for the os mutex implementation. */
        nv_printf(NV_DBG_ERRORS, "NVRM: %s: Unable to take bottom_half mutex!\n",
                  __FUNCTION__);
        WARN_ON(1);
    }

    sp = nvl->sp[NV_DEV_STACK_ISR_BH_UNLOCKED];

    /* Skip RM servicing entirely when the GPU has been lost. */
    status = nv_check_gpu_state(nv);
    if (status == NV_ERR_GPU_IS_LOST)
    {
        nv_printf(NV_DBG_INFO,
                  "NVRM: GPU is lost, skipping unlocked ISR bottom half\n");
    }
    else
    {
        rm_isr_bh_unlocked(sp, nv);
    }

    os_release_mutex(nvl->isr_bh_unlocked_mutex);
}
|
|
|
|
static void
|
|
nvidia_rc_timer_callback(
|
|
struct nv_timer *nv_timer
|
|
)
|
|
{
|
|
nv_linux_state_t *nvl = container_of(nv_timer, nv_linux_state_t, rc_timer);
|
|
nv_state_t *nv = NV_STATE_PTR(nvl);
|
|
nvidia_stack_t *sp = nvl->sp[NV_DEV_STACK_TIMER];
|
|
NV_STATUS status;
|
|
|
|
status = nv_check_gpu_state(nv);
|
|
if (status == NV_ERR_GPU_IS_LOST)
|
|
{
|
|
nv_printf(NV_DBG_INFO,
|
|
"NVRM: GPU is lost, skipping device timer callbacks\n");
|
|
return;
|
|
}
|
|
|
|
if (rm_run_rc_callback(sp, nv) == NV_OK)
|
|
{
|
|
// set another timeout 1 sec in the future:
|
|
mod_timer(&nvl->rc_timer.kernel_timer, jiffies + HZ);
|
|
}
|
|
}
|
|
|
|
/*
|
|
** nvidia_ctl_open
|
|
**
|
|
** nv control driver open entry point. Sessions are created here.
|
|
*/
|
|
static int
|
|
nvidia_ctl_open(
|
|
struct inode *inode,
|
|
struct file *file
|
|
)
|
|
{
|
|
nv_linux_state_t *nvl = &nv_ctl_device;
|
|
nv_state_t *nv = NV_STATE_PTR(nvl);
|
|
nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file);
|
|
|
|
nv_printf(NV_DBG_INFO, "NVRM: nvidia_ctl_open\n");
|
|
|
|
down(&nvl->ldata_lock);
|
|
|
|
/* save the nv away in file->private_data */
|
|
nvlfp->nvptr = nvl;
|
|
|
|
if (NV_ATOMIC_READ(nvl->usage_count) == 0)
|
|
{
|
|
nv->flags |= (NV_FLAG_OPEN | NV_FLAG_CONTROL);
|
|
}
|
|
|
|
NV_ATOMIC_INC(nvl->usage_count);
|
|
up(&nvl->ldata_lock);
|
|
|
|
return 0;
|
|
}
|
|
|
|
|
|
/*
|
|
** nvidia_ctl_close
|
|
*/
|
|
static int
|
|
nvidia_ctl_close(
|
|
struct inode *inode,
|
|
struct file *file
|
|
)
|
|
{
|
|
nv_alloc_t *at, *next;
|
|
nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file);
|
|
nv_state_t *nv = NV_STATE_PTR(nvl);
|
|
nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file);
|
|
nvidia_stack_t *sp = nvlfp->sp;
|
|
unsigned int i;
|
|
|
|
nv_printf(NV_DBG_INFO, "NVRM: nvidia_ctl_close\n");
|
|
|
|
down(&nvl->ldata_lock);
|
|
if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count))
|
|
{
|
|
nv->flags &= ~NV_FLAG_OPEN;
|
|
}
|
|
up(&nvl->ldata_lock);
|
|
|
|
rm_cleanup_file_private(sp, nv, &nvlfp->nvfp);
|
|
|
|
if (nvlfp->free_list != NULL)
|
|
{
|
|
at = nvlfp->free_list;
|
|
while (at != NULL)
|
|
{
|
|
next = at->next;
|
|
if (at->pid == os_get_current_process())
|
|
NV_PRINT_AT(NV_DBG_MEMINFO, at);
|
|
nv_free_pages(nv, at->num_pages,
|
|
at->flags.contig,
|
|
at->cache_type,
|
|
(void *)at);
|
|
at = next;
|
|
}
|
|
}
|
|
|
|
if (nvlfp->num_attached_gpus != 0)
|
|
{
|
|
size_t i;
|
|
|
|
for (i = 0; i < nvlfp->num_attached_gpus; i++)
|
|
{
|
|
if (nvlfp->attached_gpus[i] != 0)
|
|
nvidia_dev_put(nvlfp->attached_gpus[i], sp);
|
|
}
|
|
|
|
NV_KFREE(nvlfp->attached_gpus, sizeof(NvU32) * nvlfp->num_attached_gpus);
|
|
nvlfp->num_attached_gpus = 0;
|
|
}
|
|
|
|
for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i)
|
|
{
|
|
nv_kmem_cache_free_stack(nvlfp->fops_sp[i]);
|
|
}
|
|
|
|
nv_free_file_private(nvlfp);
|
|
NV_SET_FILE_PRIVATE(file, NULL);
|
|
|
|
nv_kmem_cache_free_stack(sp);
|
|
|
|
return 0;
|
|
}
|
|
|
|
|
|
/*
 * Narrow or widen the device's DMA addressing width to phys_addr_bits and
 * update the addressable-range limit accordingly.  Unless TCE bypass mode
 * is active, the new mask is also propagated to the kernel DMA layer.
 */
void NV_API_CALL
nv_set_dma_address_size(
    nv_state_t *nv,
    NvU32 phys_addr_bits
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvU64 start_addr = nv_get_dma_start_address(nv);
    NvU64 new_mask = (((NvU64)1) << phys_addr_bits) - 1;

    /* The device can address [start_addr, start_addr + new_mask]. */
    nvl->dma_dev.addressable_range.limit = start_addr + new_mask;

    /*
     * The only scenario in which we definitely should not update the DMA mask
     * is on POWER, when using TCE bypass mode (see nv_get_dma_start_address()
     * for details), since the meaning of the DMA mask is overloaded in that
     * case.
     */
    if (!nvl->tce_bypass_enabled)
    {
        /* NOTE(review): dma_set_mask() can fail; the return value is
         * ignored here — confirm that is intentional. */
        dma_set_mask(&nvl->pci_dev->dev, new_mask);
        /* Certain kernels have a bug which causes pci_set_consistent_dma_mask
         * to call GPL sme_active symbol, this bug has already been fixed in a
         * minor release update but detect the failure scenario here to prevent
         * an installation regression */
#if !NV_IS_EXPORT_SYMBOL_GPL_sme_active
        dma_set_coherent_mask(&nvl->pci_dev->dev, new_mask);
#endif
    }
}
|
|
|
|
static NvUPtr
|
|
nv_map_guest_pages(nv_alloc_t *at,
|
|
NvU64 address,
|
|
NvU32 page_count,
|
|
NvU32 page_idx)
|
|
{
|
|
struct page **pages;
|
|
NvU32 j;
|
|
NvUPtr virt_addr;
|
|
|
|
NV_KMALLOC(pages, sizeof(struct page *) * page_count);
|
|
if (pages == NULL)
|
|
{
|
|
nv_printf(NV_DBG_ERRORS,
|
|
"NVRM: failed to allocate vmap() page descriptor table!\n");
|
|
return 0;
|
|
}
|
|
|
|
for (j = 0; j < page_count; j++)
|
|
{
|
|
pages[j] = NV_GET_PAGE_STRUCT(at->page_table[page_idx+j]->phys_addr);
|
|
}
|
|
|
|
virt_addr = nv_vm_map_pages(pages, page_count,
|
|
at->cache_type == NV_MEMORY_CACHED, at->flags.unencrypted);
|
|
NV_KFREE(pages, sizeof(struct page *) * page_count);
|
|
|
|
return virt_addr;
|
|
}
|
|
|
|
/*
 * Build an nv_alloc_t describing guest memory given only its DMA addresses
 * (pte_array).  No CPU mappings are created here; virt_addr is left 0 for
 * every page and mappings are made on demand (see nv_map_guest_pages()).
 *
 * Returns NV_OK, or NV_ERR_NO_MEMORY if the descriptor cannot be created.
 */
NV_STATUS NV_API_CALL
nv_alias_pages(
    nv_state_t *nv,
    NvU32 page_cnt,
    NvU32 contiguous,
    NvU32 cache_type,
    NvU64 guest_id,
    NvU64 *pte_array,
    void **priv_data
)
{
    nv_alloc_t *at;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvU32 i=0;
    nvidia_pte_t *page_ptr = NULL;

    at = nvos_create_alloc(nvl->dev, page_cnt);

    if (at == NULL)
    {
        return NV_ERR_NO_MEMORY;
    }

    at->cache_type = cache_type;
    if (contiguous)
        at->flags.contig = NV_TRUE;
#if defined(NVCPU_AARCH64)
    /* NOTE(review): presumably marks potential memory-attribute aliasing
     * for non-cached mappings on AArch64 — see the flags.aliased users. */
    if (at->cache_type != NV_MEMORY_CACHED)
        at->flags.aliased = NV_TRUE;
#endif

    at->flags.guest = NV_TRUE;

    at->order = get_order(at->num_pages * PAGE_SIZE);

    for (i=0; i < at->num_pages; ++i)
    {
        page_ptr = at->page_table[i];

        /* Contiguous allocations supply only the base address; derive the
         * remaining pages by stepping one page at a time. */
        if (contiguous && i>0)
        {
            page_ptr->dma_addr = pte_array[0] + (i << PAGE_SHIFT);
        }
        else
        {
            page_ptr->dma_addr = pte_array[i];
        }

        /* Only DMA addresses are known for guest memory; mirror them into
         * the phys_addr field the rest of the driver reads. */
        page_ptr->phys_addr = page_ptr->dma_addr;

        /* aliased pages will be mapped on demand. */
        page_ptr->virt_addr = 0x0;
    }

    at->guest_id = guest_id;
    *priv_data = at;
    NV_ATOMIC_INC(at->usage_count);

    NV_PRINT_AT(NV_DBG_MEMINFO, at);

    return NV_OK;
}
|
|
|
|
/*
|
|
* This creates a dummy nv_alloc_t for peer IO mem, so that it can
|
|
* be mapped using NvRmMapMemory.
|
|
*/
|
|
NV_STATUS NV_API_CALL nv_register_peer_io_mem(
|
|
nv_state_t *nv,
|
|
NvU64 *phys_addr,
|
|
NvU64 page_count,
|
|
void **priv_data
|
|
)
|
|
{
|
|
nv_alloc_t *at;
|
|
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
|
|
NvU64 i;
|
|
NvU64 addr;
|
|
|
|
at = nvos_create_alloc(nvl->dev, page_count);
|
|
|
|
if (at == NULL)
|
|
return NV_ERR_NO_MEMORY;
|
|
|
|
// IO regions should be uncached and contiguous
|
|
at->cache_type = NV_MEMORY_UNCACHED;
|
|
at->flags.contig = NV_TRUE;
|
|
#if defined(NVCPU_AARCH64)
|
|
at->flags.aliased = NV_TRUE;
|
|
#endif
|
|
at->flags.peer_io = NV_TRUE;
|
|
|
|
at->order = get_order(at->num_pages * PAGE_SIZE);
|
|
|
|
addr = phys_addr[0];
|
|
|
|
for (i = 0; i < page_count; i++)
|
|
{
|
|
at->page_table[i]->phys_addr = addr;
|
|
addr += PAGE_SIZE;
|
|
}
|
|
|
|
// No struct page array exists for this memory.
|
|
at->user_pages = NULL;
|
|
|
|
*priv_data = at;
|
|
|
|
NV_PRINT_AT(NV_DBG_MEMINFO, at);
|
|
|
|
return NV_OK;
|
|
}
|
|
|
|
void NV_API_CALL nv_unregister_peer_io_mem(
    nv_state_t *nv,
    void *priv_data
)
{
    nv_alloc_t *alloc = priv_data;

    /* Log and release the descriptor built by nv_register_peer_io_mem(). */
    NV_PRINT_AT(NV_DBG_MEMINFO, alloc);

    nvos_free_alloc(alloc);
}
|
|
|
|
/*
 * By registering user pages, we create a dummy nv_alloc_t for it, so that the
 * rest of the RM can treat it like any other alloc.
 *
 * This also converts the page array to an array of physical addresses.
 *
 * On entry, *priv_data holds the caller's struct page array; on success it
 * is replaced with the new nv_alloc_t (the page array is saved in
 * at->user_pages and handed back by nv_unregister_user_pages()).
 */
NV_STATUS NV_API_CALL nv_register_user_pages(
    nv_state_t *nv,
    NvU64 page_count,
    NvU64 *phys_addr,
    void *import_priv,
    void **priv_data
)
{
    nv_alloc_t *at;
    NvU64 i;
    struct page **user_pages;
    nv_linux_state_t *nvl;
    nvidia_pte_t *page_ptr;

    nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_register_user_pages: 0x%x\n", page_count);
    user_pages = *priv_data;
    nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    at = nvos_create_alloc(nvl->dev, page_count);

    if (at == NULL)
    {
        return NV_ERR_NO_MEMORY;
    }

    /*
     * Anonymous memory currently must be write-back cacheable, and we can't
     * enforce contiguity.
     *
     * NOTE(review): the comment above says write-back cacheable, but the
     * code sets NV_MEMORY_UNCACHED — confirm which is intended.
     */
    at->cache_type = NV_MEMORY_UNCACHED;
#if defined(NVCPU_AARCH64)
    at->flags.aliased = NV_TRUE;
#endif

    at->flags.user = NV_TRUE;

    at->order = get_order(at->num_pages * PAGE_SIZE);

    for (i = 0; i < page_count; i++)
    {
        /*
         * We only assign the physical address and not the DMA address, since
         * this allocation hasn't been DMA-mapped yet.
         */
        page_ptr = at->page_table[i];
        page_ptr->phys_addr = page_to_phys(user_pages[i]);

        phys_addr[i] = page_ptr->phys_addr;
    }

    /* Save off the user pages array to be restored later */
    at->user_pages = user_pages;

    /* Save off the import private data to be returned later */
    if (import_priv != NULL)
    {
        at->import_priv = import_priv;
    }

    *priv_data = at;

    NV_PRINT_AT(NV_DBG_MEMINFO, at);

    return NV_OK;
}
|
|
|
|
void NV_API_CALL nv_unregister_user_pages(
    nv_state_t *nv,
    NvU64 page_count,
    void **import_priv,
    void **priv_data
)
{
    nv_alloc_t *alloc = *priv_data;

    nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_unregister_user_pages: 0x%x\n", page_count);

    NV_PRINT_AT(NV_DBG_MEMINFO, alloc);

    /* Only allocations registered via nv_register_user_pages() belong here. */
    WARN_ON(!alloc->flags.user);

    /* Hand the saved struct page array back to the caller. */
    *priv_data = alloc->user_pages;

    /* Hand back the import private data, if the caller asked for it. */
    if (import_priv != NULL)
    {
        *import_priv = alloc->import_priv;
    }

    nvos_free_alloc(alloc);
}
|
|
|
|
/*
|
|
* This creates a dummy nv_alloc_t for existing physical allocations, so
|
|
* that it can be mapped using NvRmMapMemory and BAR2 code path.
|
|
*/
|
|
NV_STATUS NV_API_CALL nv_register_phys_pages(
|
|
nv_state_t *nv,
|
|
NvU64 *phys_addr,
|
|
NvU64 page_count,
|
|
NvU32 cache_type,
|
|
void **priv_data
|
|
)
|
|
{
|
|
nv_alloc_t *at;
|
|
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
|
|
NvU64 i;
|
|
NvU64 addr;
|
|
|
|
at = nvos_create_alloc(nvl->dev, page_count);
|
|
|
|
if (at == NULL)
|
|
return NV_ERR_NO_MEMORY;
|
|
/*
|
|
* Setting memory flags to cacheable and discontiguous.
|
|
*/
|
|
at->cache_type = cache_type;
|
|
|
|
/*
|
|
* Only physical address is available so we don't try to reuse existing
|
|
* mappings
|
|
*/
|
|
at->flags.physical = NV_TRUE;
|
|
|
|
at->order = get_order(at->num_pages * PAGE_SIZE);
|
|
|
|
for (i = 0, addr = phys_addr[0]; i < page_count; addr = phys_addr[++i])
|
|
{
|
|
at->page_table[i]->phys_addr = addr;
|
|
}
|
|
|
|
at->user_pages = NULL;
|
|
*priv_data = at;
|
|
|
|
NV_PRINT_AT(NV_DBG_MEMINFO, at);
|
|
|
|
return NV_OK;
|
|
}
|
|
|
|
/*
 * Wrap an imported sg_table in a dummy nv_alloc_t so the rest of RM can
 * treat it like any other allocation, and fill phys_addr[] with the
 * per-page DMA addresses extracted from the SGT.
 *
 * NOTE(review): if the SGT maps fewer than page_count pages, trailing
 * phys_addr[] entries are left unwritten — presumably callers guarantee
 * the sizes match; confirm.
 */
NV_STATUS NV_API_CALL nv_register_sgt(
    nv_state_t *nv,
    NvU64 *phys_addr,
    NvU64 page_count,
    NvU32 cache_type,
    void **priv_data,
    struct sg_table *import_sgt,
    void *import_priv
)
{
    nv_alloc_t *at;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    unsigned int i, j = 0;
    NvU64 sg_addr, sg_off, sg_len;
    struct scatterlist *sg;

    at = nvos_create_alloc(nvl->dev, page_count);

    if (at == NULL)
        return NV_ERR_NO_MEMORY;

    /* Populate phys addrs with DMA addrs from SGT */
    for_each_sg(import_sgt->sgl, sg, import_sgt->nents, i)
    {
        /*
         * It is possible for dma_map_sg() to merge scatterlist entries, so
         * make sure we account for that here.  Each merged entry is split
         * back into PAGE_SIZE steps.
         */
        for (sg_addr = sg_dma_address(sg), sg_len = sg_dma_len(sg), sg_off = 0;
             (sg_off < sg_len) && (j < page_count);
             sg_off += PAGE_SIZE, j++)
        {
            phys_addr[j] = sg_addr + sg_off;
        }
    }

    /*
     * Setting memory flags to cacheable and discontiguous.
     */
    at->cache_type = cache_type;

    /* Keep the SGT so nv_unregister_sgt() can hand it back. */
    at->import_sgt = import_sgt;

    /* Save off the import private data to be returned later */
    if (import_priv != NULL)
    {
        at->import_priv = import_priv;
    }

    at->order = get_order(at->num_pages * PAGE_SIZE);

    *priv_data = at;

    NV_PRINT_AT(NV_DBG_MEMINFO, at);

    return NV_OK;
}
|
|
|
|
void NV_API_CALL nv_unregister_sgt(
    nv_state_t *nv,
    struct sg_table **import_sgt,
    void **import_priv,
    void *priv_data
)
{
    nv_alloc_t *alloc = priv_data;

    nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_unregister_sgt\n");

    NV_PRINT_AT(NV_DBG_MEMINFO, alloc);

    /* Hand the imported SGT back to the caller to release. */
    *import_sgt = alloc->import_sgt;

    /* Hand back the import private data, if the caller asked for it. */
    if (import_priv != NULL)
    {
        *import_priv = alloc->import_priv;
    }

    nvos_free_alloc(alloc);
}
|
|
|
|
void NV_API_CALL nv_unregister_phys_pages(
    nv_state_t *nv,
    void *priv_data
)
{
    nv_alloc_t *alloc = priv_data;

    /* Log and release the descriptor built by nv_register_phys_pages(). */
    NV_PRINT_AT(NV_DBG_MEMINFO, alloc);

    nvos_free_alloc(alloc);
}
|
|
|
|
/*
 * Report the number of pages backing an allocation.
 *
 * Fix: the original validated pNumPages but dereferenced pAllocPrivate
 * unconditionally; both arguments are validated now, matching the
 * defensive style of nv_get_phys_pages().
 *
 * Returns NV_OK, or NV_ERR_INVALID_ARGUMENT if either pointer is NULL.
 */
NV_STATUS NV_API_CALL nv_get_num_phys_pages(
    void *pAllocPrivate,
    NvU32 *pNumPages
)
{
    nv_alloc_t *at = pAllocPrivate;

    if (!at || !pNumPages) {
        return NV_ERR_INVALID_ARGUMENT;
    }

    *pNumPages = at->num_pages;

    return NV_OK;
}
|
|
|
|
/*
 * Copy up to *pNumPages struct page pointers for an allocation into the
 * caller's array, writing back the number actually copied.
 *
 * Fixes: the loop index was a signed `int` compared against the unsigned
 * NvU32 page_count (signed/unsigned mismatch); and pAllocPrivate was
 * dereferenced without validation while the other arguments were checked.
 *
 * Returns NV_OK, or NV_ERR_INVALID_ARGUMENT if any pointer is NULL.
 */
NV_STATUS NV_API_CALL nv_get_phys_pages(
    void *pAllocPrivate,
    void *pPages,
    NvU32 *pNumPages
)
{
    nv_alloc_t *at = pAllocPrivate;
    struct page **pages = (struct page **)pPages;
    NvU32 page_count;
    NvU32 i;

    if (!at || !pNumPages || !pPages) {
        return NV_ERR_INVALID_ARGUMENT;
    }

    /* Never copy more entries than the allocation actually has. */
    page_count = NV_MIN(*pNumPages, at->num_pages);

    for (i = 0; i < page_count; i++) {
        pages[i] = NV_GET_PAGE_STRUCT(at->page_table[i]->phys_addr);
    }

    *pNumPages = page_count;

    return NV_OK;
}
|
|
|
|
/*
 * Return a kernel virtual address covering `size` bytes of the allocation
 * starting at page `pageIndex`, offset `pageOffset`, creating a mapping on
 * demand when an existing per-page VA cannot be used.
 *
 * *pPrivate receives NULL when an existing mapping was reused, or the
 * number of freshly-mapped pages otherwise; nv_free_kernel_mapping() uses
 * it to decide whether an unmap is required.
 *
 * Returns NULL on mapping or allocation failure.
 */
void* NV_API_CALL nv_alloc_kernel_mapping(
    nv_state_t *nv,
    void *pAllocPrivate,
    NvU64 pageIndex,
    NvU32 pageOffset,
    NvU64 size,
    void **pPrivate
)
{
    nv_alloc_t *at = pAllocPrivate;
    NvU32 j, page_count;
    NvUPtr virt_addr;
    struct page **pages;
    NvBool isUserAllocatedMem;

    //
    // For User allocated memory (like ErrorNotifier's) which is NOT allocated
    // nor owned by RM, the RM driver just stores the physical address
    // corresponding to that memory and does not map it until required.
    // In that case, in page tables the virt_addr == 0, so first we need to map
    // those pages to obtain virtual address.
    //
    isUserAllocatedMem = at->flags.user &&
                        !at->page_table[pageIndex]->virt_addr &&
                         at->page_table[pageIndex]->phys_addr;

    //
    // User memory may NOT have kernel VA. So check this and fallback to else
    // case to create one.
    //
    if (((size + pageOffset) <= PAGE_SIZE) &&
         !at->flags.guest && !at->flags.aliased &&
         !isUserAllocatedMem && !at->flags.physical)
    {
        /* Fast path: the request fits inside one already-mapped page. */
        *pPrivate = NULL;
        return (void *)(at->page_table[pageIndex]->virt_addr + pageOffset);
    }
    else
    {
        /* Round the requested span up to a whole number of pages. */
        size += pageOffset;
        page_count = (size >> PAGE_SHIFT) + ((size & ~NV_PAGE_MASK) ? 1 : 0);

        if (at->flags.guest)
        {
            virt_addr = nv_map_guest_pages(at,
                                           nv->bars[NV_GPU_BAR_INDEX_REGS].cpu_address,
                                           page_count, pageIndex);
        }
        else
        {
            /* Build a temporary struct-page table and vmap the range. */
            NV_KMALLOC(pages, sizeof(struct page *) * page_count);
            if (pages == NULL)
            {
                nv_printf(NV_DBG_ERRORS,
                          "NVRM: failed to allocate vmap() page descriptor table!\n");
                return NULL;
            }

            for (j = 0; j < page_count; j++)
                pages[j] = NV_GET_PAGE_STRUCT(at->page_table[pageIndex+j]->phys_addr);

            virt_addr = nv_vm_map_pages(pages, page_count,
                at->cache_type == NV_MEMORY_CACHED, at->flags.unencrypted);
            NV_KFREE(pages, sizeof(struct page *) * page_count);
        }

        if (virt_addr == 0)
        {
            nv_printf(NV_DBG_ERRORS, "NVRM: failed to map pages!\n");
            return NULL;
        }

        /* Record how many pages were mapped, for the matching free. */
        *pPrivate = (void *)(NvUPtr)page_count;
        return (void *)(virt_addr + pageOffset);
    }

    /* Unreachable: both branches above return. */
    return NULL;
}
|
|
|
|
NV_STATUS NV_API_CALL nv_free_kernel_mapping(
    nv_state_t *nv,
    void *pAllocPrivate,
    void *address,
    void *pPrivate
)
{
    nv_alloc_t *at = pAllocPrivate;
    NvUPtr base = ((NvUPtr)address & NV_PAGE_MASK);
    NvU32 mapped_pages = (NvUPtr)pPrivate;

    if (at->flags.guest)
    {
        /* Guest allocations are torn down through nv_iounmap(). */
        nv_iounmap((void *)base, (mapped_pages * PAGE_SIZE));
    }
    else if (pPrivate != NULL)
    {
        /* pPrivate carries the page count recorded at map time; NULL
         * means an existing mapping was reused and nothing was created. */
        nv_vm_unmap_pages(base, mapped_pages);
    }

    return NV_OK;
}
|
|
|
|
/*
 * Allocate page_count pages of system memory for GPU use, returning the
 * device-visible addresses through pte_array (a single base address when
 * contiguous) and the allocation descriptor through priv_data.
 *
 * Returns NV_OK on success, NV_ERR_NOT_SUPPORTED if the caching mode
 * cannot be encoded for system memory, or NV_ERR_NO_MEMORY.
 */
NV_STATUS NV_API_CALL nv_alloc_pages(
    nv_state_t *nv,
    NvU32 page_count,
    NvBool contiguous,
    NvU32 cache_type,
    NvBool zeroed,
    NvBool unencrypted,
    NvU64 *pte_array,
    void **priv_data
)
{
    nv_alloc_t *at;
    NV_STATUS status = NV_ERR_NO_MEMORY;
    nv_linux_state_t *nvl = NULL;
    NvBool will_remap = NV_FALSE;
    NvU32 i;
    struct device *dev = NULL;

    nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_alloc_pages: %d pages\n", page_count);
    nv_printf(NV_DBG_MEMINFO, "NVRM: VM: contig %d cache_type %d\n",
        contiguous, cache_type);

    //
    // system memory allocation can be associated with a client instead of a gpu
    // handle the case where per device state is NULL
    //
    if(nv)
    {
        nvl = NV_GET_NVL_FROM_NV_STATE(nv);
        will_remap = nv_requires_dma_remap(nv);
        dev = nvl->dev;
    }

    /* Reject caching modes that cannot be encoded for system memory. */
    if (nv_encode_caching(NULL, cache_type, NV_MEMORY_TYPE_SYSTEM))
        return NV_ERR_NOT_SUPPORTED;

    at = nvos_create_alloc(dev, page_count);
    if (at == NULL)
        return NV_ERR_NO_MEMORY;

    at->cache_type = cache_type;

    if (contiguous)
        at->flags.contig = NV_TRUE;
    if (zeroed)
        at->flags.zeroed = NV_TRUE;
#if defined(NVCPU_AARCH64)
    if (at->cache_type != NV_MEMORY_CACHED)
        at->flags.aliased = NV_TRUE;
#endif
    if (unencrypted)
        at->flags.unencrypted = NV_TRUE;

#if defined(NVCPU_PPC64LE)
    /*
     * Starting on Power9 systems, DMA addresses for NVLink are no longer the
     * same as used over PCIe. There is an address compression scheme required
     * for NVLink ONLY which impacts the upper address bits of the DMA address.
     *
     * This divergence between PCIe and NVLink DMA mappings breaks assumptions
     * in the driver where during initialization we allocate system memory
     * for the GPU to access over PCIe before NVLink is trained -- and some of
     * these mappings persist on the GPU. If these persistent mappings are not
     * equivalent they will cause invalid DMA accesses from the GPU once we
     * switch to NVLink.
     *
     * To work around this we limit all system memory allocations from the driver
     * during the period before NVLink is enabled to be from NUMA node 0 (CPU 0)
     * which has a CPU real address with the upper address bits (above bit 42)
     * set to 0. Effectively making the PCIe and NVLink DMA mappings equivalent
     * allowing persistent system memory mappings already programmed on the GPU
     * to remain valid after NVLink is enabled.
     *
     * See Bug 1920398 for more details.
     */
    if (nv && nvl->npu && !nvl->dma_dev.nvlink)
        at->flags.node0 = NV_TRUE;
#endif

    if (at->flags.contig)
        status = nv_alloc_contig_pages(nv, at);
    else
        status = nv_alloc_system_pages(nv, at);

    if (status != NV_OK)
        goto failed;

    /* Contiguous allocations report only the single base address. */
    for (i = 0; i < ((contiguous) ? 1 : page_count); i++)
    {
        /*
         * The contents of the pte_array[] depend on whether or not this device
         * requires DMA-remapping. If it does, it should be the phys addresses
         * used by the DMA-remapping paths, otherwise it should be the actual
         * address that the device should use for DMA (which, confusingly, may
         * be different than the CPU physical address, due to a static DMA
         * offset).
         */
        if ((nv == NULL) || will_remap)
        {
            pte_array[i] = at->page_table[i]->phys_addr;
        }
        else
        {
            pte_array[i] = nv_phys_to_dma(dev,
                at->page_table[i]->phys_addr);
        }
    }

    *priv_data = at;
    NV_ATOMIC_INC(at->usage_count);

    NV_PRINT_AT(NV_DBG_MEMINFO, at);

    return NV_OK;

failed:
    nvos_free_alloc(at);

    return status;
}
|
|
|
|
NV_STATUS NV_API_CALL nv_free_pages(
    nv_state_t *nv,
    NvU32 page_count,
    NvBool contiguous,
    NvU32 cache_type,
    void *priv_data
)
{
    nv_alloc_t *at = priv_data;

    nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_free_pages: 0x%x\n", page_count);

    NV_PRINT_AT(NV_DBG_MEMINFO, at);

    /*
     * If the 'at' usage count doesn't drop to zero here, not all of
     * the user mappings have been torn down in time - we can't
     * safely free the memory. We report success back to the RM, but
     * defer the actual free operation until later.
     *
     * This is described in greater detail in the comments above the
     * nvidia_vma_(open|release)() callbacks in nv-mmap.c.
     */
    if (!NV_ATOMIC_DEC_AND_TEST(at->usage_count))
        return NV_OK;

    /* Backing pages are released only for non-guest allocations. */
    if (!at->flags.guest)
    {
        if (at->flags.contig)
            nv_free_contig_pages(at);
        else
            nv_free_system_pages(at);
    }

    nvos_free_alloc(at);

    return NV_OK;
}
|
|
|
|
NvBool nv_lock_init_locks
|
|
(
|
|
nvidia_stack_t *sp,
|
|
nv_state_t *nv
|
|
)
|
|
{
|
|
nv_linux_state_t *nvl;
|
|
nvl = NV_GET_NVL_FROM_NV_STATE(nv);
|
|
|
|
NV_INIT_MUTEX(&nvl->ldata_lock);
|
|
NV_INIT_MUTEX(&nvl->mmap_lock);
|
|
|
|
NV_ATOMIC_SET(nvl->usage_count, 0);
|
|
|
|
if (!rm_init_event_locks(sp, nv))
|
|
return NV_FALSE;
|
|
|
|
return NV_TRUE;
|
|
}
|
|
|
|
/*
 * Counterpart of nv_lock_init_locks(): delegates to RM to tear down the
 * event locks it created.
 */
void nv_lock_destroy_locks
(
    nvidia_stack_t *sp,
    nv_state_t *nv
)
{
    rm_destroy_event_locks(sp, nv);
}
|
|
|
|
/*
 * Post an RM event to the owning file descriptor's event queue and wake
 * any waiters on its waitqueue.  Events carrying data are copied into a
 * freshly-allocated nvidia_event_t and appended to the fd's singly-linked
 * queue; dataless events only set a pending flag.
 */
void NV_API_CALL nv_post_event(
    nv_event_t *event,
    NvHandle handle,
    NvU32 index,
    NvU32 info32,
    NvU16 info16,
    NvBool data_valid
)
{
    nv_linux_file_private_t *nvlfp = nv_get_nvlfp_from_nvfp(event->nvfp);
    unsigned long eflags;
    nvidia_event_t *nvet;

    NV_SPIN_LOCK_IRQSAVE(&nvlfp->fp_lock, eflags);

    if (data_valid)
    {
        /* Must be an atomic allocation: a spinlock is held with IRQs off. */
        NV_KMALLOC_ATOMIC(nvet, sizeof(nvidia_event_t));
        if (nvet == NULL)
        {
            /* Out of memory: the event is dropped (no wakeup is issued). */
            NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags);
            return;
        }

        /* Append to the tail of the fd's event queue. */
        if (nvlfp->event_data_tail != NULL)
            nvlfp->event_data_tail->next = nvet;
        if (nvlfp->event_data_head == NULL)
            nvlfp->event_data_head = nvet;
        nvlfp->event_data_tail = nvet;
        nvet->next = NULL;

        /* Copy the event and stamp it with the caller-provided fields. */
        nvet->event = *event;
        nvet->event.hObject = handle;
        nvet->event.index = index;
        nvet->event.info32 = info32;
        nvet->event.info16 = info16;
    }
    //
    // 'event_pending' is interpreted by nvidia_poll() and nv_get_event() to
    // mean that an event without data is pending. Therefore, only set it to
    // true here if newly posted event is dataless.
    //
    else
    {
        nvlfp->dataless_event_pending = NV_TRUE;
    }

    NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags);

    wake_up_interruptible(&nvlfp->waitqueue);
}
|
|
|
|
/*
 * Report whether GSP-RM firmware is requested for this GPU. The module
 * parameter value "all" means firmware is active on every GPU.
 */
NvBool NV_API_CALL nv_is_rm_firmware_active(
    nv_state_t *nv
)
{
    // "all" here means all GPUs
    if ((rm_firmware_active != NULL) &&
        (strcmp(rm_firmware_active, "all") == 0))
    {
        return NV_TRUE;
    }

    return NV_FALSE;
}
|
|
|
|
const char *nv_firmware_path(
|
|
nv_firmware_t fw_type
|
|
)
|
|
{
|
|
switch (fw_type)
|
|
{
|
|
case NV_FIRMWARE_GSP:
|
|
return NV_FIRMWARE_GSP_FILENAME;
|
|
case NV_FIRMWARE_GSP_LOG:
|
|
return NV_FIRMWARE_GSP_LOG_FILENAME;
|
|
}
|
|
return "";
|
|
}
|
|
|
|
/*
 * Load a firmware image via the kernel firmware loader. On success,
 * returns an opaque handle (for nv_put_firmware()) and sets *fw_buf /
 * *fw_size to the image contents; returns NULL on failure.
 */
const void* NV_API_CALL nv_get_firmware(
    nv_state_t *nv,
    nv_firmware_t fw_type,
    const void **fw_buf,
    NvU32 *fw_size
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    const struct firmware *fw;

    // path is relative to /lib/firmware
    // if this fails it will print an error to dmesg
    if (request_firmware(&fw, nv_firmware_path(fw_type), nvl->dev) != 0)
        return NULL;

    // NOTE(review): fw->size is size_t; assigning into an NvU32 would
    // truncate images >= 4GB — presumably never the case for these files.
    *fw_size = fw->size;
    *fw_buf = fw->data;

    return fw;
}
|
|
|
|
/* Release a firmware handle previously returned by nv_get_firmware(). */
void NV_API_CALL nv_put_firmware(
    const void *fw_handle
)
{
    release_firmware(fw_handle);
}
|
|
|
|
/*
 * Resolve a file descriptor to this driver's per-file private data.
 * Validates that the fd refers to one of our device nodes: the control
 * device when 'ctl' is true, otherwise one of the registered frontend
 * minors. On success the struct file reference taken by fget() is kept
 * and returned via *os_private; release it with nv_put_file_private().
 * Returns NULL (and drops the reference) on any validation failure.
 */
nv_file_private_t* NV_API_CALL nv_get_file_private(
    NvS32 fd,
    NvBool ctl,
    void **os_private
)
{
    struct file *filp = NULL;
    nv_linux_file_private_t *nvlfp = NULL;
    dev_t rdev = 0;

    /* fget() takes a reference on the struct file; held until fail/put. */
    filp = fget(fd);

    if (filp == NULL || !NV_FILE_INODE(filp))
    {
        goto fail;
    }

    rdev = (NV_FILE_INODE(filp))->i_rdev;

    if (MAJOR(rdev) != NV_MAJOR_DEVICE_NUMBER)
    {
        goto fail;
    }

    if (ctl)
    {
        if (MINOR(rdev) != NV_CONTROL_DEVICE_MINOR)
            goto fail;
    }
    else
    {
        NvBool found = NV_FALSE;
        int i;

        /*
         * Accept only minors with a registered frontend entry.
         * NOTE(review): the bound is inclusive (<=) — assumed intentional
         * to cover the full frontend minor range; confirm against the
         * nv_minor_num_table definition.
         */
        for (i = 0; i <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN; i++)
        {
            if ((nv_minor_num_table[i] != NULL) && (MINOR(rdev) == i))
            {
                found = NV_TRUE;
                break;
            }
        }

        if (!found)
            goto fail;
    }

    nvlfp = NV_GET_LINUX_FILE_PRIVATE(filp);

    /* Hand the file reference to the caller for nv_put_file_private(). */
    *os_private = filp;

    return &nvlfp->nvfp;

fail:

    if (filp != NULL)
    {
        fput(filp);
    }

    return NULL;
}
|
|
|
|
/*
 * Drop the struct file reference obtained via nv_get_file_private().
 */
void NV_API_CALL nv_put_file_private(
    void *os_private
)
{
    struct file *filp = os_private;
    fput(filp);
}
|
|
|
|
/*
 * Dequeue the oldest data-bearing event for this file into *event.
 * *pending is set nonzero if further events remain queued. Returns
 * NV_OK on success, NV_ERR_GENERIC if the queue is empty.
 */
int NV_API_CALL nv_get_event(
    nv_file_private_t *nvfp,
    nv_event_t *event,
    NvU32 *pending
)
{
    nv_linux_file_private_t *nvlfp = nv_get_nvlfp_from_nvfp(nvfp);
    nvidia_event_t *nvet;
    unsigned long eflags;

    NV_SPIN_LOCK_IRQSAVE(&nvlfp->fp_lock, eflags);

    nvet = nvlfp->event_data_head;
    if (nvet == NULL)
    {
        NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags);
        return NV_ERR_GENERIC;
    }

    *event = nvet->event;

    /* Unlink the head node; keep tail consistent if it was the last one. */
    if (nvlfp->event_data_tail == nvet)
        nvlfp->event_data_tail = NULL;
    nvlfp->event_data_head = nvet->next;

    *pending = (nvlfp->event_data_head != NULL);

    NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags);

    /* Free outside the spinlock; the node is already unreachable. */
    NV_KFREE(nvet, sizeof(nvidia_event_t));

    return NV_OK;
}
|
|
|
|
/*
 * Arm the per-device robust-channel (RC) recovery timer. Returns 0 on
 * success, -1 if the timer is already running.
 */
int NV_API_CALL nv_start_rc_timer(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl;

    if (nv->rc_timer_enabled)
        return -1;

    nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    nv_printf(NV_DBG_INFO, "NVRM: initializing rc timer\n");
    nv_timer_setup(&nvl->rc_timer, nvidia_rc_timer_callback);
    nv->rc_timer_enabled = 1;

    // set the timeout for 1 second in the future:
    mod_timer(&nvl->rc_timer.kernel_timer, jiffies + HZ);

    nv_printf(NV_DBG_INFO, "NVRM: rc timer initialized\n");
    return 0;
}
|
|
|
|
/*
 * Disarm the RC recovery timer and wait for any in-flight callback to
 * finish. Returns 0 on success, -1 if the timer was not running.
 */
int NV_API_CALL nv_stop_rc_timer(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl;

    if (!nv->rc_timer_enabled)
        return -1;

    nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    nv_printf(NV_DBG_INFO, "NVRM: stopping rc timer\n");

    /* Clear the flag before synchronously cancelling the timer. */
    nv->rc_timer_enabled = 0;
    del_timer_sync(&nvl->rc_timer.kernel_timer);

    nv_printf(NV_DBG_INFO, "NVRM: rc timer stopped\n");
    return 0;
}
|
|
|
|
/* Next expiry for the profiler snapshot timer (NV_SNAPSHOT_TIMER_HZ rate). */
#define SNAPSHOT_TIMER_FREQ (jiffies + HZ / NV_SNAPSHOT_TIMER_HZ)

/*
 * Timer callback for profiler snapshots: invoke the registered callback
 * and re-arm the timer. Both happen under snapshot_timer_lock so that
 * nv_stop_snapshot_timer() can atomically prevent re-arming by clearing
 * the callback pointer.
 */
static void snapshot_timer_callback(struct nv_timer *timer)
{
    nv_linux_state_t *nvl = &nv_ctl_device;
    nv_state_t *nv = NV_STATE_PTR(nvl);
    unsigned long flags;

    NV_SPIN_LOCK_IRQSAVE(&nvl->snapshot_timer_lock, flags);
    if (nvl->snapshot_callback != NULL)
    {
        nvl->snapshot_callback(nv->profiler_context);
        mod_timer(&timer->kernel_timer, SNAPSHOT_TIMER_FREQ);
    }
    NV_SPIN_UNLOCK_IRQRESTORE(&nvl->snapshot_timer_lock, flags);
}
|
|
|
|
/*
 * Register a profiler snapshot callback on the control device and arm
 * the periodic snapshot timer (see snapshot_timer_callback()).
 */
void NV_API_CALL nv_start_snapshot_timer(void (*snapshot_callback)(void *context))
{
    nv_linux_state_t *nvl = &nv_ctl_device;

    nvl->snapshot_callback = snapshot_callback;
    nv_timer_setup(&nvl->snapshot_timer, snapshot_timer_callback);
    mod_timer(&nvl->snapshot_timer.kernel_timer, SNAPSHOT_TIMER_FREQ);
}
|
|
|
|
/*
 * Stop the profiler snapshot timer. Clearing the callback under the
 * lock prevents the timer callback from re-arming itself; the
 * del_timer_sync() afterwards then cancels any pending expiry and waits
 * for a running callback to complete.
 */
void NV_API_CALL nv_stop_snapshot_timer(void)
{
    nv_linux_state_t *nvl = &nv_ctl_device;
    NvBool timer_active;
    unsigned long flags;

    NV_SPIN_LOCK_IRQSAVE(&nvl->snapshot_timer_lock, flags);
    timer_active = nvl->snapshot_callback != NULL;
    nvl->snapshot_callback = NULL;
    NV_SPIN_UNLOCK_IRQRESTORE(&nvl->snapshot_timer_lock, flags);

    if (timer_active)
        del_timer_sync(&nvl->snapshot_timer.kernel_timer);
}
|
|
|
|
/*
 * Synchronously invoke the snapshot callback once (if one is registered)
 * without waiting for the next timer expiry.
 */
void NV_API_CALL nv_flush_snapshot_timer(void)
{
    nv_linux_state_t *nvl = &nv_ctl_device;
    nv_state_t *nv = NV_STATE_PTR(nvl);
    unsigned long flags;

    NV_SPIN_LOCK_IRQSAVE(&nvl->snapshot_timer_lock, flags);
    if (nvl->snapshot_callback != NULL)
    {
        nvl->snapshot_callback(nv->profiler_context);
    }
    NV_SPIN_UNLOCK_IRQRESTORE(&nvl->snapshot_timer_lock, flags);
}
|
|
|
|
static int __init
|
|
nvos_count_devices(void)
|
|
{
|
|
int count;
|
|
|
|
count = nv_pci_count_devices();
|
|
|
|
count += nv_platform_count_devices();
|
|
|
|
|
|
return count;
|
|
}
|
|
|
|
/*
 * Report whether the chipset is I/O coherent. The RM query runs once and
 * its result is cached in the tristate nv_chipset_is_io_coherent; later
 * calls return the cached value. If the stack needed for the RM call
 * cannot be allocated, NV_FALSE is returned (and a warning is emitted).
 */
NvBool nvos_is_chipset_io_coherent(void)
{
    if (nv_chipset_is_io_coherent == NV_TRISTATE_INDETERMINATE)
    {
        nvidia_stack_t *sp = NULL;
        if (nv_kmem_cache_alloc_stack(&sp) != 0)
        {
            nv_printf(NV_DBG_ERRORS,
              "NVRM: cannot allocate stack for platform coherence check callback \n");
            WARN_ON(1);
            return NV_FALSE;
        }

        nv_chipset_is_io_coherent = rm_is_chipset_io_coherent(sp);

        nv_kmem_cache_free_stack(sp);
    }

    return nv_chipset_is_io_coherent;
}
|
|
|
|
#if defined(CONFIG_PM)
|
|
static NV_STATUS
|
|
nv_power_management(
|
|
nv_state_t *nv,
|
|
nv_pm_action_t pm_action
|
|
)
|
|
{
|
|
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
|
|
int status = NV_OK;
|
|
nvidia_stack_t *sp = NULL;
|
|
|
|
if (nv_kmem_cache_alloc_stack(&sp) != 0)
|
|
{
|
|
return NV_ERR_NO_MEMORY;
|
|
}
|
|
|
|
status = nv_check_gpu_state(nv);
|
|
if (status == NV_ERR_GPU_IS_LOST)
|
|
{
|
|
NV_DEV_PRINTF(NV_DBG_INFO, nv, "GPU is lost, skipping PM event\n");
|
|
goto failure;
|
|
}
|
|
|
|
switch (pm_action)
|
|
{
|
|
case NV_PM_ACTION_STANDBY:
|
|
/* fall through */
|
|
case NV_PM_ACTION_HIBERNATE:
|
|
{
|
|
status = rm_power_management(sp, nv, pm_action);
|
|
|
|
nv_kthread_q_stop(&nvl->bottom_half_q);
|
|
|
|
nv_disable_pat_support();
|
|
break;
|
|
}
|
|
case NV_PM_ACTION_RESUME:
|
|
{
|
|
nv_enable_pat_support();
|
|
|
|
nv_kthread_q_item_init(&nvl->bottom_half_q_item,
|
|
nvidia_isr_bh_unlocked, (void *)nv);
|
|
|
|
status = nv_kthread_q_init(&nvl->bottom_half_q, nv_device_name);
|
|
if (status != NV_OK)
|
|
break;
|
|
|
|
status = rm_power_management(sp, nv, pm_action);
|
|
break;
|
|
}
|
|
default:
|
|
status = NV_ERR_INVALID_ARGUMENT;
|
|
break;
|
|
}
|
|
|
|
failure:
|
|
nv_kmem_cache_free_stack(sp);
|
|
|
|
return status;
|
|
}
|
|
|
|
/*
 * Undo nv_preempt_user_channels(): restart stopped user channels,
 * re-allow user mmap()s, and drop the fine-grained dynamic power
 * reference. Devices that were never opened are skipped. Called with no
 * device locks held; takes ldata_lock, and mmap_lock nested inside it.
 */
static NV_STATUS
nv_restore_user_channels(
    nv_state_t *nv
)
{
    NV_STATUS status = NV_OK;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    nv_stack_t *sp = NULL;

    if (nv_kmem_cache_alloc_stack(&sp) != 0)
    {
        return NV_ERR_NO_MEMORY;
    }

    down(&nvl->ldata_lock);

    /* Nothing to restore if the device has never been opened. */
    if ((nv->flags & NV_FLAG_OPEN) == 0)
    {
        goto done;
    }

    status = rm_restart_user_channels(sp, nv);
    WARN_ON(status != NV_OK);

    down(&nvl->mmap_lock);

    nv_set_safe_to_mmap_locked(nv, NV_TRUE);

    up(&nvl->mmap_lock);

    /* Drop the reference taken in nv_preempt_user_channels(). */
    rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE);

done:
    up(&nvl->ldata_lock);

    nv_kmem_cache_free_stack(sp);

    return status;
}
|
|
|
|
/*
 * Quiesce user work ahead of a system suspend: pin the GPU awake with a
 * fine-grained dynamic power reference, forbid and revoke user GPU
 * mappings, then stop user channels. Devices that were never opened are
 * skipped. Reversed by nv_restore_user_channels().
 */
static NV_STATUS
nv_preempt_user_channels(
    nv_state_t *nv
)
{
    NV_STATUS status = NV_OK;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    nv_stack_t *sp = NULL;

    if (nv_kmem_cache_alloc_stack(&sp) != 0)
    {
        return NV_ERR_NO_MEMORY;
    }

    down(&nvl->ldata_lock);

    if ((nv->flags & NV_FLAG_OPEN) == 0)
    {
        goto done;
    }

    /* Keep the GPU out of runtime suspend while channels are stopped. */
    status = rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE);
    WARN_ON(status != NV_OK);

    down(&nvl->mmap_lock);

    /* Block new mappings, then tear down existing ones. */
    nv_set_safe_to_mmap_locked(nv, NV_FALSE);
    nv_revoke_gpu_mappings_locked(nv);

    up(&nvl->mmap_lock);

    status = rm_stop_user_channels(sp, nv);
    WARN_ON(status != NV_OK);

done:
    up(&nvl->ldata_lock);

    nv_kmem_cache_free_stack(sp);

    return status;
}
|
|
|
|
/*
 * Suspend one device for system power management (PCI or platform).
 * Nested suspends are counted via suspend_count and only the first one
 * performs work. If video memory preservation is configured, system PM
 * must go through the procfs suspend interface (is_procfs_suspend).
 * Optionally pre-saves PCI state so the device stays in D0 across
 * standby when d0_state_in_suspend is set.
 */
static NV_STATUS
nvidia_suspend(
    struct device *dev,
    nv_pm_action_t pm_action,
    NvBool is_procfs_suspend
)
{
    NV_STATUS status = NV_OK;
    struct pci_dev *pci_dev = NULL;
    nv_linux_state_t *nvl;
    nv_state_t *nv;

    /* Device-private data lives in different drvdata slots per bus type. */
    if (nv_dev_is_pci(dev))
    {
        pci_dev = to_pci_dev(dev);
        nvl = pci_get_drvdata(pci_dev);
    }
    else
    {
        nvl = dev_get_drvdata(dev);
    }
    nv = NV_STATE_PTR(nvl);

    down(&nvl->ldata_lock);

    /* Untouched devices (never opened, no persistent state) need no work. */
    if (((nv->flags & NV_FLAG_OPEN) == 0) &&
        ((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) == 0))
    {
        goto done;
    }

    /* Already suspended: just track the nesting depth. */
    if ((nv->flags & NV_FLAG_SUSPENDED) != 0)
    {
        nvl->suspend_count++;
        goto pci_pm;
    }

    if (nv->preserve_vidmem_allocations && !is_procfs_suspend)
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
            "PreserveVideoMemoryAllocations module parameter is set. "
            "System Power Management attempted without driver procfs suspend interface. "
            "Please refer to the 'Configuring Power Management Support' section in the driver README.\n");
        status = NV_ERR_NOT_SUPPORTED;
        goto done;
    }

    nvidia_modeset_suspend(nv->gpu_id);

    status = nv_power_management(nv, pm_action);

    if (status != NV_OK)
    {
        /* Roll back the modeset suspend if the core transition failed. */
        nvidia_modeset_resume(nv->gpu_id);
        goto done;
    }
    else
    {
        nv->flags |= NV_FLAG_SUSPENDED;
    }

pci_pm:
    /*
     * Check if PCI power state should be D0 during system suspend. The PCI PM
     * core will change the power state only if the driver has not saved the
     * state in its suspend callback.
     */
    if ((nv->d0_state_in_suspend) && (pci_dev != NULL) &&
        !is_procfs_suspend && (pm_action == NV_PM_ACTION_STANDBY))
    {
        pci_save_state(pci_dev);
    }

done:
    up(&nvl->ldata_lock);

    return status;
}
|
|
|
|
/*
 * Resume one device from system power management; counterpart to
 * nvidia_suspend(). Nested suspends simply decrement suspend_count;
 * only the outermost resume performs the RM transition and clears
 * NV_FLAG_SUSPENDED.
 */
static NV_STATUS
nvidia_resume(
    struct device *dev,
    nv_pm_action_t pm_action
)
{
    NV_STATUS status = NV_OK;
    struct pci_dev *pci_dev;
    nv_linux_state_t *nvl;
    nv_state_t *nv;

    if (nv_dev_is_pci(dev))
    {
        pci_dev = to_pci_dev(dev);
        nvl = pci_get_drvdata(pci_dev);
    }
    else
    {
        nvl = dev_get_drvdata(dev);
    }
    nv = NV_STATE_PTR(nvl);

    down(&nvl->ldata_lock);

    /* Not suspended: nothing to do. */
    if ((nv->flags & NV_FLAG_SUSPENDED) == 0)
    {
        goto done;
    }

    if (nvl->suspend_count != 0)
    {
        /* Unwind one level of nested suspend. */
        nvl->suspend_count--;
    }
    else
    {
        status = nv_power_management(nv, pm_action);

        if (status == NV_OK)
        {
            nvidia_modeset_resume(nv->gpu_id);
            nv->flags &= ~NV_FLAG_SUSPENDED;
        }
    }

done:
    up(&nvl->ldata_lock);

    return status;
}
|
|
|
|
/*
 * System-wide resume across all devices. Order: resume each device
 * (skipped at UVM depth), resume UVM, restore user channels, then
 * resume modeset. A MODESET-depth suspend skips straight to the
 * modeset resume. Always returns NV_OK; individual failures are
 * reported via WARN_ON.
 */
static NV_STATUS
nv_resume_devices(
    nv_pm_action_t pm_action,
    nv_pm_action_depth_t pm_action_depth
)
{
    nv_linux_state_t *nvl;
    NvBool resume_devices = NV_TRUE;
    NV_STATUS status;

    if (pm_action_depth == NV_PM_ACTION_DEPTH_MODESET)
    {
        goto resume_modeset;
    }

    /* UVM depth: only UVM and user channels were suspended. */
    if (pm_action_depth == NV_PM_ACTION_DEPTH_UVM)
    {
        resume_devices = NV_FALSE;
    }

    LOCK_NV_LINUX_DEVICES();

    for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next)
    {
        if (resume_devices)
        {
            status = nvidia_resume(nvl->dev, pm_action);
            WARN_ON(status != NV_OK);
        }
    }

    UNLOCK_NV_LINUX_DEVICES();

    /* UVM resume happens with the device list unlocked. */
    status = nv_uvm_resume();
    WARN_ON(status != NV_OK);

    LOCK_NV_LINUX_DEVICES();

    for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next)
    {
        status = nv_restore_user_channels(NV_STATE_PTR(nvl));
        WARN_ON(status != NV_OK);
    }

    UNLOCK_NV_LINUX_DEVICES();

resume_modeset:
    nvidia_modeset_resume(0);

    return NV_OK;
}
|
|
|
|
/*
 * System-wide suspend across all devices; inverse of nv_resume_devices().
 * Order: suspend modeset, preempt user channels, suspend UVM, then
 * suspend each device — stopping early at MODESET or UVM depth. On any
 * failure, already-applied steps are unwound (devices resumed if device
 * suspend had begun, user channels restored) before returning the error.
 */
static NV_STATUS
nv_suspend_devices(
    nv_pm_action_t pm_action,
    nv_pm_action_depth_t pm_action_depth
)
{
    nv_linux_state_t *nvl;
    NvBool resume_devices = NV_FALSE;
    NV_STATUS status = NV_OK;

    nvidia_modeset_suspend(0);

    if (pm_action_depth == NV_PM_ACTION_DEPTH_MODESET)
    {
        return NV_OK;
    }

    LOCK_NV_LINUX_DEVICES();

    /* Loops stop at the first failure; later devices remain untouched. */
    for (nvl = nv_linux_devices; nvl != NULL && status == NV_OK; nvl = nvl->next)
    {
        status = nv_preempt_user_channels(NV_STATE_PTR(nvl));
        WARN_ON(status != NV_OK);
    }

    UNLOCK_NV_LINUX_DEVICES();

    if (status == NV_OK)
    {
        status = nv_uvm_suspend();
        WARN_ON(status != NV_OK);
    }
    if (status != NV_OK)
    {
        goto done;
    }

    if (pm_action_depth == NV_PM_ACTION_DEPTH_UVM)
    {
        return NV_OK;
    }

    LOCK_NV_LINUX_DEVICES();

    for (nvl = nv_linux_devices; nvl != NULL && status == NV_OK; nvl = nvl->next)
    {
        status = nvidia_suspend(nvl->dev, pm_action, NV_TRUE);
        WARN_ON(status != NV_OK);
    }
    if (status != NV_OK)
    {
        /* Device suspend began and failed: unwind devices too. */
        resume_devices = NV_TRUE;
    }

    UNLOCK_NV_LINUX_DEVICES();

done:
    if (status != NV_OK)
    {
        LOCK_NV_LINUX_DEVICES();

        for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next)
        {
            if (resume_devices)
            {
                nvidia_resume(nvl->dev, pm_action);
            }

            nv_restore_user_channels(NV_STATE_PTR(nvl));
        }

        UNLOCK_NV_LINUX_DEVICES();
    }

    return status;
}
|
|
|
|
/*
 * Drive the driver's system power state machine (procfs suspend
 * interface). Transitions RUNNING -> STANDBY/HIBERNATE suspend all
 * devices and hold nv_system_pm_lock for writing until the matching
 * transition back to RUNNING; direct STANDBY <-> HIBERNATE transitions
 * are rejected. Requesting the current state is a no-op.
 */
NV_STATUS
nv_set_system_power_state(
    nv_power_state_t power_state,
    nv_pm_action_depth_t pm_action_depth
)
{
    NV_STATUS status;
    nv_pm_action_t pm_action;

    switch (power_state)
    {
        case NV_POWER_STATE_IN_HIBERNATE:
            pm_action = NV_PM_ACTION_HIBERNATE;
            break;
        case NV_POWER_STATE_IN_STANDBY:
            pm_action = NV_PM_ACTION_STANDBY;
            break;
        case NV_POWER_STATE_RUNNING:
            pm_action = NV_PM_ACTION_RESUME;
            break;
        default:
            return NV_ERR_INVALID_ARGUMENT;
    }

    down(&nv_system_power_state_lock);

    /* Already in the requested state: nothing to do. */
    if (nv_system_power_state == power_state)
    {
        status = NV_OK;
        goto done;
    }

    if (power_state == NV_POWER_STATE_RUNNING)
    {
        /* Resume at the depth recorded when the suspend was requested. */
        status = nv_resume_devices(pm_action, nv_system_pm_action_depth);
        up_write(&nv_system_pm_lock);
    }
    else
    {
        /* Only RUNNING -> suspended transitions are legal. */
        if (nv_system_power_state != NV_POWER_STATE_RUNNING)
        {
            status = NV_ERR_INVALID_ARGUMENT;
            goto done;
        }

        nv_system_pm_action_depth = pm_action_depth;

        /* Held for writing until the matching resume. */
        down_write(&nv_system_pm_lock);
        status = nv_suspend_devices(pm_action, nv_system_pm_action_depth);
        if (status != NV_OK)
        {
            up_write(&nv_system_pm_lock);
            goto done;
        }
    }

    nv_system_power_state = power_state;

done:
    up(&nv_system_power_state_lock);

    return status;
}
|
|
|
|
int nv_pmops_suspend(
|
|
struct device *dev
|
|
)
|
|
{
|
|
NV_STATUS status;
|
|
|
|
status = nvidia_suspend(dev, NV_PM_ACTION_STANDBY, NV_FALSE);
|
|
return (status == NV_OK) ? 0 : -EIO;
|
|
}
|
|
|
|
int nv_pmops_resume(
|
|
struct device *dev
|
|
)
|
|
{
|
|
NV_STATUS status;
|
|
|
|
status = nvidia_resume(dev, NV_PM_ACTION_RESUME);
|
|
return (status == NV_OK) ? 0 : -EIO;
|
|
}
|
|
|
|
int nv_pmops_freeze(
|
|
struct device *dev
|
|
)
|
|
{
|
|
NV_STATUS status;
|
|
|
|
status = nvidia_suspend(dev, NV_PM_ACTION_HIBERNATE, NV_FALSE);
|
|
return (status == NV_OK) ? 0 : -EIO;
|
|
}
|
|
|
|
/* dev_pm_ops .thaw hook: no driver work required; report success. */
int nv_pmops_thaw(
    struct device *dev
)
{
    return 0;
}
|
|
|
|
int nv_pmops_restore(
|
|
struct device *dev
|
|
)
|
|
{
|
|
NV_STATUS status;
|
|
|
|
status = nvidia_resume(dev, NV_PM_ACTION_RESUME);
|
|
return (status == NV_OK) ? 0 : -EIO;
|
|
}
|
|
|
|
/* dev_pm_ops .poweroff hook: no driver work required; report success. */
int nv_pmops_poweroff(
    struct device *dev
)
{
    return 0;
}
|
|
|
|
static int
|
|
nvidia_transition_dynamic_power(
|
|
struct device *dev,
|
|
NvBool enter
|
|
)
|
|
{
|
|
struct pci_dev *pci_dev = to_pci_dev(dev);
|
|
nv_linux_state_t *nvl = pci_get_drvdata(pci_dev);
|
|
nv_state_t *nv = NV_STATE_PTR(nvl);
|
|
nvidia_stack_t *sp = NULL;
|
|
NV_STATUS status;
|
|
|
|
if ((nv->flags & (NV_FLAG_OPEN | NV_FLAG_PERSISTENT_SW_STATE)) == 0)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
if (nv_kmem_cache_alloc_stack(&sp) != 0)
|
|
{
|
|
return -ENOMEM;
|
|
}
|
|
|
|
status = rm_transition_dynamic_power(sp, nv, enter);
|
|
|
|
nv_kmem_cache_free_stack(sp);
|
|
|
|
return (status == NV_OK) ? 0 : -EIO;
|
|
}
|
|
|
|
/* dev_pm_ops .runtime_suspend hook: enter dynamic low-power state. */
int nv_pmops_runtime_suspend(
    struct device *dev
)
{
    return nvidia_transition_dynamic_power(dev, NV_TRUE);
}
|
|
|
|
/* dev_pm_ops .runtime_resume hook: leave dynamic low-power state. */
int nv_pmops_runtime_resume(
    struct device *dev
)
{
    return nvidia_transition_dynamic_power(dev, NV_FALSE);
}
|
|
#endif /* defined(CONFIG_PM) */
|
|
|
|
/*
 * Look up a device by PCI domain/bus/slot. Returns the matching
 * nv_state_t, or NULL if no such device is registered.
 */
nv_state_t* NV_API_CALL nv_get_adapter_state(
    NvU32 domain,
    NvU8 bus,
    NvU8 slot
)
{
    nv_linux_state_t *nvl;
    nv_state_t *found = NULL;

    LOCK_NV_LINUX_DEVICES();
    for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next)
    {
        nv_state_t *nv = NV_STATE_PTR(nvl);

        if ((nv->pci_info.domain == domain) &&
            (nv->pci_info.bus == bus) &&
            (nv->pci_info.slot == slot))
        {
            found = nv;
            break;
        }
    }
    UNLOCK_NV_LINUX_DEVICES();

    return found;
}
|
|
|
|
/* Return the nv_state_t of the control device. */
nv_state_t* NV_API_CALL nv_get_ctl_state(void)
{
    return NV_STATE_PTR(&nv_ctl_device);
}
|
|
|
|
/*
 * Report an RM error to the OS: log it against the PCI device, and on
 * Cray XT systems additionally forward it to the platform error handler
 * (whose status becomes the return value; otherwise NV_OK).
 */
NV_STATUS NV_API_CALL nv_log_error(
    nv_state_t *nv,
    NvU32 error_number,
    const char *format,
    va_list ap
)
{
    NV_STATUS status = NV_OK;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    nv_report_error(nvl->pci_dev, error_number, format, ap);
#if defined(CONFIG_CRAY_XT)
    status = nvos_forward_error_to_cray(nvl->pci_dev, error_number,
                                        format, ap);
#endif

    return status;
}
|
|
|
|
/*
 * Determine the base PCI bus address to add to all DMA mappings for this
 * device. Non-PPC64LE builds always return 0. On PPC64LE, probe for the
 * IBM "Huge DDW" linear DMA window (see the long comment below); when
 * present, cache its offset in nvl->dma_dev.addressable_range.start and
 * enable TCE bypass, else leave the default of 0.
 */
NvU64 NV_API_CALL nv_get_dma_start_address(
    nv_state_t *nv
)
{
#if defined(NVCPU_PPC64LE)
    struct pci_dev *pci_dev;
    dma_addr_t dma_addr;
    NvU64 saved_dma_mask;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    /*
     * If TCE bypass is disabled via a module parameter, then just return
     * the default (which is 0).
     *
     * Otherwise, the DMA start address only needs to be set once, and it
     * won't change afterward. Just return the cached value if asked again,
     * to avoid the kernel printing redundant messages to the kernel
     * log when we call pci_set_dma_mask().
     */
    if ((nv_tce_bypass_mode == NV_TCE_BYPASS_MODE_DISABLE) ||
        (nvl->tce_bypass_enabled))
    {
        return nvl->dma_dev.addressable_range.start;
    }

    pci_dev = nvl->pci_dev;

    /*
     * Linux on IBM POWER8 offers 2 different DMA set-ups, sometimes
     * referred to as "windows".
     *
     * The "default window" provides a 2GB region of PCI address space
     * located below the 32-bit line. The IOMMU is used to provide a
     * "rich" mapping--any page in system memory can be mapped at an
     * arbitrary address within this window. The mappings are dynamic
     * and pass in and out of being as pci_map*()/pci_unmap*() calls
     * are made.
     *
     * Dynamic DMA Windows (sometimes "Huge DDW") provides a linear
     * mapping of the system's entire physical address space at some
     * fixed offset above the 59-bit line. IOMMU is still used, and
     * pci_map*()/pci_unmap*() are still required, but mappings are
     * static. They're effectively set up in advance, and any given
     * system page will always map to the same PCI bus address. I.e.
     * physical 0x00000000xxxxxxxx => PCI 0x08000000xxxxxxxx
     *
     * This driver does not support the 2G default window because
     * of its limited size, and for reasons having to do with UVM.
     *
     * Linux on POWER8 will only provide the DDW-style full linear
     * mapping when the driver claims support for 64-bit DMA addressing
     * (a pre-requisite because the PCI addresses used in this case will
     * be near the top of the 64-bit range). The linear mapping
     * is not available in all system configurations.
     *
     * Detect whether the linear mapping is present by claiming
     * 64-bit support and then mapping physical page 0. For historical
     * reasons, Linux on POWER8 will never map a page to PCI address 0x0.
     * In the "default window" case page 0 will be mapped to some
     * non-zero address below the 32-bit line. In the
     * DDW/linear-mapping case, it will be mapped to address 0 plus
     * some high-order offset.
     *
     * If the linear mapping is present and sane then return the offset
     * as the starting address for all DMA mappings.
     */
    saved_dma_mask = pci_dev->dma_mask;
    if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64)) != 0)
    {
        goto done;
    }

    /* Trial mapping of physical page 0 to detect the window type. */
    dma_addr = pci_map_single(pci_dev, NULL, 1, DMA_BIDIRECTIONAL);
    if (pci_dma_mapping_error(pci_dev, dma_addr))
    {
        pci_set_dma_mask(pci_dev, saved_dma_mask);
        goto done;
    }

    pci_unmap_single(pci_dev, dma_addr, 1, DMA_BIDIRECTIONAL);

    /*
     * From IBM: "For IODA2, native DMA bypass or KVM TCE-based implementation
     * of full 64-bit DMA support will establish a window in address-space
     * with the high 14 bits being constant and the bottom up-to-50 bits
     * varying with the mapping."
     *
     * Unfortunately, we don't have any good interfaces or definitions from
     * the kernel to get information about the DMA offset assigned by OS.
     * However, we have been told that the offset will be defined by the top
     * 14 bits of the address, and bits 40-49 will not vary for any DMA
     * mappings until 1TB of system memory is surpassed; this limitation is
     * essential for us to function properly since our current GPUs only
     * support 40 physical address bits. We are in a fragile place where we
     * need to tell the OS that we're capable of 64-bit addressing, while
     * relying on the assumption that the top 24 bits will not vary in this
     * case.
     *
     * The way we try to compute the window, then, is mask the trial mapping
     * against the DMA capabilities of the device. That way, devices with
     * greater addressing capabilities will only take the bits it needs to
     * define the window.
     */
    if ((dma_addr & DMA_BIT_MASK(32)) != 0)
    {
        /*
         * Huge DDW not available - page 0 mapped to non-zero address below
         * the 32-bit line.
         */
        nv_printf(NV_DBG_WARNINGS,
            "NVRM: DMA window limited by platform\n");
        pci_set_dma_mask(pci_dev, saved_dma_mask);
        goto done;
    }
    else if ((dma_addr & saved_dma_mask) != 0)
    {
        NvU64 memory_size = os_get_num_phys_pages() * PAGE_SIZE;
        if ((dma_addr & ~saved_dma_mask) !=
            ((dma_addr + memory_size) & ~saved_dma_mask))
        {
            /*
             * The physical window straddles our addressing limit boundary,
             * e.g., for an adapter that can address up to 1TB, the window
             * crosses the 40-bit limit so that the lower end of the range
             * has different bits 63:40 than the higher end of the range.
             * We can only handle a single, static value for bits 63:40, so
             * we must fall back here.
             */
            nv_printf(NV_DBG_WARNINGS,
                "NVRM: DMA window limited by memory size\n");
            pci_set_dma_mask(pci_dev, saved_dma_mask);
            goto done;
        }
    }

    nvl->tce_bypass_enabled = NV_TRUE;
    nvl->dma_dev.addressable_range.start = dma_addr & ~(saved_dma_mask);

    /* Update the coherent mask to match */
    dma_set_coherent_mask(&pci_dev->dev, pci_dev->dma_mask);

done:
    return nvl->dma_dev.addressable_range.start;
#else
    return 0;
#endif
}
|
|
|
|
/*
 * Record whether this GPU is the primary VGA adapter, detected via the
 * IORESOURCE_ROM_SHADOW flag on its ROM resource. Returns
 * NV_ERR_NOT_SUPPORTED on kernels without that flag.
 */
NV_STATUS NV_API_CALL nv_set_primary_vga_status(
    nv_state_t *nv
)
{
    /* IORESOURCE_ROM_SHADOW wasn't added until 2.6.10 */
#if defined(IORESOURCE_ROM_SHADOW)
    nv_linux_state_t *nvl;
    struct pci_dev *pci_dev;

    nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    pci_dev = nvl->pci_dev;

    nv->primary_vga = ((NV_PCI_RESOURCE_FLAGS(pci_dev, PCI_ROM_RESOURCE) &
                        IORESOURCE_ROM_SHADOW) == IORESOURCE_ROM_SHADOW);
    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
|
|
|
|
/*
 * Kick off (and report on) PCI error recovery for a device that appears
 * to have fallen off the bus. Returns NV_OK only when PCI error recovery
 * is enabled, the register read returns all-ones, and the kernel reports
 * the PCI channel offline; NV_ERR_NOT_SUPPORTED otherwise.
 */
NV_STATUS NV_API_CALL nv_pci_trigger_recovery(
    nv_state_t *nv
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;
#if defined(NV_PCI_ERROR_RECOVERY)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    /*
     * Calling readl() on PPC64LE will allow the kernel to check its state for
     * the device and update it accordingly. This needs to be done before
     * checking if the PCI channel is offline, so that we don't check stale
     * state.
     *
     * This will also kick off the recovery process for the device.
     */
    if (NV_PCI_ERROR_RECOVERY_ENABLED())
    {
        /* All-ones reads are the classic symptom of a dead PCI device. */
        if (readl(nv->regs->map) == 0xFFFFFFFF)
        {
            if (pci_channel_offline(nvl->pci_dev))
            {
                NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                    "PCI channel for the device is offline\n");
                status = NV_OK;
            }
        }
    }
#endif
    return status;
}
|
|
|
|
/*
 * Report whether DMA addresses for this device require remapping:
 * true whenever DMA does not go through SWIOTLB.
 */
NvBool NV_API_CALL nv_requires_dma_remap(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    return !nv_dma_maps_swiotlb(nvl->dev);
}
|
|
|
|
/*
 * Intended for use by external kernel modules to list nvidia gpu ids.
 *
 * Calling convention: if *gpu_count is 0 on entry, only the number of
 * GPUs is reported back through *gpu_count (gpu_ids is not written).
 * Otherwise gpu_ids must have room for *gpu_count entries; if it is too
 * small, NV_FALSE is returned and *gpu_count is set to the required
 * count. On success the ids are filled in and *gpu_count is set to the
 * number written.
 */
NvBool nvidia_get_gpuid_list(NvU32 *gpu_ids, NvU32 *gpu_count)
{
    nv_linux_state_t *nvl;
    unsigned int count;
    NvBool ret = NV_TRUE;

    LOCK_NV_LINUX_DEVICES();

    /* First pass: count registered devices. */
    count = 0;
    for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next)
        count++;

    if (*gpu_count == 0)
    {
        /* Query-only call: report the count, write no ids. */
        goto done;
    }
    else if ((*gpu_count) < count)
    {
        ret = NV_FALSE;
        goto done;
    }

    /* Second pass: fill in the ids. */
    count = 0;
    for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next)
    {
        nv_state_t *nv = NV_STATE_PTR(nvl);
        gpu_ids[count++] = nv->gpu_id;
    }


done:

    *gpu_count = count;

    UNLOCK_NV_LINUX_DEVICES();

    return ret;
}
|
|
|
|
/*
 * Kernel-level analog to nvidia_open, intended for use by external
 * kernel modules. This increments the ref count of the device with
 * the given gpu_id and makes sure the device has been initialized.
 *
 * Clients of this interface are counted by the RM reset path, to ensure a
 * GPU is not reset while the GPU is active.
 *
 * Returns -ENODEV if the given gpu_id does not exist.
 */
int nvidia_dev_get(NvU32 gpu_id, nvidia_stack_t *sp)
{
    nv_linux_state_t *nvl;
    int rc;

    /* Takes nvl->ldata_lock */
    nvl = find_gpu_id(gpu_id);
    if (!nvl)
        return -ENODEV;

    rc = nv_open_device(NV_STATE_PTR(nvl), sp);

    /* Register this external client with the RM reset path. */
    if (rc == 0)
        WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_TRUE) != NV_OK);

    up(&nvl->ldata_lock);
    return rc;
}
|
|
|
|
/*
 * Kernel-level analog to nvidia_close, intended for use by external
 * kernel modules. This decrements the ref count of the device with
 * the given gpu_id, potentially tearing it down.
 */
void nvidia_dev_put(NvU32 gpu_id, nvidia_stack_t *sp)
{
    nv_linux_state_t *nvl;

    /* Takes nvl->ldata_lock */
    nvl = find_gpu_id(gpu_id);
    if (!nvl)
        return;

    nv_close_device(NV_STATE_PTR(nvl), sp);

    /* Unregister this external client from the RM reset path. */
    WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_FALSE) != NV_OK);

    up(&nvl->ldata_lock);
}
|
|
|
|
/*
 * Like nvidia_dev_get but uses UUID instead of gpu_id. Note that this may
 * trigger initialization and teardown of unrelated devices to look up their
 * UUIDs.
 *
 * Clients of this interface are counted by the RM reset path, to ensure a
 * GPU is not reset while the GPU is active.
 *
 * Locking: find_uuid_candidate() returns with the candidate's ldata_lock
 * held; the lock is released before moving to the next candidate, and
 * released on exit for the final candidate (if any).
 */
int nvidia_dev_get_uuid(const NvU8 *uuid, nvidia_stack_t *sp)
{
    nv_state_t *nv = NULL;
    nv_linux_state_t *nvl = NULL;
    const NvU8 *dev_uuid;
    int rc = 0;

    /* Takes nvl->ldata_lock */
    nvl = find_uuid_candidate(uuid);
    while (nvl)
    {
        nv = NV_STATE_PTR(nvl);

        /*
         * If the device is missing its UUID, this call exists solely so
         * rm_get_gpu_uuid_raw will be called and we can inspect the UUID.
         */
        rc = nv_open_device(nv, sp);
        if (rc != 0)
            goto out;

        /* The UUID should always be present following nv_open_device */
        dev_uuid = nv_get_cached_uuid(nv);
        WARN_ON(!dev_uuid);
        if (dev_uuid && memcmp(dev_uuid, uuid, GPU_UUID_LEN) == 0)
            break;

        /* No match, try again. */
        nv_close_device(nv, sp);
        up(&nvl->ldata_lock);
        nvl = find_uuid_candidate(uuid);
    }

    if (nvl)
    {
        /* Match found: keep the device open and register the client. */
        rc = 0;
        WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_TRUE) != NV_OK);
    }
    else
        rc = -ENODEV;

out:
    if (nvl)
        up(&nvl->ldata_lock);
    return rc;
}
|
|
|
|
/*
 * Like nvidia_dev_put but uses UUID instead of gpu_id.
 */
void nvidia_dev_put_uuid(const NvU8 *uuid, nvidia_stack_t *sp)
{
    nv_linux_state_t *nvl;

    /* Callers must already have called nvidia_dev_get_uuid() */

    /* Takes nvl->ldata_lock */
    nvl = find_uuid(uuid);
    if (!nvl)
        return;

    nv_close_device(NV_STATE_PTR(nvl), sp);

    /* Unregister this external client from the RM reset path. */
    WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_FALSE) != NV_OK);

    up(&nvl->ldata_lock);
}
|
|
|
|
/*
 * Keep the GPU identified by 'uuid' out of GC6 by taking a fine-grained
 * dynamic-power reference. Returns 0 on success, -ENODEV if the device
 * is unknown, -EINVAL if the reference could not be taken. Callers must
 * already have called nvidia_dev_get_uuid(); release the reference with
 * nvidia_dev_unblock_gc6().
 */
int nvidia_dev_block_gc6(const NvU8 *uuid, nvidia_stack_t *sp)

{
    nv_linux_state_t *nvl;
    int rc = 0;

    /* Takes nvl->ldata_lock */
    nvl = find_uuid(uuid);
    if (!nvl)
        return -ENODEV;

    if (rm_ref_dynamic_power(sp, NV_STATE_PTR(nvl), NV_DYNAMIC_PM_FINE) != NV_OK)
        rc = -EINVAL;

    up(&nvl->ldata_lock);

    return rc;
}
|
|
|
|
/*
 * Undo nvidia_dev_block_gc6(): drop the fine-grained dynamic power
 * reference for the GPU identified by uuid.  Returns 0 on success or
 * -ENODEV if no device with the given UUID is found.
 */
int nvidia_dev_unblock_gc6(const NvU8 *uuid, nvidia_stack_t *sp)
{
    nv_linux_state_t *linux_state;

    /* Callers must already have called nvidia_dev_get_uuid() */

    /* find_uuid() returns with ldata_lock held on success */
    linux_state = find_uuid(uuid);
    if (linux_state == NULL)
        return -ENODEV;

    /* Release the reference taken by nvidia_dev_block_gc6() */
    rm_unref_dynamic_power(sp, NV_STATE_PTR(linux_state), NV_DYNAMIC_PM_FINE);

    up(&linux_state->ldata_lock);

    return 0;
}
|
|
|
|
/*
 * Report the NUMA-related memory configuration of the device.
 *
 * Any of the output pointers may be NULL; only non-NULL ones are filled in.
 * Returns NV_ERR_NOT_SUPPORTED unless the platform supports NUMA and this
 * is a PPC64LE build (the only configuration with an NPU numa_info here).
 */
NV_STATUS NV_API_CALL nv_get_device_memory_config(
    nv_state_t *nv,
    NvU64 *compr_addr_sys_phys,   /* out: compressed sysmem physical base */
    NvU64 *addr_guest_phys,       /* out: guest physical base */
    NvU32 *addr_width,            /* out: usable address width in bits */
    NvS32 *node_id                /* out: NUMA node id of the device */
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    if (!nv_platform_supports_numa(nvl))
    {
        return NV_ERR_NOT_SUPPORTED;
    }

#if defined(NVCPU_PPC64LE)
    /* Declaration after statement: relies on gnu89/gnu11 kernel C dialect */
    nv_npu_numa_info_t *numa_info;

    numa_info = &nvl->npu->numa_info;

    if (node_id != NULL)
    {
        *node_id = nvl->numa_info.node_id;
    }

    if (compr_addr_sys_phys != NULL)
    {
        *compr_addr_sys_phys =
            numa_info->compr_sys_phys_addr;
    }

    if (addr_guest_phys != NULL)
    {
        *addr_guest_phys =
            numa_info->guest_phys_addr;
    }

    if (addr_width != NULL)
    {
        /*
         * NOTE(review): width is derived from the Volta DMA address size
         * minus the per-GPU address-space width — presumably the bits
         * available for the system-memory window; confirm against the
         * definitions of nv_volta_dma_addr_size/nv_volta_addr_space_width.
         */
        *addr_width = nv_volta_dma_addr_size - nv_volta_addr_space_width;
    }

    status = NV_OK;
#endif

    return status;
}
|
|
|
|
#if defined(NVCPU_PPC64LE)
|
|
|
|
/*
 * Query the NVLink line rate from the IBM NPU device tree node
 * ("ibm,nvlink-speed" property).  PPC64LE-only (enclosing #if), and
 * additionally requires pnv_pci_get_npu_dev() and of_get_property()
 * to be available in the running kernel.
 *
 * Returns NV_OK with *linerate set on success, NV_ERR_INVALID_ARGUMENT
 * for a NULL nv_state, or NV_ERR_NOT_SUPPORTED when the NPU, its OF node,
 * or the speed property is missing/zero.
 */
NV_STATUS NV_API_CALL nv_get_nvlink_line_rate(
    nv_state_t *nvState,
    NvU32 *linerate
)
{
#if defined(NV_PNV_PCI_GET_NPU_DEV_PRESENT) && defined(NV_OF_GET_PROPERTY_PRESENT)

    nv_linux_state_t *nvl;
    struct pci_dev *npuDev;
    NvU32 *pSpeedPtr = NULL;
    NvU32 speed;
    int len;   /* property length from of_get_property(); not otherwise used */

    if (nvState != NULL)
        nvl = NV_GET_NVL_FROM_NV_STATE(nvState);
    else
        return NV_ERR_INVALID_ARGUMENT;

    if (!nvl->npu)
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    /* Only the first NPU device is consulted for the link speed */
    npuDev = nvl->npu->devs[0];
    if (!npuDev->dev.of_node)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: %s: OF Node not found in IBM-NPU device node\n",
                  __FUNCTION__);
        return NV_ERR_NOT_SUPPORTED;
    }

    /* Device-tree properties are big-endian; converted below */
    pSpeedPtr = (NvU32 *) of_get_property(npuDev->dev.of_node, "ibm,nvlink-speed", &len);

    if (pSpeedPtr)
    {
        speed = (NvU32) be32_to_cpup(pSpeedPtr);
    }
    else
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    /* A zero speed is treated as "not supported" rather than a valid rate */
    if (!speed)
    {
        return NV_ERR_NOT_SUPPORTED;
    }
    else
    {
        *linerate = speed;
    }

    return NV_OK;

#endif

    return NV_ERR_NOT_SUPPORTED;
}
|
|
|
|
#endif
|
|
|
|
/*
 * Signal to the kernel that the GPU is idle for runtime PM purposes.
 *
 * Drops the runtime PM usage count without triggering an immediate idle
 * check, then performs a 1-byte read of the device's sysfs config file.
 * NOTE(review): the read appears to exist purely for its side effect of
 * kicking the PM core's idle re-evaluation via the config-space accessor —
 * confirm against where nvl->sysfs_config_file is opened.
 */
NV_STATUS NV_API_CALL nv_indicate_idle(
    nv_state_t *nv
)
{
#if defined(NV_PM_RUNTIME_AVAILABLE)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct device *dev = nvl->dev;
    struct file *file = nvl->sysfs_config_file;
    loff_t f_pos = 0;
    char buf;

    /* Decrement usage count; do not run the idle callback synchronously */
    pm_runtime_put_noidle(dev);

#if defined(NV_SEQ_READ_ITER_PRESENT)
    /*
     * Kernels where sysfs reads go through seq_read_iter(): kernel_read()
     * on a sysfs file no longer works here, so invoke the kernfs read op
     * directly, replicating kernfs' own locking/refcount protocol
     * (of->mutex plus the kn->active active-reference count).
     */
    {
        struct kernfs_open_file *of = ((struct seq_file *)file->private_data)->private;
        struct kernfs_node *kn;

        mutex_lock(&of->mutex);
        kn = of->kn;
        /* Skip if the node is being deactivated (active count negative) */
        if (kn != NULL && atomic_inc_unless_negative(&kn->active))
        {
            if ((kn->attr.ops != NULL) && (kn->attr.ops->read != NULL))
            {
                kn->attr.ops->read(of, &buf, 1, f_pos);
            }
            atomic_dec(&kn->active);
        }
        mutex_unlock(&of->mutex);
    }
#else
    /* Older kernels: a plain 1-byte kernel_read() suffices; the two
     * branches only differ in the historical kernel_read() signature. */
#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG)
    kernel_read(file, &buf, 1, &f_pos);
#else
    kernel_read(file, f_pos, &buf, 1);
#endif
#endif

    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
|
|
|
|
/*
 * Signal to the kernel that the GPU is no longer idle.
 *
 * Takes a runtime PM usage reference (without resuming), then forcibly
 * runs the PCI bus shutdown callback for the device.  is_forced_shutdown
 * is set first so the driver's shutdown handler can tell this apart from
 * a real system shutdown.  Order matters: the PM reference must be taken
 * before the shutdown callback runs.
 */
NV_STATUS NV_API_CALL nv_indicate_not_idle(
    nv_state_t *nv
)
{
#if defined(NV_PM_RUNTIME_AVAILABLE)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct device *dev = nvl->dev;

    /* Prevent runtime suspend without waking the device */
    pm_runtime_get_noresume(dev);

    nvl->is_forced_shutdown = NV_TRUE;
    /* Invoke the PCI core's shutdown path directly for this device */
    pci_bus_type.shutdown(dev);

    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
|
|
|
|
/*
 * Hold off runtime idling of the GPU by taking a runtime PM usage
 * reference without triggering a resume.  No-op when runtime PM is
 * not available in this kernel.
 */
void NV_API_CALL nv_idle_holdoff(
    nv_state_t *nv
)
{
#if defined(NV_PM_RUNTIME_AVAILABLE)
    pm_runtime_get_noresume(NV_GET_NVL_FROM_NV_STATE(nv)->dev);
#endif
}
|
|
|
|
/*
 * Report whether dynamic power management is usable for this device.
 * It is available only when runtime PM exists in the kernel and the
 * device's sysfs config file was successfully opened at setup time.
 */
NvBool NV_API_CALL nv_dynamic_power_available(
    nv_state_t *nv
)
{
#if defined(NV_PM_RUNTIME_AVAILABLE)
    return (NV_GET_NVL_FROM_NV_STATE(nv)->sysfs_config_file != NULL);
#else
    return NV_FALSE;
#endif
}
|
|
|
|
/* caller should hold nv_linux_devices_lock using LOCK_NV_LINUX_DEVICES */
|
|
void nv_linux_add_device_locked(nv_linux_state_t *nvl)
|
|
{
|
|
if (nv_linux_devices == NULL) {
|
|
nv_linux_devices = nvl;
|
|
}
|
|
else
|
|
{
|
|
nv_linux_state_t *tnvl;
|
|
for (tnvl = nv_linux_devices; tnvl->next != NULL; tnvl = tnvl->next);
|
|
tnvl->next = nvl;
|
|
}
|
|
}
|
|
|
|
/* caller should hold nv_linux_devices_lock using LOCK_NV_LINUX_DEVICES */
|
|
void nv_linux_remove_device_locked(nv_linux_state_t *nvl)
|
|
{
|
|
if (nvl == nv_linux_devices) {
|
|
nv_linux_devices = nvl->next;
|
|
}
|
|
else
|
|
{
|
|
nv_linux_state_t *tnvl;
|
|
for (tnvl = nv_linux_devices; tnvl->next != nvl; tnvl = tnvl->next);
|
|
tnvl->next = nvl->next;
|
|
}
|
|
}
|
|
|
|
/*
 * Enable or disable all SoC display interrupt lines owned by this device.
 *
 * On enable, pending bottom-half state is cleared and the current-IRQ
 * tracker is reset before any line is unmasked.  On disable,
 * disable_irq_nosync() is used, so this does not wait for in-flight
 * handlers and is safe from contexts that cannot sleep on handler exit.
 */
void NV_API_CALL nv_control_soc_irqs(nv_state_t *nv, NvBool bEnable)
{
    int count;

    if (bEnable)
    {
        /*
         * Reset the tracker once, *before* enabling any line.  The old code
         * assigned nv->current_soc_irq = -1 on every loop iteration, i.e.
         * after earlier lines were already enabled — a fired interrupt's
         * update to current_soc_irq could be clobbered by a later iteration.
         * Guarded so behavior with zero SoC IRQs is unchanged (untouched).
         */
        if (nv->num_soc_irqs > 0)
            nv->current_soc_irq = -1;

        for (count = 0; count < nv->num_soc_irqs; count++)
        {
            nv->soc_irq_info[count].bh_pending = NV_FALSE;
            enable_irq(nv->soc_irq_info[count].irq_num);
        }
    }
    else
    {
        for (count = 0; count < nv->num_soc_irqs; count++)
        {
            disable_irq_nosync(nv->soc_irq_info[count].irq_num);
        }
    }
}
|
|
|
|
/* Return the character-device minor number assigned to this GPU. */
NvU32 NV_API_CALL nv_get_dev_minor(nv_state_t *nv)
{
    /* The minor number is kept on the Linux-specific wrapper state. */
    return NV_GET_NVL_FROM_NV_STATE(nv)->minor_num;
}
|
|
|
|
/*
 * Acquire the fabric management capability.  Not supported in this
 * build; the stub ignores both parameters and always fails.
 */
NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap(int fd, int *duped_fd)
{
    (void)fd;
    (void)duped_fd;

    return NV_ERR_NOT_SUPPORTED;
}
|
|
|
|
/*
 * Wakes up the NVIDIA GPU HDA codec and controller by reading the
 * codec proc file (or by sending a codec verb on newer kernels).
 */
void NV_API_CALL nv_audio_dynamic_power(
    nv_state_t *nv
)
{
    /*
     * The runtime power management for nvidia HDA controller can be possible
     * after commit 07f4f97d7b4b ("vga_switcheroo: Use device link for HDA
     * controller"). This commit has also moved 'PCI_CLASS_MULTIMEDIA_HD_AUDIO'
     * macro from <sound/hdaudio.h> to <linux/pci_ids.h>.
     * If 'NV_PCI_CLASS_MULTIMEDIA_HD_AUDIO_PRESENT' is not defined, then
     * this function will be stub function.
     *
     * Also, check if runtime PM is enabled in the kernel (with
     * 'NV_PM_RUNTIME_AVAILABLE') and stub this function if it is disabled. This
     * function uses kernel fields only present when the kconfig has runtime PM
     * enabled.
     */
#if defined(NV_PCI_CLASS_MULTIMEDIA_HD_AUDIO_PRESENT) && defined(NV_PM_RUNTIME_AVAILABLE)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct device *dev = nvl->dev;
    struct pci_dev *audio_pci_dev, *pci_dev;
    struct snd_card *card;

    /* SoC/platform devices have no PCI audio function to wake */
    if (!nv_dev_is_pci(dev))
        return;

    pci_dev = to_pci_dev(dev);

    /* The audio controller is PCI function 1 of the same slot as the GPU */
    audio_pci_dev = os_pci_init_handle(NV_PCI_DOMAIN_NUMBER(pci_dev),
                                       NV_PCI_BUS_NUMBER(pci_dev),
                                       NV_PCI_SLOT_NUMBER(pci_dev),
                                       1, NULL, NULL);

    if (audio_pci_dev == NULL)
        return;

    /*
     * Check if HDA controller is in pm suspended state. The HDA controller
     * can not be runtime resumed if this API is called during system
     * suspend/resume time and HDA controller is in pm suspended state.
     */
    if (audio_pci_dev->dev.power.is_suspended)
        return;

    /* drvdata is the ALSA card registered by the HDA driver */
    card = pci_get_drvdata(audio_pci_dev);
    if (card == NULL)
        return;

    /*
     * Commit be57bfffb7b5 ("ALSA: hda: move hda_codec.h to include/sound")
     * in v4.20-rc1 moved "hda_codec.h" header file from the private sound
     * folder to include/sound.
     */
#if defined(NV_SOUND_HDA_CODEC_H_PRESENT)
    {
        struct list_head *p;
        struct hda_codec *codec = NULL;
        unsigned int cmd, res;

        /*
         * Traverse the list of devices which the sound card maintains and
         * search for the HDA codec controller.
         */
        list_for_each_prev(p, &card->devices)
        {
            struct snd_device *pdev = list_entry(p, struct snd_device, list);

            if (pdev->type == SNDRV_DEV_CODEC)
            {
                codec = pdev->device_data;

                /*
                 * NVIDIA HDA codec controller uses linux kernel HDA codec
                 * driver. Commit 05852448690d ("ALSA: hda - Support indirect
                 * execution of verbs") added support for overriding exec_verb.
                 * This codec->core.exec_verb will be codec_exec_verb() for
                 * NVIDIA HDA codec driver.
                 */
                if (codec->core.exec_verb == NULL)
                {
                    return;
                }

                break;
            }
        }

        if (codec == NULL)
        {
            return;
        }

        /* If HDA codec controller is already runtime active, then return */
        if (snd_hdac_is_power_on(&codec->core))
        {
            return;
        }

        /*
         * Encode codec verb for getting vendor ID from root node.
         * Refer Intel High Definition Audio Specification for more details.
         */
        cmd = (codec->addr << 28) | (AC_NODE_ROOT << 20) |
              (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;

        /*
         * It will internally increment the runtime PM refcount,
         * wake-up the audio codec controller and send the HW
         * command for getting vendor ID. Once the vendor ID will be
         * returned back, then it will decrement the runtime PM refcount
         * and runtime suspend audio codec controller again (If refcount is
         * zero) once auto suspend counter expires.
         */
        codec->core.exec_verb(&codec->core, cmd, 0, &res);
    }
#else
    {
        int codec_addr;

        /*
         * The filp_open() call below depends on the current task's fs_struct
         * (current->fs), which may already be NULL if this is called during
         * process teardown.
         */
        if (current->fs == NULL)
            return;

        /* If device is runtime active, then return */
        if (audio_pci_dev->dev.power.runtime_status == RPM_ACTIVE)
            return;

        /* Probe codec addresses until one proc file opens and yields a byte;
         * the read itself is what wakes the controller. */
        for (codec_addr = 0; codec_addr < NV_HDA_MAX_CODECS; codec_addr++)
        {
            char filename[48];
            NvU8 buf;
            int ret;

            ret = snprintf(filename, sizeof(filename),
                           "/proc/asound/card%d/codec#%d",
                           card->number, codec_addr);

            if (ret > 0 && ret < sizeof(filename) &&
                (os_open_and_read_file(filename, &buf, 1) == NV_OK))
            {
                break;
            }
        }
    }
#endif
#endif
}
|
|
|
|
static int nv_match_dev_state(const void *data, struct file *filp, unsigned fd)
|
|
{
|
|
nv_linux_state_t *nvl = NULL;
|
|
dev_t rdev = 0;
|
|
|
|
if (filp == NULL ||
|
|
filp->private_data == NULL ||
|
|
NV_FILE_INODE(filp) == NULL)
|
|
return 0;
|
|
|
|
rdev = (NV_FILE_INODE(filp))->i_rdev;
|
|
if (MAJOR(rdev) != NV_MAJOR_DEVICE_NUMBER)
|
|
return 0;
|
|
|
|
nvl = NV_GET_NVL_FROM_FILEP(filp);
|
|
if (nvl == NULL)
|
|
return 0;
|
|
|
|
return (data == nvl);
|
|
}
|
|
|
|
/*
 * Check whether 'os_info' (a struct file pointer recorded for a client)
 * refers to this GPU.  Reuses the per-fd matcher; -1 is passed for the
 * fd argument since no descriptor number is involved here.
 */
NvBool NV_API_CALL nv_match_gpu_os_info(nv_state_t *nv, void *os_info)
{
    return nv_match_dev_state(NV_GET_NVL_FROM_NV_STATE(nv), os_info, -1);
}
|
|
|
|
/*
 * Return whether the current process has this GPU's device file open,
 * by scanning the process's open file descriptors for a match.
 */
NvBool NV_API_CALL nv_is_gpu_accessible(nv_state_t *nv)
{
    struct files_struct *files = current->files;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

#ifdef NV_ITERATE_FD_PRESENT
    /* Preferred path: let the kernel walk the fd table for us */
    return !!iterate_fd(files, 0, nv_match_dev_state, nvl);
#else
    /* Fallback: open-coded fd-table walk for kernels without iterate_fd() */
    struct fdtable *fdtable;
    int ret_val = 0;
    int fd = 0;

    if (files == NULL)
        return 0;

    /* Protects the fd table against concurrent resize/close */
    spin_lock(&files->file_lock);

    for (fdtable = files_fdtable(files); fd < fdtable->max_fds; fd++)
    {
        struct file *filp;

        /* Entries may be updated concurrently; read each slot once */
#ifdef READ_ONCE
        filp = READ_ONCE(fdtable->fd[fd]);
#else
        filp = ACCESS_ONCE(fdtable->fd[fd]);
        smp_read_barrier_depends();
#endif
        if (filp == NULL)
            continue;

        ret_val = nv_match_dev_state(nvl, filp, fd);
        if (ret_val)
            break;
    }

    spin_unlock(&files->file_lock);

    /* Normalize the match result to 0/1 */
    return !!ret_val;
#endif
}
|
|
|
|
|
|
/*
 * Return whether the platform advertises s0ix (low-power S0 idle)
 * capability via the ACPI FADT.  Always false without ACPI support.
 */
NvBool NV_API_CALL nv_platform_supports_s0ix(void)
{
#if defined(CONFIG_ACPI)
    /* FADT "Low Power S0 Idle Capable" flag */
    if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
        return NV_TRUE;

    return NV_FALSE;
#else
    return NV_FALSE;
#endif
}
|
|
|
|
/*
 * Return whether the system is configured for suspend-to-idle, i.e.
 * /sys/power/mem_sleep currently reads "[s2idle]...".  The selected mode
 * is the bracketed entry, so comparing the first 8 bytes suffices.
 */
NvBool NV_API_CALL nv_s2idle_pm_configured(void)
{
    NvU8 buf[8];

#if defined(NV_SEQ_READ_ITER_PRESENT)
    /*
     * On kernels where sysfs reads go through seq_read_iter(), a plain
     * buffered read via os_open_and_read_file() no longer works; build a
     * synchronous kiocb + kvec iterator and call seq_read_iter() directly.
     */
    struct file *file;
    ssize_t num_read;
    struct kiocb kiocb;
    struct iov_iter iter;
    struct kvec iov = {
        .iov_base = &buf,
        .iov_len = sizeof(buf),
    };

    if (os_open_readonly_file("/sys/power/mem_sleep", (void **)&file) != NV_OK)
    {
        return NV_FALSE;
    }

    init_sync_kiocb(&kiocb, file);
    kiocb.ki_pos = 0;
    iov_iter_kvec(&iter, READ, &iov, 1, sizeof(buf));

    num_read = seq_read_iter(&kiocb, &iter);

    os_close_file((void *)file);

    /* A short read means the file content cannot start with "[s2idle]" */
    if (num_read != sizeof(buf))
    {
        return NV_FALSE;
    }
#else
    if (os_open_and_read_file("/sys/power/mem_sleep", buf,
                              sizeof(buf)) != NV_OK)
    {
        return NV_FALSE;
    }
#endif

    return (memcmp(buf, "[s2idle]", 8) == 0);
}
|
|
|
|
|
|
/*
|
|
* Function query system chassis info, to figure out if the platform is
|
|
* Laptop or Notebook.
|
|
* This function should be used when querying GPU form factor information is
|
|
* not possible via core RM or if querying both system and GPU form factor
|
|
* information is necessary.
|
|
*/
|
|
NvBool NV_API_CALL nv_is_chassis_notebook(void)
|
|
{
|
|
const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
|
|
|
|
//
|
|
// Return true only for Laptop & Notebook
|
|
// As per SMBIOS spec Laptop = 9 and Notebook = 10
|
|
//
|
|
return (chassis_type && (!strcmp(chassis_type, "9") || !strcmp(chassis_type, "10")));
|
|
}
|
|
|
|
/*
 * Re-allow runtime suspend for the device.
 *
 * NOTE(review): this mirrors the kernel's pm_runtime_allow() — set
 * runtime_auto under power.lock and drop the usage count it had pinned,
 * presumably open-coded because pm_runtime_allow() is not exported on all
 * supported kernels; confirm.  atomic_add_unless(.., -1, 0) decrements
 * only if the count is nonzero, so the count can never go negative here.
 */
void NV_API_CALL nv_allow_runtime_suspend
(
    nv_state_t *nv
)
{
#if defined(NV_PM_RUNTIME_AVAILABLE)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct device *dev = nvl->dev;

    spin_lock_irq(&dev->power.lock);

    /* Only act on a transition; keeps the usage count balanced */
    if (dev->power.runtime_auto == false)
    {
        dev->power.runtime_auto = true;
        atomic_add_unless(&dev->power.usage_count, -1, 0);
    }

    spin_unlock_irq(&dev->power.lock);
#endif
}
|
|
|
|
/*
 * Forbid runtime suspend for the device.
 *
 * NOTE(review): this mirrors the kernel's pm_runtime_forbid() — clear
 * runtime_auto under power.lock and pin the device with a usage-count
 * increment; presumably open-coded for the same export-availability
 * reason as nv_allow_runtime_suspend().  The transition guard keeps the
 * increment balanced against the decrement in the allow path.
 */
void NV_API_CALL nv_disallow_runtime_suspend
(
    nv_state_t *nv
)
{
#if defined(NV_PM_RUNTIME_AVAILABLE)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct device *dev = nvl->dev;

    spin_lock_irq(&dev->power.lock);

    /* Only act on a transition; keeps the usage count balanced */
    if (dev->power.runtime_auto == true)
    {
        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
    }

    spin_unlock_irq(&dev->power.lock);
#endif
}
|
|
|
|
/* Report the OS type to core RM; this driver build always runs on Linux. */
NvU32 NV_API_CALL nv_get_os_type(void)
{
    return OS_TYPE_LINUX;
}
|
|
|
|
/*
 * Flush a coherent CPU-cached virtual address range.
 *
 * PPC64LE delegates to the IBM NPU flush helper.  AArch64 walks the range
 * issuing "dc civac" (data-cache clean+invalidate by VA to the point of
 * coherency) one cache line at a time, followed by a full "dsb sy" barrier.
 * On other architectures this is a no-op.
 */
void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size)
{
#if NVCPU_IS_PPC64LE
    return nv_ibmnpu_cache_flush_range(nv, cpu_virtual, size);
#elif NVCPU_IS_AARCH64

    NvU64 va, cbsize;
    NvU64 end_cpu_virtual = cpu_virtual + size;

    nv_printf(NV_DBG_INFO,
        "Flushing CPU virtual range [0x%llx, 0x%llx)\n",
        cpu_virtual, end_cpu_virtual);

    cbsize = cache_line_size();
    /*
     * Align the start address DOWN to the cache line size.  The previous
     * code aligned UP, which skipped the first (partial) cache line and
     * left bytes in [cpu_virtual, aligned_start) unflushed.  "dc civac"
     * operates on the whole line containing the VA, so rounding down
     * still covers exactly the requested range.
     */
    cpu_virtual = NV_ALIGN_DOWN(cpu_virtual, cbsize);

    // Force eviction of any cache lines from the NUMA-onlined region.
    for (va = cpu_virtual; va < end_cpu_virtual; va += cbsize)
    {
        asm volatile("dc civac, %0" : : "r" (va): "memory");
        // Reschedule if necessary to avoid lockup warnings
        cond_resched();
    }
    /* Ensure all cache maintenance has completed before returning */
    asm volatile("dsb sy" : : : "memory");

#endif
}
|
|
|
|
static struct resource *nv_next_resource(struct resource *p)
|
|
{
|
|
if (p->child != NULL)
|
|
return p->child;
|
|
|
|
while ((p->sibling == NULL) && (p->parent != NULL))
|
|
p = p->parent;
|
|
|
|
return p->sibling;
|
|
}
|
|
|
|
/*
 * Function to get the correct PCI Bus memory window which can be mapped
 * in the real mode emulator (emu).
 * The function gets called during the initialization of the emu before
 * remapping it to OS.
 *
 * On entry *start/*end describe the requested window; on a successful
 * match they are clamped to the intersection with the found iomem
 * resource.  If no resource matches, the range is left unchanged.
 */
void NV_API_CALL nv_get_updated_emu_seg(
    NvU32 *start,
    NvU32 *end
)
{
    struct resource *p;

    /* Degenerate or empty range: nothing to do */
    if (*start >= *end)
        return;

    /* Pre-order walk of the whole iomem resource tree */
    for (p = iomem_resource.child; (p != NULL); p = nv_next_resource(p))
    {
        /* If we passed the resource we are looking for, stop */
        if (p->start > *end)
        {
            /* Clear p so the clamp below is skipped */
            p = NULL;
            break;
        }

        /* Skip until we find a range that matches what we look for */
        if (p->end < *start)
            continue;

        /* Resource extends past the request and has children:
         * descend to find a tighter match instead of taking it */
        if ((p->end > *end) && (p->child))
            continue;

        /* Only memory resources qualify (not I/O ports etc.) */
        if ((p->flags & IORESOURCE_MEM) != IORESOURCE_MEM)
            continue;

        /* Found a match, break */
        break;
    }

    if (p != NULL)
    {
        /* Clamp the requested window to the matched resource */
        *start = max((resource_size_t)*start, p->start);
        *end = min((resource_size_t)*end, p->end);
    }
}
|