Updating prebuilts and/or headers

d13779dbbab1c776db15f462cd46b29f2c0f8c7c - Makefile
7d577fdb9594ae572ff38fdda682a4796ab832ca - COPYING
5728867ce2e96b63b29367be6aa1c0e47bcafc8f - SECURITY.md
6b73bf6a534ddc0f64e8ba88739381c3b7fb4b5c - nv-compiler.sh
05e911b99b109a721d2045f025b21189e2718e60 - README.md
ec5f1eb408e0b650158e0310fb1ddd8e9b323a6f - CONTRIBUTING.md
af3ee56442f16029cb9b13537477c384226b22fc - CODE_OF_CONDUCT.md
07bd07999f296d935386a8edf719d0e296f63227 - kernel-open/Kbuild
45b68e3eacda04dcadce48a8238574302a71a3ca - kernel-open/Makefile
70bd025d834add7e40a0ee69e48612a289df1f07 - kernel-open/conftest.sh
0b1508742a1c5a04b6c3a4be1b48b506f4180848 - kernel-open/dkms.conf
19a5da412ce1557b721b8550a4a80196f6162ba6 - kernel-open/common/inc/os_dsi_panel_props.h
4750735d6f3b334499c81d499a06a654a052713d - kernel-open/common/inc/nv-caps.h
92de3baafe321dd0dcf8665aae4614d5ac670718 - kernel-open/common/inc/rs_access.h
60ef64c0f15526ae2d786e5cec07f28570f0663b - kernel-open/common/inc/conftest.h
880e45b68b19fdb91ac94991f0e6d7fc3b406b1f - kernel-open/common/inc/nv-pci-types.h
6d2f660ef0942edf664874f260266ec81cd0ff08 - kernel-open/common/inc/nvtypes.h
d580300f41118dacc5569fffa9f47e78c5883141 - kernel-open/common/inc/nv-modeset-interface.h
5bc7a748c7d3dfa6559ca4f9fe6199e17098ec8f - kernel-open/common/inc/nv-lock.h
b249abc0a7d0c9889008e98cb2f8515a9d310b85 - kernel-open/common/inc/nvgputypes.h
e4a4f57abb8769d204468b2f5000c81f5ea7c92f - kernel-open/common/inc/nv-procfs.h
8b19b93e958aca626899f035334a4c96f8776eb6 - kernel-open/common/inc/nv.h
ede1f77acb43e28391bceac058e00a7a8d799b0d - kernel-open/common/inc/nvmisc.h
ae374d3e438f8d3b60df8c4602618c58564b73f9 - kernel-open/common/inc/rm-gpu-ops.h
3f7b20e27e6576ee1f2f0557d269697a0b8af7ec - kernel-open/common/inc/nv-firmware-registry.h
5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - kernel-open/common/inc/dce_rm_client_ipc.h
3e8075872e2efa843b74b884ef5098468edc4f18 - kernel-open/common/inc/nvimpshared.h
befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - kernel-open/common/inc/nv_stdarg.h
0e70d16576584082ee4c7f3ff9944f3bd107b1c1 - kernel-open/common/inc/cpuopsys.h
d7ab0ee225361daacd280ff98848851933a10a98 - kernel-open/common/inc/nv-list-helpers.h
b02c378ac0521c380fc2403f0520949f785b1db6 - kernel-open/common/inc/nv-dmabuf.h
a3d1e51c0f4217f1dc4cb0c48aa0eafd054d4e5e - kernel-open/common/inc/nv-procfs-utils.h
81592e5c17bebad04cd11d73672c859baa070329 - kernel-open/common/inc/nv-chardev-numbers.h
61cf8f3fd32142dc402f6802b5d4c9af6c875c35 - kernel-open/common/inc/nv-firmware.h
d5253e7e4abd3ad8d72375260aa80037adcd8973 - kernel-open/common/inc/nv_dpy_id.h
61a9589e4a8ec122e5a6c2258658d493ee747897 - kernel-open/common/inc/nv-platform.h
b986bc6591ba17a74ad81ec4c93347564c6d5165 - kernel-open/common/inc/nvkms-format.h
4f487eccd762f3ca645a685d5c333ff569e7987c - kernel-open/common/inc/nv-kthread-q-os.h
4015c4557ea0790a2bdf5695832c89e31d75aee9 - kernel-open/common/inc/nvlimits.h
143051f69a53db0e7c5d2f846a9c14d666e264b4 - kernel-open/common/inc/nv-kref.h
56f432032bef4683c2801f46bec5065923475fb1 - kernel-open/common/inc/nv-kthread-q.h
b4c5d759f035b540648117b1bff6b1701476a398 - kernel-open/common/inc/nvCpuUuid.h
ea5d5f391b3d634e97c8f9463045bd314c81fe14 - kernel-open/common/inc/nv-linux.h
7c7888550b12eeb98128ea9ac771b897327f538e - kernel-open/common/inc/nv-hypervisor.h
f9cb3701681994ff6f32833892d900b0da2b89f6 - kernel-open/common/inc/nv-pgprot.h
b8700a911ac85770bf25d70b9692308af63966bd - kernel-open/common/inc/nvstatuscodes.h
3a5f4f105672921b857fec7f2b577d9d525afe37 - kernel-open/common/inc/nv-timer.h
5cd0b3f9c7f544e9064efc9b5ba4f297e5494315 - kernel-open/common/inc/nv-time.h
7a78f354e0b68f03d6ab566d5b755e299456f361 - kernel-open/common/inc/os_gpio.h
3d5aa82afa27ba6edd0ebbf500e8b07a8d1dd788 - kernel-open/common/inc/nv-proto.h
2eb11e523a3ecba2dcd68f3146e1e666a44256ae - kernel-open/common/inc/nv-ioctl.h
1328058925b64e97588d670fe70466b31af7c7c1 - kernel-open/common/inc/nv-mm.h
fba1f7a11efe4362f60cbf3ec71a9a2a3b28a9d8 - kernel-open/common/inc/nv-pci.h
95bf694a98ba78d5a19e66463b8adda631e6ce4c - kernel-open/common/inc/nvstatus.h
d74a8d4a9ae3d36e92b39bc7c74b27df44626b1c - kernel-open/common/inc/nv_mig_types.h
b3258444b6a2c2399f5f00c7cac5b470c41caeaa - kernel-open/common/inc/nv-hash.h
29309411e2bf1c2e6492a104dcb9f53705c2e9aa - kernel-open/common/inc/nvkms-kapi.h
44cb5bc2bc87a5c3447bcb61f2ce5aef08c07fa7 - kernel-open/common/inc/nv_uvm_interface.h
1e7eec6561b04d2d21c3515987aaa116e9401c1f - kernel-open/common/inc/nv-kernel-interface-api.h
c54c62de441828282db9a4f5b35c2fa5c97d94f1 - kernel-open/common/inc/nvkms-api-types.h
ade7410c1c0572dbed49b4b0d97b87245ca59115 - kernel-open/common/inc/os-interface.h
2ffd0138e1b3425ade16b962c3ff02a82cde2e64 - kernel-open/common/inc/nv-ioctl-numa.h
995d8447f8539bd736cc09d62983ae8ebc7e3436 - kernel-open/common/inc/nv_common_utils.h
c75bfc368c6ce3fc2c1a0c5062834e90d822b365 - kernel-open/common/inc/nv-memdbg.h
dfd7b82a7f2939d4c1869840059705c6b71bffe3 - kernel-open/common/inc/nv-msi.h
3b12d770f8592b94a8c7774c372e80ad08c5774c - kernel-open/common/inc/nvi2c.h
894ef9e230604572bbceabdfd5f241059d54aa10 - kernel-open/common/inc/nv_speculation_barrier.h
107d1ecb8a128044260915ea259b1e64de3defea - kernel-open/common/inc/nv-ioctl-numbers.h
19cfcbf5a3021aa9aaa0ceacbb6711e7f7a6e09e - kernel-open/common/inc/nv_uvm_user_types.h
cfcd2ef5eaec92f8e4647fff02a3b7e16473cbff - kernel-open/common/inc/nv_uvm_types.h
b642fb649ce2ba17f37c8aa73f61b38f99a74986 - kernel-open/common/inc/nv-retpoline.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - kernel-open/common/inc/nv-gpu-info.h
cda75171ca7d8bf920aab6d56ef9aadec16fd15d - kernel-open/common/inc/os/nv_memory_type.h
70b67003fda6bdb8a01fa1e41c3b0e25136a856c - kernel-open/common/inc/os/nv_memory_area.h
11b09260232a88aa1f73f109fdfab491a7b73576 - kernel-open/nvidia/nv-nano-timer.c
dcf4427b83cce7737f2b784d410291bf7a9612dc - kernel-open/nvidia/nv-reg.h
0b8ff957fb14f20ba86f61e556d1ab15bf5acd74 - kernel-open/nvidia/nv-imp.c
6b09b5ef8a37f78c8e82074b06b40ef593c81807 - kernel-open/nvidia/libspdm_rsa.c
b8d361216db85fe897cbced2a9600507b7708c61 - kernel-open/nvidia/libspdm_hkdf_sha.c
66e2bfc490fb77e0b72a8192b719d3dc74d25d59 - kernel-open/nvidia/nv-pat.c
26a30f2d26c2a97a6e2ee457d97d32f48b0bf25b - kernel-open/nvidia/nv-vm.c
b8a770cea0629c57d8b0b3d7414d7b0f043ee8cf - kernel-open/nvidia/libspdm_ecc.c
4c183eb39251cd78d90868ec6f75ebc7a37e6644 - kernel-open/nvidia/os-usermap.c
8c30b6230439edcbec62636cc93be512bca8637f - kernel-open/nvidia/nv-usermap.c
7af675f85642229b7e7de05dcadd622550fe7ad7 - kernel-open/nvidia/nv-vtophys.c
d11ab03a617b29efcf00f85e24ebce60f91cf82c - kernel-open/nvidia/nv-backlight.c
ef8fd76c55625aeaa71c9b789c4cf519ef6116b2 - kernel-open/nvidia/libspdm_hkdf.c
1590794925ebd9cbc14aae8c47e0cc205a3f4b52 - kernel-open/nvidia/nv-rsync.h
934a686ba8d7b77cce2d928cb3b04f611d9f9187 - kernel-open/nvidia/libspdm_aead.c
f16e6a33b5004566333fb8b99504a0fb95d51226 - kernel-open/nvidia/nv-gpio.c
8ed2c3b93eeaa52342d944e794180fd5d386688a - kernel-open/nvidia/libspdm_rsa_ext.c
2e5d18118835c19c5ca7edee9bceeae613b9d7f9 - kernel-open/nvidia/nv-procfs.c
23009178fd74b867836e8c23055544a900aeea7a - kernel-open/nvidia/nv.c
65fe797fb5d4af2db67544ddb79d49ab1b7ca859 - kernel-open/nvidia/nv-dsi-parse-panel-props.c
e3efae4ed920545062a2d06064df8be1a2a42135 - kernel-open/nvidia/nv-caps-imex.h
8c64e75aaaa9ac6f17aae7ed62db23eb2e5b9953 - kernel-open/nvidia/nv_uvm_interface.c
4563589496a93a2720e25807ca1be2565f03554c - kernel-open/nvidia/nv-bpmp.c
aea97021d9aa023a357f009fcddc710f710ceb5e - kernel-open/nvidia/libspdm_x509.c
f29e5bc1c7bd2c670780cdbb7275900a69f4d205 - kernel-open/nvidia/internal_crypt_lib.h
22418fb7ae38c7f153af94edf2f5bd1cbcacb889 - kernel-open/nvidia/nv-modeset-interface.c
6ae527b69eebb44224b05e8cb3546757532d8a16 - kernel-open/nvidia/nv-dma.c
fe204e3820d206b5b0c34a51084f39b97310305a - kernel-open/nvidia/nv-ipc-soc.c
60d6ff5becc0ddbcf4b489b9d88c1dec8ccc67be - kernel-open/nvidia/nv-platform-pm.c
c1f7c81018a414b7a657431b115a1b86d3ebe3e7 - kernel-open/nvidia/os-mlock.c
c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia/nv-kthread-q.c
70bece14e12b9ffc92816ee8159a4ce596579d78 - kernel-open/nvidia/os-pci.c
a677049bb56fa5ebe22fe43b0c4a12acd58a6677 - kernel-open/nvidia/nv-p2p.c
e4d12f027cb5f74124da71bbbc23bcb33651834a - kernel-open/nvidia/nv-pci-table.c
5aec112c8a7eca49686f36efefe6d6e3e9548e32 - kernel-open/nvidia/nv-pci.c
6dfc57ac42bed97c6ff81d82e493f05b369e0b84 - kernel-open/nvidia/nvspdm_cryptlib_extensions.h
bba706cfbc04b3a880b5e661066f92e765fad663 - kernel-open/nvidia/nv-caps-imex.c
ed3c83f62e4ccc4b53d886eedd4b47518a361393 - kernel-open/nvidia/nv-dmabuf.c
66b7fad4d73a23153298ce777afb14d2c8be42c1 - kernel-open/nvidia/libspdm_internal_crypt_lib.c
6d4fbea733fdcd92fc6a8a5884e8bb359f9e8abd - kernel-open/nvidia/rmp2pdefines.h
b71bf4426322ab59e78e2a1500509a5f4b2b71ab - kernel-open/nvidia/nv-pat.h
9a5a58bd6eb71a4c32e334a1a4e3326a17143cce - kernel-open/nvidia/os-interface.c
1a91f5e6d517856303da448bea80d167b238e41c - kernel-open/nvidia/nv-i2c.c
7d409e3f0255d17457bffbf318e2f9ea160680a5 - kernel-open/nvidia/nv-pci-table.h
c50865d3070a0c3476ce24ff1ab4cc4e3f9ea4be - kernel-open/nvidia/detect-self-hosted.h
7ae9a57b9e99fd2a3534798e52e57f7784738a53 - kernel-open/nvidia/nv-report-err.c
3b27e4eaa97bd6fa71f1a075b50af69b1ec16454 - kernel-open/nvidia/libspdm_ec.c
dd9e367cba9e0672c998ec6d570be38084a365ab - kernel-open/nvidia/libspdm_rand.c
d8b8077adb7fd70eb9528d421bdef98c4378b57a - kernel-open/nvidia/nv-msi.c
23ae2957e6d58ab1b41398b6fdeecb7c475300b9 - kernel-open/nvidia/nv-platform.c
dd819a875c584bc469082fcf519779ea00b1d952 - kernel-open/nvidia/libspdm_aead_aes_gcm.c
74958745f83b14c04aaa60248bf5c86ceef6b5cb - kernel-open/nvidia/nv-acpi.c
4d19a1756af848d25fd2fd8cc691dcbcf0afb776 - kernel-open/nvidia/os-registry.c
80f9ac558a57c60cbf70f3ecaf73c71e60c98885 - kernel-open/nvidia/nv-rsync.c
7f5d251db1db4a179a67efea0178fbfda94f95d0 - kernel-open/nvidia/nv_gpu_ops.h
642c3a7d10b263ab9a63073f83ad843566927b58 - kernel-open/nvidia/libspdm_hmac_sha.c
7d53c2d27580d1b2cc56246d9972f3f310a3cd34 - kernel-open/nvidia/nv-clk.c
0f28ebcdb723e836c923e40642429838fa9e86dc - kernel-open/nvidia/nvidia-sources.Kbuild
99540efd2dfa6907b84e628e12370eefb0222850 - kernel-open/nvidia/nv-mmap.c
11ac7a3a3b4def7fa31a289f5f8461ad90eca06b - kernel-open/nvidia/nv-tracepoint.h
a14b9115cff1e5e7491737083588a5646c8c227b - kernel-open/nvidia/nv-report-err.h
011f975d4f94f7b734efa23d3c8075321eaaf0e8 - kernel-open/nvidia/nv-memdbg.c
c0cf3914fcda832f27472ba2fdb9c0f831f05733 - kernel-open/nvidia/nvidia.Kbuild
ac976b92e83f19125d6b3f7e95d9523e430b9b09 - kernel-open/nvidia/nv-p2p.h
9b036018501d9b8543aabe7ec35dbe33023bb3e0 - kernel-open/nvidia/nv-host1x.c
11778961efc78ef488be5387fa3de0c1b761c0d9 - kernel-open/nvidia/libspdm_sha.c
02b1936dd9a9e30141245209d79b8304b7f12eb9 - kernel-open/nvidia/nv-cray.c
2d61ad39b2356c9cfd8d57c1842e80a20272e37f - kernel-open/nvidia/nv-caps.c
fc199c04b321db79ab5446574d9b994f8bfe6c24 - kernel-open/nvidia/libspdm_shash.c
fa178a7209f56008e67b553a2c5ad1b2dd383aac - kernel-open/nvidia/hal/library/cryptlib/cryptlib_rng.h
34de62da6f880ba8022299c77eddbb11d7fc68d2 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_hash.h
8af43a3f0e4201aa6ff0099221a371fb1801e818 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_rsa.h
cf94004b7b5729982806f7d6ef7cc6db53e3de56 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_aead.h
9a6e164ec60c2feb1eb8782e3028afbffe420927 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_mac.h
4991dfa8852edbdd1ffbd2d44f7b6ac4e1c8c752 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_ec.h
7694b027d74d65561ce6cd15a8c0822e4b32b73a - kernel-open/nvidia/hal/library/cryptlib/cryptlib_sm2.h
8b84a0cc1127f39652362007e048ea568c9cf80b - kernel-open/nvidia/hal/library/cryptlib/cryptlib_ecd.h
2d7b566655ba8a05fae4ea4f6c806b75d7ebb5f3 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_cert.h
0dcb1fd3982e6307b07c917cb453cddbcd1d2f43 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_dh.h
7ff12b437215b77c920a845943e4101dcde289c4 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_hkdf.h
16dd525c52448a32cc8da75d6a644d8a35efbfee - kernel-open/nvidia/library/spdm_lib_config.h
53a9acf65cad6bc4869a15d8086990365c987456 - kernel-open/nvidia/library/cryptlib.h
cfbaebb1091f7b1a8d2e3c54c2301ac45ade6c40 - kernel-open/nvidia/internal/libspdm_lib_config.h
2ea094687fbee1e116cd0362cbeba7592439e0b6 - kernel-open/nvidia-drm/nvidia-drm-crtc.h
bed7b5053d09473188061b0d7f6a3a65b64f72e0 - kernel-open/nvidia-drm/nvidia-drm-linux.c
0f8e4535cf97fadea23c9848483355583f492131 - kernel-open/nvidia-drm/nvidia-drm-utils.c
35034b6f174cd6a14b7d94a07f777794570959b4 - kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h
072e1d6a260e348dada181162949eee190321ed8 - kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c
e86dac2985f4e61f4e2676b3290e47cdcb951c46 - kernel-open/nvidia-drm/nvidia-drm-modeset.c
f00a605cac7ffc7f309e3952c5d4cea7cbfc0b7e - kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
99642b76e9a84b5a1d2e2f4a8c7fb7bcd77a44fd - kernel-open/nvidia-drm/nvidia-drm.h
763833186eabf1a0501434426c18161febf624d4 - kernel-open/nvidia-drm/nvidia-drm-fb.h
4bada3ff7bfee8b7e222fc4cafb2ac97c67d7898 - kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h
99a2e922a448b4d76318ec151378c8bbf5971595 - kernel-open/nvidia-drm/nvidia-drm-helper.c
ace74442a1a573cee5ed1f67801ab1bdc142b4ae - kernel-open/nvidia-drm/nvidia-drm.c
94c28482252c983fd97532634ffafea0bf77337a - kernel-open/nvidia-drm/nvidia-drm-ioctl.h
a4f77f8ce94f63f3ca2a970c1935d8da48ab5ccc - kernel-open/nvidia-drm/nvidia-drm-format.c
6ea6eeaae58c4ced3fb2d6bc1128b895165e5a9a - kernel-open/nvidia-drm/nvidia-drm-drv.h
4154c5562cebd2747bd15fb302c19cb0cefe1c9c - kernel-open/nvidia-drm/nvidia-drm-connector.h
c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia-drm/nv-kthread-q.c
e4d12f027cb5f74124da71bbbc23bcb33651834a - kernel-open/nvidia-drm/nv-pci-table.c
47110750cf788e7d9ddb5db85be3658ac660a109 - kernel-open/nvidia-drm/nvidia-drm-fence.h
73a1acab50e65c468cb71b65238a051bc306ae70 - kernel-open/nvidia-drm/nvidia-drm-encoder.h
aa388c0d44060b8586967240927306006531cdb7 - kernel-open/nvidia-drm/nvidia-drm-helper.h
d0b4f4383a7d29be40dd22e36faa96dae12d2364 - kernel-open/nvidia-drm/nvidia-drm-os-interface.h
63a2fec1f2c425e084bdc07ff05bda62ed6b6ff1 - kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c
0e2d8b82e79dfd18fff7ce45e42e501e2e7a06c2 - kernel-open/nvidia-drm/nvidia-drm-drv.c
19031f2eaaaeb0fa1da61681fa6048c3e303848b - kernel-open/nvidia-drm/nvidia-drm-gem.c
71ea2d5b02bf8fb3e8cf6b7c84686e2edbc244d0 - kernel-open/nvidia-drm/nvidia-drm-encoder.c
7d409e3f0255d17457bffbf318e2f9ea160680a5 - kernel-open/nvidia-drm/nv-pci-table.h
9f57b8724205e03ca66b32fe710cd36b82932528 - kernel-open/nvidia-drm/nvidia-drm-conftest.h
6e9838b169beffe149ba12625acb496504d36d50 - kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c
d2525a36b7aec71982df80a89b861f220312103d - kernel-open/nvidia-drm/nvidia-dma-resv-helper.h
a505f0aa98ebcf438307f6bacf9bf5a5be189839 - kernel-open/nvidia-drm/nvidia-drm-connector.c
d5518597469dc874ee7e264b9400db51af2fcd44 - kernel-open/nvidia-drm/nvidia-drm-format.h
437d87e7e4bd34ae3c67b27c2faaa394575acf70 - kernel-open/nvidia-drm/nvidia-drm-priv.h
88b2035ddbba8c7f455209e61256b4e7b09c11dd - kernel-open/nvidia-drm/nvidia-drm-fence.c
eff6a0b72274c8824b7a79e9aee261da3a6fb4f1 - kernel-open/nvidia-drm/nvidia-drm-gem.h
6528efa1f8061678b8543c5c0be8761cab860858 - kernel-open/nvidia-drm/nvidia-drm-modeset.h
46a41b0b3470190abcdc57a739238a9cd773812b - kernel-open/nvidia-drm/nvidia-drm.Kbuild
995d8447f8539bd736cc09d62983ae8ebc7e3436 - kernel-open/nvidia-drm/nv_common_utils.h
40b5613d1fbbe6b74bff67a5d07974ad321f75f0 - kernel-open/nvidia-drm/nvidia-drm-utils.h
d924c494620760887546f428f87387d8ed5b99a6 - kernel-open/nvidia-drm/nvidia-drm-fb.c
5eb8385042f3efa5c2e14d168cdb40b211467552 - kernel-open/nvidia-drm/nvidia-drm-crtc.c
62a9b9b30fd7417d9ab085b2bfc731aadd9826f9 - kernel-open/nvidia-drm/nvidia-drm-os-interface.c
ca86fee8bd52e6c84e376199c5f3890078bc2031 - kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h
73d346cb00350ecef5cb841316f24021d8b15e8d - kernel-open/nvidia-modeset/nvidia-modeset-linux.c
885370cd7fc5f4d9d685d9d8d3e21460fd30cb38 - kernel-open/nvidia-modeset/nvkms.h
c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia-modeset/nv-kthread-q.c
8ef1c6aa1976e45bdf9c3eded10434daa5b79341 - kernel-open/nvidia-modeset/nvidia-modeset.Kbuild
2ea1436104463c5e3d177e8574c3b4298976d37e - kernel-open/nvidia-modeset/nvkms-ioctl.h
13d4f9648118dd25b790be0d8d72ebaa12cc8d0e - src/common/sdk/nvidia/inc/rs_access.h
579be4859587206460d8729804aab19180fb69bb - src/common/sdk/nvidia/inc/nvtypes.h
993f17e3094243623f793ae16bd84b5fa3f335ec - src/common/sdk/nvidia/inc/g_finn_rm_api.h
a54d77d45f9b0c5ae3fa8b59d2117145260800b6 - src/common/sdk/nvidia/inc/cc_drv.h
b249abc0a7d0c9889008e98cb2f8515a9d310b85 - src/common/sdk/nvidia/inc/nvgputypes.h
78a4b6b19a38de41527ef8b290754deca5906817 - src/common/sdk/nvidia/inc/nvcd.h
ede1f77acb43e28391bceac058e00a7a8d799b0d - src/common/sdk/nvidia/inc/nvmisc.h
46966ed7fc8d85931b49b12683c42666181f33f6 - src/common/sdk/nvidia/inc/nvimpshared.h
befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - src/common/sdk/nvidia/inc/nv_stdarg.h
f5a682339a89d2b119b43e5b9263dd67346ed3bc - src/common/sdk/nvidia/inc/cpuopsys.h
cf1de27d5bcbd0adbe3c3b64466193b7d9094c71 - src/common/sdk/nvidia/inc/nverror.h
4015c4557ea0790a2bdf5695832c89e31d75aee9 - src/common/sdk/nvidia/inc/nvlimits.h
7c7888550b12eeb98128ea9ac771b897327f538e - src/common/sdk/nvidia/inc/nv-hypervisor.h
b8700a911ac85770bf25d70b9692308af63966bd - src/common/sdk/nvidia/inc/nvstatuscodes.h
95bf694a98ba78d5a19e66463b8adda631e6ce4c - src/common/sdk/nvidia/inc/nvstatus.h
a506a41b8dcf657fb39a740ffc1dfd83835d6c89 - src/common/sdk/nvidia/inc/nvcfg_sdk.h
1e7eec6561b04d2d21c3515987aaa116e9401c1f - src/common/sdk/nvidia/inc/nv-kernel-interface-api.h
af0bc90b3ad4767de53b8ff91e246fdab0146e8b - src/common/sdk/nvidia/inc/nvsecurityinfo.h
5cec5038e1f4a395a08b765c8361a9560f3312b7 - src/common/sdk/nvidia/inc/nvdisptypes.h
c8b96af9d498f87cb9acde064648f9e84d789055 - src/common/sdk/nvidia/inc/nv_vgpu_types.h
3b12d770f8592b94a8c7774c372e80ad08c5774c - src/common/sdk/nvidia/inc/nvi2c.h
bbf6c09ef9bb10ab63d337bf011872f9073c3e5b - src/common/sdk/nvidia/inc/nvos.h
9bca638f5832d831880f090c583fac6fc8cf6ee6 - src/common/sdk/nvidia/inc/dpringbuffertypes.h
7de14a0c3cc8460a9c41e1ee32fda5409c5b9988 - src/common/sdk/nvidia/inc/mmu_fmt_types.h
774318ced0fdcb199e99cf0fee9688259dd01a51 - src/common/sdk/nvidia/inc/nvfixedtypes.h
ed51b6e2d454af3da36f9c5f4a8a7958d2c5f156 - src/common/sdk/nvidia/inc/alloc/alloc_channel.h
ffe618524466cbbff64de55d88fd987e198bb8c9 - src/common/sdk/nvidia/inc/class/cl9271.h
cef74c734fc7d2f32ff74095c59212d9e1d4cafc - src/common/sdk/nvidia/inc/class/cl84a0.h
9f8a45cb986e3ad2bd4a8900469fe5f8b0c9463a - src/common/sdk/nvidia/inc/class/cl9870.h
a6bb32861fa3f93ccb16490f0f2751a1ef333eed - src/common/sdk/nvidia/inc/class/cl0101.h
e6818f1728a66a70080e87dac15a6f92dd875b4e - src/common/sdk/nvidia/inc/class/cl927d.h
522682a17bacd5c1d6081c0020d094ee3d5c4a30 - src/common/sdk/nvidia/inc/class/clcb97.h
89d4eeb421fc2be3b9717e333e9ff67bfffa24e8 - src/common/sdk/nvidia/inc/class/cl2080.h
f558fddfdc088b86a1b479542b8e782e42a5bdce - src/common/sdk/nvidia/inc/class/clc37a.h
d301edef2d1dd42382670e5a6ceef0d8caf67d28 - src/common/sdk/nvidia/inc/class/cl90cd.h
1dfae8f11f8e92908f59a1c9493e84ce40d53b90 - src/common/sdk/nvidia/inc/class/cl0070.h
95d99f0805c8451f0f221483b3618e4dbd1e1dd8 - src/common/sdk/nvidia/inc/class/cl90f1.h
99a34eee22f584d5dfb49c3018a8cb9a7b1035ed - src/common/sdk/nvidia/inc/class/cl5070_notification.h
c4f090f0dae5bdebf28c514c1b5a9bd8606aa56c - src/common/sdk/nvidia/inc/class/cl9097.h
4b77798281f3754a80961308d44a70b1a717283b - src/common/sdk/nvidia/inc/class/clc46f.h
bd2a88f8dbc64add00ad366aa3e76d116cb090b3 - src/common/sdk/nvidia/inc/class/cl0073.h
e587a693bc1cee68983a7039ddbf16a3d3461d64 - src/common/sdk/nvidia/inc/class/cl9471.h
ddbffcce44afa7c07924fd64a608f7f3fe608ccc - src/common/sdk/nvidia/inc/class/cl0071.h
74c75472658eea77d031bf3979dd7fe695b4293f - src/common/sdk/nvidia/inc/class/cl0092_callback.h
fd16daebcd23a680b988dde4ae99625434dcb8fa - src/common/sdk/nvidia/inc/class/cl0000.h
c2d8bb02052e80cd0d11695e734f5e05ab7faeb5 - src/common/sdk/nvidia/inc/class/cl907dswspare.h
5ca1d01dab6b9e814160ddce868d00aa9a1ead58 - src/common/sdk/nvidia/inc/class/clc873.h
7c7406d40a09372dcae2aaf3fcad225c3dd2cf3f - src/common/sdk/nvidia/inc/class/cl9010_callback.h
2240664ad950c9c2e64b6f4d18e05349bc91443c - src/common/sdk/nvidia/inc/class/clc573.h
593384ce8938ceeec46c782d6869eda3c7b8c274 - src/common/sdk/nvidia/inc/class/cl900e.h
101da471fe4e167815425793491e43193e407d9a - src/common/sdk/nvidia/inc/class/clc397.h
dec74b9cf8062f1a0a8bbeca58b4f98722fd94b0 - src/common/sdk/nvidia/inc/class/cl0076.h
46f74fc51a7ec532330e966cad032782e80808b8 - src/common/sdk/nvidia/inc/class/clcc7b.h
053e3c0de24348d3f7e7fe9cbd1743f46be7a978 - src/common/sdk/nvidia/inc/class/cl0004.h
71e34a03bcfa70edfbec4dbdeade82a932057938 - src/common/sdk/nvidia/inc/class/clc637.h
447fe99b23c5dbe3d2a7601e8228a1a1831c6705 - src/common/sdk/nvidia/inc/class/clcc70.h
89ed6dd37fca994e18e03a5410d865b88e1ff776 - src/common/sdk/nvidia/inc/class/clc87e.h
03d873c3a0e0376440f23171640d9c517f7a34e9 - src/common/sdk/nvidia/inc/class/cl902d.h
78259dc2a70da76ef222ac2dc460fe3caa32457a - src/common/sdk/nvidia/inc/class/clc37e.h
b7a5b31a8c3606aa98ba823e37e21520b55ba95c - src/common/sdk/nvidia/inc/class/cl402c.h
5ee1adc8d952212b37211c6f4f677ba672f5117c - src/common/sdk/nvidia/inc/class/clcc71.h
bd12f7cdc3a01668b9c486dc6456f9263dd459ea - src/common/sdk/nvidia/inc/class/clc57b.h
4b2f2194a1655cc6ae707866f130bbe357d0c21f - src/common/sdk/nvidia/inc/class/clb097tex.h
5409e5af182ac18ef8d13380bdfe7cf2e83d37d7 - src/common/sdk/nvidia/inc/class/clc37b.h
aeb4cbab8d1d0fbd0a5747fa36d6f56c00234b2d - src/common/sdk/nvidia/inc/class/clc097tex.h
36fd6906e2688dad2e7ab648be7e070b9eb6f11d - src/common/sdk/nvidia/inc/class/clc971.h
513c505274565fa25c5a80f88a7d361ffbcb08c3 - src/common/sdk/nvidia/inc/class/cl0005.h
53e6252cd85a60698c49a721f4e41da1cb14e5bd - src/common/sdk/nvidia/inc/class/clc97dswspare.h
645adeb829dbcf315bf67ff8387e7a5d982d7b6e - src/common/sdk/nvidia/inc/class/cl00de.h
0f91db32d9e346b4d9f3762c9e59a8f8e5fd0903 - src/common/sdk/nvidia/inc/class/clcc7d.h
a24c2a943c7ceceb8d015f5cd02148f8c4e7c23d - src/common/sdk/nvidia/inc/class/clb097.h
691bb932ea3f60d2b9ad3e4d7fa53ab1a2a5e6c5 - src/common/sdk/nvidia/inc/class/clc870.h
758e2fb8b5d89079f03be09d74964e9246cb180c - src/common/sdk/nvidia/inc/class/clc797.h
f4af32374be4d05a2e55c97053a4f0d1f4b85154 - src/common/sdk/nvidia/inc/class/cl0000_notification.h
1e578eb23dacca047e0b342cce3024b3134f8de9 - src/common/sdk/nvidia/inc/class/clc7b5.h
941a031920c0b3bb16473a6a3d4ba8c52c1259d7 - src/common/sdk/nvidia/inc/class/cl917e.h
b23cdfb66f40c6d9a903f602b8ff4526063b5a2d - src/common/sdk/nvidia/inc/class/clc097.h
0de3548dde4e076cbd0446330b2d5ae4862c1501 - src/common/sdk/nvidia/inc/class/clc973.h
ddb996ff90b80c0f58729b9ac89fa6d2d3950e49 - src/common/sdk/nvidia/inc/class/cla16f.h
cb610aaae807d182b4a2ee46b9b43ebfa4a49a08 - src/common/sdk/nvidia/inc/class/clc57e.h
9e1d2f90d77e23f1d2163a8f8d8d747058e21947 - src/common/sdk/nvidia/inc/class/cl9010.h
7a14243de2b228f086810f968a1712627f1333fd - src/common/sdk/nvidia/inc/class/clc36f.h
7c8e1f1055f9522cfb2935ea0aae612ef172c26e - src/common/sdk/nvidia/inc/class/clc370_notification.h
64ad2ab88e2006bcdace06e7109981496c39f265 - src/common/sdk/nvidia/inc/class/clc87d.h
36c6162356ac39346c8900b1e0074e4b614d4b5a - src/common/sdk/nvidia/inc/class/clc370.h
5df0ce4eb733554e963eb3c7938396f58f2dd4d5 - src/common/sdk/nvidia/inc/class/cl2081.h
a4d82d12346918edd0a7564a5c6cbfe849532b7f - src/common/sdk/nvidia/inc/class/clca70.h
159b78a13e43a2afe6c17714a6f8619675480346 - src/common/sdk/nvidia/inc/class/clc86f.h
6ddba2e93c046ae04f48685c73f8f2d9fe74a398 - src/common/sdk/nvidia/inc/class/clc67a.h
83c6378ef27c8b640895a123801d27e6c4fd3754 - src/common/sdk/nvidia/inc/class/clc671.h
7f75433a769a020d9f36996c855c8ce6ab39dd83 - src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h
31ac68401e642baf44effb681d42374f42cf86b1 - src/common/sdk/nvidia/inc/class/cl00c3.h
95ca0b08eed54d1c6dd76fdf9cf4715007df1b20 - src/common/sdk/nvidia/inc/class/cl0020.h
20d5608c2d6e55efd6d1756a00739f7a05d3a2b3 - src/common/sdk/nvidia/inc/class/clc361.h
9797f4758d534181eeaa6bc88d576de43ba56045 - src/common/sdk/nvidia/inc/class/clc574.h
a39d75d3e479aebaf3849415e156c3cfe427298a - src/common/sdk/nvidia/inc/class/clc771.h
eac86d7180236683b86f980f89ec7ebfe6c85791 - src/common/sdk/nvidia/inc/class/cl957d.h
f7a2fea4725d59e95294c397ede001504b777b0d - src/common/sdk/nvidia/inc/class/clc697.h
f3f33f70ec85c983acec8862ccaabf5b186de2bb - src/common/sdk/nvidia/inc/class/cl9270.h
8b94512c9746c6976c4efeee0291bf44bb5e0152 - src/common/sdk/nvidia/inc/class/clcc73.h
60d0c7923699599a5a4732decfbcb89e1d77b69e - src/common/sdk/nvidia/inc/class/cl9770.h
e0c9a155f829c158c02c21b49c083168f8b00cbe - src/common/sdk/nvidia/inc/class/clc37dswspare.h
499bc681107a2b7ad7af3d2211b582b8fb9d9761 - src/common/sdk/nvidia/inc/class/clcc7a.h
e1bfd0c78f397e7c924c9521f87da8286bebe3f1 - src/common/sdk/nvidia/inc/class/cl84a0_deprecated.h
2f291dc867e71f625c59f72787b9fb391a16d0e6 - src/common/sdk/nvidia/inc/class/clc638.h
8d2dcc086f892dd58270c9e53e747513ed4b2f93 - src/common/sdk/nvidia/inc/class/clb06f.h
3d262347ab41547d9ccc28a892d24c83c6b1158e - src/common/sdk/nvidia/inc/class/cla06f.h
bae36cac0a8d83003ded2305409192995d264d04 - src/common/sdk/nvidia/inc/class/cl0001.h
ba8f5899df4287b8440bcb9c8e09e10db73ebf12 - src/common/sdk/nvidia/inc/class/clc97a.h
7bfcd7cf1735b2a54839e8a734e2227060ebf570 - src/common/sdk/nvidia/inc/class/clc197.h
e231c552afb3a78da7341ee49bf36940f1f65202 - src/common/sdk/nvidia/inc/class/clc77d.h
821396a58944ba4620f43cf6ee833b7a04d67193 - src/common/sdk/nvidia/inc/class/clc970.h
1f1879fcddf3c3f1f6c44df0e51822ad1bfa1aae - src/common/sdk/nvidia/inc/class/cl9171.h
a23967cf3b15eefe0cc37fef5d03dfc716770d85 - src/common/sdk/nvidia/inc/class/clc372sw.h
02ff42b6686954e4571b8a318575372239db623b - src/common/sdk/nvidia/inc/class/cl30f1_notification.h
4be055f206ef1049e8a5b824f9f4830eba0e224c - src/common/sdk/nvidia/inc/class/cla26f.h
ef173136a93cdd2e02ec82d7db05dc223b93c0e1 - src/common/sdk/nvidia/inc/class/clc770.h
a3e011723b5863277a453bfcfb59ce967cee0673 - src/common/sdk/nvidia/inc/class/clc670.h
f33b9fdad6ceb534530fecfd16b40a71f5f5cfdc - src/common/sdk/nvidia/inc/class/clc56f.h
02906b5ba8aab0736a38fd1f6d7b4f6026a5185b - src/common/sdk/nvidia/inc/class/clc57esw.h
aa6387d7ce55a88789c5731e89dedde57115131c - src/common/sdk/nvidia/inc/class/clc97b.h
86ab048c67a075349622c597fa9c4f2a9a3d8635 - src/common/sdk/nvidia/inc/class/cl9571.h
9b2d08d7a37beea802642f807d40413c7f9a8212 - src/common/sdk/nvidia/inc/class/clc37d.h
bd9f406625e6c0cce816a5ddfb9078723e7f7fb5 - src/common/sdk/nvidia/inc/class/clb0b5sw.h
ab27db8414f1400a3f4d9011e83ac49628b4fe91 - src/common/sdk/nvidia/inc/class/cl987d.h
2614a83d383b540f23ef721ec49af1dfde629098 - src/common/sdk/nvidia/inc/class/cl0080.h
9db39be032023bff165cd9d36bee2466617015a5 - src/common/sdk/nvidia/inc/class/cl0002.h
094bec72bfa8c618edc139bc353b20433f1c1da2 - src/common/sdk/nvidia/inc/class/cl2080_notification.h
e72a7871d872b2eb823cc67c0a7d4cafb3d0ca18 - src/common/sdk/nvidia/inc/class/cl90ec.h
0ad3b3e00dc83a0487bd96abd5fe467213aa51ad - src/common/sdk/nvidia/inc/class/clc597.h
869e41c3ba08d704fcf00541075986de43d6b090 - src/common/sdk/nvidia/inc/class/cl917b.h
b685769b5f3fed613227498866d06cc3c1caca28 - src/common/sdk/nvidia/inc/class/cl2082.h
4c0d054bd0d9935d8d2cedba3f5e910d6b6f8ed3 - src/common/sdk/nvidia/inc/class/clc997.h
1697a9ed528d633a1e78c0071868d7dff899af26 - src/common/sdk/nvidia/inc/class/clc57a.h
8e85d29d4006dbd3a913fcc088be5e8c87bbdabb - src/common/sdk/nvidia/inc/class/cl0100.h
15d1f928a9b3f36065e377e29367577ae92ab065 - src/common/sdk/nvidia/inc/class/cl0080_notification.h
e3bd2cacd357e411bc1b6b7d7660ffa97c3a7ee3 - src/common/sdk/nvidia/inc/class/clb197.h
16f9950a48c4e670b939a89724b547c5be9938bf - src/common/sdk/nvidia/inc/class/clc570.h
060722ac6a529a379375bb399785cbf2380db4fd - src/common/sdk/nvidia/inc/class/clc373.h
bd910ff84b9920af83e706a8ab37c68157a372c8 - src/common/sdk/nvidia/inc/class/clc97e.h
b71d1f698a3e3c4ac9db1f5824db983cf136981a - src/common/sdk/nvidia/inc/class/cl9170.h
2a031d85b85c4b1e5b278f6010ca8f33b2192de1 - src/common/sdk/nvidia/inc/class/cl90e7.h
9ceb4ec8538818c8b1dcc7ffe885584b8e0f435e - src/common/sdk/nvidia/inc/class/cla097.h
a9503a5558b08071f35b11df9a917310947c378b - src/common/sdk/nvidia/inc/class/cl00da.h
d8000ab8ef59e64d17b4089c43953ca69b7f605f - src/common/sdk/nvidia/inc/class/clc67e.h
6400b9ad3460dafe00424e3c1b1b7a05ab865a63 - src/common/sdk/nvidia/inc/class/cl50a0.h
7032fd79731907df00a2fe0bbf6c0f4ce87f021d - src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h
b11e7b13106fd6656d1b8268ffc15700fba58628 - src/common/sdk/nvidia/inc/class/clc371.h
ff47d8a4b4bdb3b9cd04ddb7666005ac7fcf2231 - src/common/sdk/nvidia/inc/class/cl003e.h
0285aed652c6aedd392092cdf2c7b28fde13a263 - src/common/sdk/nvidia/inc/class/cl00fc.h
81b4e4432da8412c119e795662819cfe7558711f - src/common/sdk/nvidia/inc/class/cl917a.h
38265d86eb7c771d2d3fc5102d53e6a170a7f560 - src/common/sdk/nvidia/inc/class/cl0041.h
848c89981de73d681615266e4e983b74c2ef418f - src/common/sdk/nvidia/inc/class/cla06fsubch.h
2d76476dba432ffc1292d2d5dd2a84ff3a359568 - src/common/sdk/nvidia/inc/class/cl0092.h
b46b2cfcf72fc2f9722bd42cea8daaeeda861471 - src/common/sdk/nvidia/inc/class/clc871.h
022e8405220e482f83629dd482efee81cc49f665 - src/common/sdk/nvidia/inc/class/clc77f.h
fe7484d17bc643ad61faabee5419ddc81cf9bfd6 - src/common/sdk/nvidia/inc/class/cl9570.h
bb79bbd1b0a37283802bc59f184abe0f9ced08a5 - src/common/sdk/nvidia/inc/class/cl0040.h
6249715d9876f5825ad62f563bf070e93710a2ad - src/common/sdk/nvidia/inc/class/clc67d.h
b1133e9abe15cf7b22c04d9627afa2027e781b81 - src/common/sdk/nvidia/inc/class/cl917c.h
7ef21c4f4fd4032c8f25f8fb33669e692a26e700 - src/common/sdk/nvidia/inc/class/clcb97tex.h
73b706e4916f4c70302387c88c8e14e7b2c1f4e6 - src/common/sdk/nvidia/inc/class/clc67b.h
c40fd87fa6293d483b5bf510e2e331143ded9fa4 - src/common/sdk/nvidia/inc/class/cl9470.h
20894d974d1f8f993c290463f1c97c71fd2e40b1 - src/common/sdk/nvidia/inc/class/cl30f1.h
9f7f04825f3f218cc0c4610938935e2f0a73e13b - src/common/sdk/nvidia/inc/class/clc97d.h
04ab1761d913030cb7485149ecd365f2f9c0f7da - src/common/sdk/nvidia/inc/class/cl0005_notification.h
da8d312d2fdc6012e354df4fa71ed62ae4aac369 - src/common/sdk/nvidia/inc/class/cl927c.h
158c98c8721d558ab64a025e6fdd04ce7a16ba9e - src/common/sdk/nvidia/inc/class/cl947d.h
5416c871e8d50a4e76cbad446030dbedbe1644fd - src/common/sdk/nvidia/inc/class/cl00f2.h
0b35244321b1f2f6647f8389f6fa7254c34790e2 - src/common/sdk/nvidia/inc/class/cl90cdtrace.h
39161706917567f434a6fff736b22f3358923e68 - src/common/sdk/nvidia/inc/class/clc06f.h
bc3674f2384cb3695ce5f035ed16e9c39bba4d1b - src/common/sdk/nvidia/inc/class/cl00fe.h
dd4f75c438d19c27e52f25b36fc8ded1ce02133c - src/common/sdk/nvidia/inc/class/cl917cswspare.h
435a34753d445eb9711c7132d70bd26df2b8bdab - src/common/sdk/nvidia/inc/class/cl917d.h
b31019107ada7b0fb8247c09d93b95a630821fa8 - src/common/sdk/nvidia/inc/class/clcc7e.h
31939808cd46382b1c63bc1e0bd4af953302773f - src/common/sdk/nvidia/inc/class/cl977d.h
83427e3172c64c3b9ef393205ccc3b961ec65190 - src/common/sdk/nvidia/inc/class/cl5070.h
db8dd50ad3e64fe0472d82c0940908d5da5e0321 - src/common/sdk/nvidia/inc/class/cla0b5.h
28867d69a6ceac83da53a11a5e1ef87d9476f0be - src/common/sdk/nvidia/inc/class/clc57d.h
8b07d7aca050be883fdc0d6f4b19eac0b0b6c796 - src/common/sdk/nvidia/inc/class/clc673.h
c116d91177c6cbfb8c25e7f35bb49a8d5a51816c - src/common/sdk/nvidia/inc/class/cl008f.h
4fc2133935b8e560c9a1048bc0b1f1c2f0a4464c - src/common/sdk/nvidia/inc/class/cl00c1.h
5a6098f821e8faa19345313477726431f9271cde - src/common/sdk/nvidia/inc/class/clc661.h
6db83e33cb3432f34d4b55c3de222eaf793a90f0 - src/common/sdk/nvidia/inc/class/cl00b1.h
5b573deb4d68ccb67d9cccc11b28203c5db3d2f7 - src/common/sdk/nvidia/inc/ctrl/ctrl0002.h
88947927d79e15df8cbf77a59ac883a29e970413 - src/common/sdk/nvidia/inc/ctrl/ctrlc638.h
625af1df5c9453bd35a9e873ee5c77e73d5fd195 - src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
ade4a731f59c7cd16b4a60d318a19147b9918bb9 - src/common/sdk/nvidia/inc/ctrl/ctrl0004.h
90843f8173a341deb7f1466cd69a17114c6b9e4f - src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
a305225ceda0a39c76ed61b819a1f4165f5644f5 - src/common/sdk/nvidia/inc/ctrl/ctrl00fe.h
be3c9e2de8b8d33fe04389b224fa6ad95ecd089b - src/common/sdk/nvidia/inc/ctrl/ctrla06f.h
c3e3213f548f93592f7d3dfd76e63a2102d800ec - src/common/sdk/nvidia/inc/ctrl/ctrl0076.h
d7415e78725899f9d10fa2d5f03f3d62cef42f26 - src/common/sdk/nvidia/inc/ctrl/ctrlc36f.h
9e343f73f46238075cef766cad499533559dfa28 - src/common/sdk/nvidia/inc/ctrl/ctrl00da.h
f7601ce8c7c2d7a1143bff5280e3e5d9b5c4c147 - src/common/sdk/nvidia/inc/ctrl/ctrl906f.h
97ac039e796faca6c9f78e16020fe96225b33492 - src/common/sdk/nvidia/inc/ctrl/ctrlc637.h
fe7ce28fe76174a6de68236b44ea565ba2ea687b - src/common/sdk/nvidia/inc/ctrl/ctrl00de.h
3ba6904c69aa7710c4561d5643b18fc41e141d4e - src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h
b178067ba5f93e7fafb4c2ee0f5032acf9bc55d7 - src/common/sdk/nvidia/inc/ctrl/ctrla081.h
58a5d3a55b2d9b29d4f1b1e7b5d4d02ae6885e30 - src/common/sdk/nvidia/inc/ctrl/ctrl003e.h
16a24249210637987d17af6069ae5168404743ee - src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h
58f8e48d5851cc10e3c5fd3655d7948b9f327ca0 - src/common/sdk/nvidia/inc/ctrl/ctrl2080.h
b86c4d68c5758f9813f00cc562110c72ef602da7 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7.h
c042a366bc755def9e4132e2768c1675871dbe65 - src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
c8b2e0e64bb3cf3c562dee5fa7913035f82d8247 - src/common/sdk/nvidia/inc/ctrl/ctrl402c.h
352825959d98fe9b47a474cfdd154d380c80d24e - src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h
9d908bb15aecc9d8094e1b6c13301efba6032079 - src/common/sdk/nvidia/inc/ctrl/ctrl0080.h
3fcf5dbb82508d88a040981a7ab21eac1466bb2b - src/common/sdk/nvidia/inc/ctrl/ctrl0073.h
bfee287b190fd698735c5660592741ba5c25a8ea - src/common/sdk/nvidia/inc/ctrl/ctrl0020.h
2e65ccd2704919780a152c69f53400a0dc5e6e41 - src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h
4fb7753f3502303314d9e8f853ee3b752f7e9317 - src/common/sdk/nvidia/inc/ctrl/ctrl0100.h
8764e07e9d348163db4eb41b0c3cf32c76172c0d - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h
5782a19aeaf9695c13940cf4532e41523a8460e3 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h
f21c15122509a8843e676a2bd5e799c58cd96379 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h
326b61039197db58d8369256f6d7dc9764aea421 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h
e7452921bdbd036ca3a37c60c49829c05e95c2d5 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h
5f3b68d39f14137d33f239408a6a13543f4ac966 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h
d08ef822e97ee56984618d52ed3ed55ee395eadb - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h
8fcc64b22b0f6cde40d5ecd23e5e2444277a5999 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h
70d65d4f923ec0efd8931433ae50930d12f78a07 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h
a33a1c1173962183793d84276e46c61d27ca867e - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h
1b594c39d1439c3d1ecc24c4325b2ea8c2724548 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h
0146d2b3ecec8760e76dacd8ce6bb75c343c6cac - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h
11abea0cdf485863196de56169451980ee6c016a - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h
4f0ccb0667bd3e3070e40f3f83bede7849bc78e4 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h
08dda80bac8d3418ad08e291012cf315dc9e5805 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h
28b06c8f8152dce2b2e684a4ba84acd25a8b8c26 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h
add9e3867e3dbd2c11bed36604680af4aaa0f164 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h
2ffb93d092df65570b074ad97f0bb436a1c66dff - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
79fd7ed84cb238ea90ea3691f40ea7140034d3dc - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h
2ea79d79223b06633fb7f541ebbe5a300ba3885f - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h
44c9aa512eb0b9b92cace9e674299f2a9227c37c - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h
a3328cf6633f9b04258eff05ce30e66cc6930310 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h
a427892e601a4ca4f88cc5778ff78895324f3728 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h
92ff82d1045933baa79958a9f6efd451b0123e95 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h
7ef9e10955708592e92e127eb3fb372adff44818 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h
3c1bd0db339456c335acd50a75ace42cb8bbe6f8 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
be10e3f4a9dd2f2ab35305ee0af628ef339b25a7 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
db66195c8e7252c5f424953275cbb7be90a17ba8 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h
c74ac448c3382d92e662804b56e73edd748e2678 - src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83debase.h
7318f74523bb6a015e561dba1a06b47a278d856d - src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h
702d9cb471a344a25911449cc580f69f7155ab1c - src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h
3f747a4fc98291329e0245a971248cf2c28a1b60 - src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h
19c7eff334c591c803dcd93fc0818798c281df48 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h
c7dcbc0ae7454df6523c6deb5f07a70dc2fdbc15 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h
882b13d54585a6fc5534d12b9cdcec29c8cde337 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fucodecoverage.h
76fb63a6782ff1236303fdd7bf2698f42965a266 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7base.h
00d2655f569187190bd117bdf37fe4ddd5e92320 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7bbx.h
8064c31eb1e447561c415f9835aecac97d5f3517 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h
713aa1291aef3f79304ad35c5143a7576f242f63 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
bb7955387f6a286927e7922019676ca0aba713e6 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h
35367f08b96510a5312653b5197d6bb34c0a3d00 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h
a0cf9dfb520e3320cd9c154c01cd2f1a7bbbd864 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
c2066c407f81538047c435fffca2705c28107663 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
d727b328e995a7d969ec036f2d5b52264568a7bf - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
52f251090780737f14eb993150f3ae73be303921 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h
77eb4fab61225663a3f49b868c983d5d532ca184 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h
6ca26c7149455e43f32e8b83b74f4a34a24a2d29 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h
134d43961ea1d42fc36d75685fdd7944f92b0b53 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
022feef64678b2f71ab70dc67d5d604054990957 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h
2a00952f0f3988c5425fec957a19d926ae75ba28 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h
79b38bbe679d397b48b78266aa5f50459fe5b5bc - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h
514d012dbfd9e056b7f729bccb213fa9193d433e - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370or.h
6ef99465758f71f420ac17765380cc37dbcac68a - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h
5f70c2eb6a144bc4d7ca8be63fa46391909e8201 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h
f4ed3ccff4720114d1aaed82484ed70cf07626db - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h
ba3b73356bf0d1409ecfd963b623c50ec83f1813 - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06finternal.h
bb0a5ff091ef854b19e7da0043b7b7b10232c3de - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h
1f25c9f215991f34fee94dafac5fad0e7460db1c - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
ddeb0df51d5f662948f9098a5d85b40c8ab6504b - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h
e3fb93f0ff3469ec76cecdc6f0bf1c296551a2b1 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h
a138379dd76c468072f1862b8fc6ae79ee876b4e - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h
ee99443c1bd3441df474566622486b04c4502ac0 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h
44e1b06211eee31e42e81879f5220f26ddec70ae - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h
ff789d585a7f001b8bd32e07a268c635d39b17ab - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h
03f54e22b39ad5cf682eada7147c6c155f16b385 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h
e8d883de767aa995a374d8da56b5c9da8787cb1d - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h
8fdb493bda6119025c1d00f289a6394e7dcd1b53 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h
cfa32c37f373eeef53aedc3f4dffff1634c122e8 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h
41a0a14e04527fa2c349d2895bb41affd154c999 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h
ecd312fabb249a25655e151cee3615c5ab61ffa7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h
c30b5995d353e68623b32fea398f461351e3b8f1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h
aa0f685b94bdae99a58aa1a45735b0593a2e6f5a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h
aa86ffd04a55436ecacbedb1626f6187bbddedf7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h
3423a69bba50e1405b5a7d631bfff1f6f0a1673f - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h
1990d0c4fa84c6d078282d4d7d0624ccb0325ce7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h
146263409e5304f661da349b56761ab7403144bd - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
8b622186edb156e980d02bd59a71c01923d1aa23 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h
70dc706ea4ee7b143a716aae9e4f8c0bcef6c249 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h
0a156fc54f45386fabd06ef5ec11ba3a816fbfb7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h
c157e185d3c64ee9476ddc75881bfc5a5b8b997f - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h
785d96360f86bc53eb428fd3f4fbeda395400c8a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
b8e8c5ccab01d7997d1fd5579a690cb3279a8ab3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h
b2eecbca32d87b939858bf0b22f93c06b49b3a04 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h
24a891a02e1a882769d4da3454e4dfcf42b1ea6c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h
6969b092708d57f88b0f0fdbb3464c786f90710c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h
013bd8d50841ea314f5ea2bd507470f2c3aff831 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
d63388ff48ca055c82bcd6148506eacd0e26b4dc - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vgpumgrinternal.h
96f72ec608cd198be995f3acd9c04afe7c7e6dc8 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h
359c6b06f2712a527d1ef08465179c14a8b4a751 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h
4c2af959d06536294d62b2366a6ba61ca744bd50 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h
d15e8e86ca66b3a69a774e322dfdd349b9f978b9 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spdm.h
898fa08818b657c27b456d952e7a4e09d8d197ee - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h
9933e90ad92eb7df2f64dcc30dcd680d5f7c530d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
18d1a44b7113c1707bbf5c65fb1be790304c0bed - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
0cd5e883dfafb74ce2ec9bccca6e688a27e6cfa9 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h
07f82ae90cde3c6e2e6c5af135c40e01660c39a3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h
c8f1c115d78bab309c0a887324b0dabfb8f9ea2d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h
ecceb8f7382c8f55c6ccd0330e14ccbc49fcd09c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h
2577a1d505a3d682e223fbcbc6d4c7d13162749d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h
d3969094e68f9d584ba9c6fb5457801caff6ccc1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmu.h
74f1abf45a2a0f60c82e4825b9abfa6c57cab648 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h
115f683e5926ae130de87e4cea805ef6915ed728 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h
d4ba227a522423503e5044c774dbcca692c48247 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h
97bb79e74b25134fa02a60d310b3e81170df6fd6 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h
baeb07c8bdadf835db754452f63d40956bc6a199 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
338c7de5d574fe91cda1372c5221e754d4c4b717 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h
4e4a4f9e94f2d7748064949f4b16845829670bf6 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h
5ac6c9a299256935259eaf94323ae58995a97ad7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h
e4441458a7914414a2092f36a9f93389ed65154a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h
b55e4cf81b6112868eb6f6cd9c1a3b32f8fcda49 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
302f79771fcdba3122cf61affb53e0a3a3a27e6d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
5c7b955ef5e6f6ca9c0944e8a2b2c4a1ae760e04 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h
93a9fa93eb3d1099991e4682b6228124220ca293 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
7f1af5b788616bab285a73bab5098fb6d134b159 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h
51dbd71f1cd5a66dd7a5b0fbb753713d27ff937c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h
cf1757ff453132fb64be0dec6c50eb935db29784 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink_common.h
59254e4bdc475b70cfd0b445ef496f27c20faab0 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h
119432bbce99e91484a2bac79ca5257a36a7f98b - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h
7f15697ca8645f77352f88c2a84713f348e98a24 - src/common/unix/nvidia-3d/include/nvidia-3d-vertex-arrays.h
220ac9628fe5afa0191b8c20304402baf0f70353 - src/common/unix/nvidia-3d/include/nvidia-3d-fermi.h
23478354284aa1be69bc70fa4157aa408177829c - src/common/unix/nvidia-3d/include/nvidia-3d-volta.h
75859a11c0fae125a0619c47ead964416ac8d6ed - src/common/unix/nvidia-3d/include/nvidia-3d-pascal.h
e621c127011311e8f97c8784d8539751a820bf47 - src/common/unix/nvidia-3d/include/nvidia-3d-maxwell.h
07fc2cd8495309f1218b9ddee4a4809b6dcb65a3 - src/common/unix/nvidia-3d/include/nvidia-3d-types-priv.h
1276b525f23b582e029c2ddc9ed0115f8e9dafb4 - src/common/unix/nvidia-3d/include/nvidia-3d-hopper.h
5030b264e17b70df0c99bc9da4350bdb48f2f60a - src/common/unix/nvidia-3d/include/nvidia-3d-kepler.h
146b4f305bfe710622a878fe3e9afd4f834124b8 - src/common/unix/nvidia-3d/include/nvidia-3d-turing.h
61f0a408812c04a59fb8f12713ce34d2ed544fe3 - src/common/unix/nvidia-3d/include/nvidia-3d-surface.h
e7a4acaef431a49ca7efd6bf72b6e8b57fafbab0 - src/common/unix/nvidia-3d/include/nv_xz_mem_hooks.h
40a9c57cca5b2f8acfe3ead472dcf0adc9423050 - src/common/unix/nvidia-3d/src/nvidia-3d-vertex-arrays.c
af1a4d99bd19b72de120ba2046f35b95650985b1 - src/common/unix/nvidia-3d/src/nvidia-3d-volta.c
f78f737f1dfb52cf248543cced017a8fbad7b270 - src/common/unix/nvidia-3d/src/nvidia-3d-surface.c
4ea7a2a6811239760a1b56833fb07dbf8a99a10e - src/common/unix/nvidia-3d/src/nvidia-3d-hopper.c
e43e6ce6b9781d44b68868703fdbb779fc95f5d4 - src/common/unix/nvidia-3d/src/nvidia-3d-kepler.c
09fa5fbae25e08c819277566d7281f17305863f8 - src/common/unix/nvidia-3d/src/nvidia-3d-turing.c
e0ef9ab77cfdf207c800a9c067739add28632047 - src/common/unix/nvidia-3d/src/nvidia-3d-pascal.c
57f19f6aa7b896794aafacd978b2469d976f6f78 - src/common/unix/nvidia-3d/src/nvidia-3d-maxwell.c
08c29625af227debb72dd703630a754ac4fbeee0 - src/common/unix/nvidia-3d/src/nvidia-3d-core.c
7ca41841cc54bd597f5c10cc346b8f574b1c2acf - src/common/unix/nvidia-3d/src/nvidia-3d-fermi.c
d0331b7ebba0537af50bdf5815d9c048cbeb3388 - src/common/unix/nvidia-3d/src/nvidia-3d-init.c
569a662ce5f79dc450f44eeb7a0ff36580ba27fe - src/common/unix/nvidia-3d/interface/nvidia-3d-types.h
a06524af04de90562b08b6b26783232cf7ff01d4 - src/common/unix/nvidia-3d/interface/nvidia-3d-utils.h
3e97ecc773087c0c7f370faf0a9ff838793c9bd6 - src/common/unix/nvidia-3d/interface/nvidia-3d-color-targets.h
2d91e6f3ad425d3ca95de79ecb929b22cac57f52 - src/common/unix/nvidia-3d/interface/nvidia-3d-shaders.h
fd454a2318e970e6b1cb4a4b7b5633e4cb2e8b45 - src/common/unix/nvidia-3d/interface/nvidia-3d.h
34daeec12bbf45f0f85406afc56414da45afc2e6 - src/common/unix/nvidia-3d/interface/nvidia-3d-shader-constants.h
727210acfe72963aa6dddf1bcee91dc122897113 - src/common/unix/nvidia-3d/interface/nvidia-3d-constant-buffers.h
069b576dc1f03143999512cd03fc48fe18ed6706 - src/common/unix/nvidia-3d/interface/nvidia-3d-imports.h
2476f128437c0520204e13a4ddd2239ff3f40c21 - src/common/unix/common/inc/nv-float.h
881cbcc7ed39ea9198279136205dbe40142be35e - src/common/unix/common/inc/nv_assert.h
cb7c13757ca480e10b4ef3e3851d82ad5ccca3f1 - src/common/unix/common/inc/nv_mode_timings.h
d5253e7e4abd3ad8d72375260aa80037adcd8973 - src/common/unix/common/inc/nv_dpy_id.h
3e64a8fe60bb1266a769be8a5c0716e10c816b38 - src/common/unix/common/inc/nv_amodel_enum.h
995d8447f8539bd736cc09d62983ae8ebc7e3436 - src/common/unix/common/inc/nv_common_utils.h
edded9ca3d455444372fe6c497b2d61bd0cc3f96 - src/common/unix/common/utils/nv_memory_tracker.c
7bccb5a3dea9208f0fbd86d36efc369f215d5c3c - src/common/unix/common/utils/unix_rm_handle.c
26f2a36442266c5d2664d509ecfd31094a83e152 - src/common/unix/common/utils/nv_vasprintf.c
e903bbbecf4fb3085aaccca0628f0a0e4aba3e58 - src/common/unix/common/utils/nv_mode_timings_utils.c
667b361db93e35d12d979c47e4d7a68be9aa93b6 - src/common/unix/common/utils/interface/nv_mode_timings_utils.h
07c675d22c4f0f4be6647b65b6487e2d6927c347 - src/common/unix/common/utils/interface/nv_memory_tracker.h
8d9c4d69394b23d689a4aa6727eb3da1d383765a - src/common/unix/common/utils/interface/unix_rm_handle.h
9e008270f277e243f9167ab50401602378a2a6e8 - src/common/unix/common/utils/interface/nv_vasprintf.h
673bbd33569f55a900b5388a77d19edd3822ecf3 - src/common/unix/xzminidec/src/xz_dec_stream.c
9c67bdcbea04fbe1a5b2746549e502cdc368b54e - src/common/unix/xzminidec/src/xz_config.h
f2cfbcf1e2cb1d7545b5de609a4e7672bf8ae976 - src/common/unix/xzminidec/src/xz_dec_bcj.c
93af3bcdf863afa9655107c86f49aefdf9c05d90 - src/common/unix/xzminidec/src/xz_lzma2.h
fba46fe8f4a160d71a708578a85ab6731e4e024f - src/common/unix/xzminidec/src/xz_crc32.c
0ce26be0fb63a7ae52e2bb15a1770c80b9a5ac84 - src/common/unix/xzminidec/src/xz_stream.h
8365ec8d875fad74507d49228ad8959c66bbc360 - src/common/unix/xzminidec/src/xz_dec_lzma2.c
2ade48b4c53fc3bebf1587bc0a1a08b26cd5981d - src/common/unix/xzminidec/src/xz_private.h
c2a87873eeff2a8010bb8a2cb8d1df28a20a0097 - src/common/unix/xzminidec/interface/xz.h
4498dc65d71b2b8635b365550e5e521da14c8e6b - src/common/unix/nvidia-push/include/nvidia-push-priv.h
4847b168b4f5e78dbb92cfec80734789a9131b87 - src/common/unix/nvidia-push/include/nvidia-push-priv-imports.h
616dd99d8dda5dbe35032a5fc558ff48f7cc1620 - src/common/unix/nvidia-push/src/nvidia-push-init.c
0916485ec1ff275771d88a725dcbf586663dbc33 - src/common/unix/nvidia-push/src/nvidia-push.c
548f9e591d2c851b157575e1b83e25eb47bc61e6 - src/common/unix/nvidia-push/interface/nvidia-push-methods.h
5f5013bdbda9582252db2e92a105a57f24ca7d96 - src/common/unix/nvidia-push/interface/nvidia-push-init.h
f3576444d1dbcc4e9379bee6151ef8c7a382e276 - src/common/unix/nvidia-push/interface/nvidia-push-utils.h
918c4f2e2edd0a52c7085f758286dacd21b5b4c5 - src/common/unix/nvidia-push/interface/nvidia-push-types.h
b54add7dea08ff736ac27ee259f6ccb389c01f09 - src/common/unix/nvidia-headsurface/nvidia-headsurface-types.h
5d014581148b38eede1d31a1f48e388cf6eb7a45 - src/common/unix/nvidia-headsurface/nvidia-headsurface-constants.h
e1fbb040ea9d3c773ed07deb9ef5d63c8c8cab7a - src/common/inc/nvSha1.h
8f0d91e1a8f0d3474fb91dc3e6234e55d2c79fcc - src/common/inc/rmosxfac.h
bcad75550591ede46152403e40413f87e85b0a80 - src/common/inc/nvlog_defs.h
ebccc5c2af2863509e957fe98b01d9a14d8b0367 - src/common/inc/nv_list.h
0e970acfcadddd89fae91c812647fecb80c98d52 - src/common/inc/pex.h
73e2133709eb920a92fcebf7aaab958020493183 - src/common/inc/nvctassert.h
6fa5359ffe91b624548c226b6139f241771a9289 - src/common/inc/jt.h
489ce9f046d9c2ff95a1284ab5e04b5843b874ae - src/common/inc/nvVer.h
7ab322addb3e1ba880cee07dc0d26d882db097b0 - src/common/inc/nvCpuIntrinsics.h
d9c0905f374db0b9cc164ce42eab457d1ba28c53 - src/common/inc/nvop.h
d70c17a0693c8b5dbf7c83f693eec352ce22917c - src/common/inc/nv_smg.h
b4c5d759f035b540648117b1bff6b1701476a398 - src/common/inc/nvCpuUuid.h
4282574b39d1bcaf394b63aca8769bb52462b89b - src/common/inc/nvBinSegment.h
8c41b32c479f0de04df38798c56fd180514736fc - src/common/inc/nvBldVer.h
62e510fa46465f69e9c55fabf1c8124bee3091c4 - src/common/inc/nvHdmiFrlCommon.h
82aadec9509f41eab58727c3498dc24a30a0128e - src/common/inc/nvrmcontext.h
d74a8d4a9ae3d36e92b39bc7c74b27df44626b1c - src/common/inc/nv_mig_types.h
a346380cebac17412b4efc0aef2fad27c33b8fb5 - src/common/inc/nvlog_inc2.h
e670ffdd499c13e5025aceae5541426ab2ab0925 - src/common/inc/gps.h
963aebc9ec7bcb9c445eee419f72289b21680cdd - src/common/inc/hdmi_spec.h
987027bed503d8ce5ad01706aae4a16ee37f3e2d - src/common/inc/nvSemaphoreCommon.h
5257e84f2048b01258c78cec70987f158f6b0c44 - src/common/inc/nvlog_inc.h
4a88a536b71995db70e3a83a48d47072693ec69d - src/common/inc/nv_speculation_barrier.h
2408132586b69e580ff909f7f66451aa2882abff - src/common/inc/nvPNPVendorIds.h
4f7ca8fb43d6885cf60869ed241476032f20f5f3 - src/common/inc/nvUnixVersion.h
23edf9cce2608c494dad045b9466b8f3a18bab56 - src/common/inc/displayport/dpcd20.h
ecc26f6fae35818791733c1a56ea1b556bba7f4f - src/common/inc/displayport/displayport2x.h
aad6f14dacdb166a8d884cae6c5f382d98e5c46c - src/common/inc/displayport/dpcd14.h
27572a26d0a0a32f38606323ea6da65096bac039 - src/common/inc/displayport/displayport.h
8f7c9c19a76eca84fc2556841042c2f1c3d07a1a - src/common/inc/displayport/dpcd.h
4ee8a4d2a0fe12d348ac4c1a1e0a22bd272e146d - src/common/inc/swref/common_def_nvlink.h
e182f9538fea08b5d25f3e74083a7a12a7d49809 - src/common/inc/swref/published/nv_ref.h
641e9803749cbeeca1149c43fe2da5e6edf25137 - src/common/inc/swref/published/nv_arch.h
059493ce7d5390b7e859a19d1a24752df8126ace - src/common/inc/swref/published/turing/tu102/kind_macros.h
86a59440492fd6f869aef3509f0e64a492b4550d - src/common/inc/swref/published/turing/tu102/dev_mmu.h
38589617aab40efdd86b401a18d1e28b5d3b9f8e - src/common/inc/swref/published/disp/v05_02/dev_disp.h
1ea0c3d6ea0c79c01accc7b25d15b421ab49a55d - src/common/inc/swref/published/disp/v04_02/dev_disp.h
c01e4a95ede641ff5a9e6918b39db4d2099c91cb - src/common/inc/swref/published/disp/v05_01/dev_disp.h
04345c77f8c7a8b4825f0cb7fc96ca7c876af51c - src/common/inc/swref/published/disp/v04_01/dev_disp.h
1604a3fa3e3142118c82a1dc621cdac81806195a - src/common/inc/swref/published/disp/v03_00/dev_disp.h
c4f12d6055573a19f9211fdddd3778575e2a17fd - src/common/inc/swref/published/disp/v02_04/dev_disp.h
64c123c90018c5ee122b02b02cbccfcd5ec32cab - src/common/inc/swref/published/t23x/t234/dev_fuse.h
b5ce995e9e5afcd73d39642e31998e087ea133e8 - src/common/shared/nvstatus/nvstatus.c
08816a33e698308c76f3a026c29d0dcb41c5ee20 - src/common/shared/inc/compat.h
9231ac111286772170925e8f6cf92bde5914abb8 - src/common/shared/inc/nvdevid.h
750ecc85242882a9e428d5a5cf1a64f418d59c5f - src/common/displayport/inc/dp_object.h
a6ff1a7aee138f6771c5b0bbedb593a2641e1114 - src/common/displayport/inc/dp_messages.h
80380945c76c58648756446435d615f74630f2da - src/common/displayport/inc/dp_timeout.h
cdb1e7797c250b0a7c0449e2df5ce71e42b83432 - src/common/displayport/inc/dp_merger.h
070b4f6216f19feebb6a67cbb9c3eb22dc60cf74 - src/common/displayport/inc/dp_buffer.h
02b65d96a7a345eaa87042faf6dd94052235009c - src/common/displayport/inc/dp_messageheader.h
78595e6262d5ab0e6232392dc0852feaf83c7585 - src/common/displayport/inc/dp_auxbus.h
e27519c72e533a69f7433638a1d292fb9df8772e - src/common/displayport/inc/dp_crc.h
b2db6b37515f7c979e18686694546b9fa5145459 - src/common/displayport/inc/dp_hostimp.h
29ee5f4ef6670f06e96c07b36c11e3bad8bee6aa - src/common/displayport/inc/dp_address.h
575f4f97189ad6b4944bdd4127cdbee79d8c688d - src/common/displayport/inc/dp_groupimpl.h
cf09c061fa898cd84edd34a9457726abc501b03c - src/common/displayport/inc/dp_configcaps.h
afa1135330de2ce8f1a6d20e99b54f507b5adbbd - src/common/displayport/inc/dp_evoadapter.h
01f1dd58ed5bb12503fa45be7a6657cde0a857e2 - src/common/displayport/inc/dp_guid.h
cca426d571c6b01f7953180e2e550e55c629f0f4 - src/common/displayport/inc/dp_auxretry.h
a086546bf92d7e5e9adf66dcac012b3dc81c2597 - src/common/displayport/inc/dp_internal.h
f6e1b0850f5ed0f23f263d4104523d9290bb8669 - src/common/displayport/inc/dp_vrr.h
2f134665b274bb223c3f74e0ec5c6a0392fa6387 - src/common/displayport/inc/dp_discovery.h
07d22f84e6a386dad251761278a828dab64b6dd5 - src/common/displayport/inc/dp_bitstream.h
f09aae8321de23e0a48072d0e082aecb84a3ebbe - src/common/displayport/inc/dp_mainlink.h
cae50568f7bef4a2a69c4d718a5297b9ae15da3f - src/common/displayport/inc/dp_deviceimpl.h
eb9cdbb0a907926b1afd2a551ec19830f06ae205 - src/common/displayport/inc/dp_splitter.h
5bd3706ceea585df76a75dda7f9581b91ee8f998 - src/common/displayport/inc/dp_tracing.h
4a098c4d09dedc33b86748d5fe9a30d097675e9f - src/common/displayport/inc/dp_list.h
6c87ce702f215b21c1ab0064a2a85b3eda96ecec - src/common/displayport/inc/dp_edid.h
be558902391fb6cb5085652b560391b54befca4b - src/common/displayport/inc/dp_printf.h
379d3933c90eaf9c35a0bad2bd6af960a321465f - src/common/displayport/inc/dp_wardatabase.h
2016714a04d46ac8412ef55d2156d86ba4d594eb - src/common/displayport/inc/dp_auxdefs.h
e2075486b392d6b231f2f133922ac096ca4bc095 - src/common/displayport/inc/dp_ringbuffer.h
09c80a469f1e7e0edd6381578d66fd0e789bc0db - src/common/displayport/inc/dp_regkeydatabase.h
7622cb576c2ebbfe65c0f6132d8561ab1815f668 - src/common/displayport/inc/dp_qse.h
dd420c9e7c271d8bea047d431667524105473e95 - src/common/displayport/inc/dp_linkconfig.h
e02e5621eaea52a2266a86dcd587f4714680caf4 - src/common/displayport/inc/dp_linkedlist.h
430f42522a1e60f2420aa2e4e471aa20945d0253 - src/common/displayport/inc/dp_timer.h
0f71b80d0a0d53fc6581ef341a4e637a467a3795 - src/common/displayport/inc/dp_connectorimpl.h
c8c55dfc7b085b421b01bd9dc7b74abe6f9a0932 - src/common/displayport/inc/dp_connector.h
78ef30b2caf2cf4ff441b5613a796b93ae8973bd - src/common/displayport/inc/dp_messagecodings.h
1363fca23628f312c4b6b0c868b8a43f4a8a5a24 - src/common/displayport/inc/dp_watermark.h
d2b00a849a81f6c6092e3b2c4e7ed20fcee62b39 - src/common/displayport/inc/dptestutil/dp_testmessage.h
70b155b0da07a92ede884a9cec715f67e6b5c3e8 - src/common/displayport/src/dp_list.cpp
107b170d4496a754f22819e66794bcdc51256b7c - src/common/displayport/src/dp_sst_edid.cpp
fea946e5320e7de8e9229bca8d4a6a14b9e8db59 - src/common/displayport/src/dp_crc.cpp
2caf1cd4a99e55126883dbdd9f6b74883c71e171 - src/common/displayport/src/dp_messagecodings.cpp
ef3fefa8dd819d4086c054919b769ca18d058469 - src/common/displayport/src/dp_wardatabase.cpp
c49e37f3e225e60a74c71a2b571e542e12fd9bc9 - src/common/displayport/src/dp_watermark.cpp
e874ffeaeb6deec57605bf91eaa2af116a9762bd - src/common/displayport/src/dp_bitstream.cpp
d699ce22e5e2d641caa2fbacca3095d7dd7b3ffe - src/common/displayport/src/dp_evoadapter.cpp
5f2fb1683cff15175e3ef2276b721863886adc79 - src/common/displayport/src/dp_vrr.cpp
0717b87aafecbe2216e0f0b53ee088a980ef7ad4 - src/common/displayport/src/dp_auxretry.cpp
0670fb5302b1bd3fc65daa848f23e4086619b5e6 - src/common/displayport/src/dp_discovery.cpp
5c12759c27407e8df4c8f1f7bc6ec1595b6b1a63 - src/common/displayport/src/dp_messages.cpp
93ba2409667997fdbcb7af1a8f24ec4a0e15b62c - src/common/displayport/src/dp_timer.cpp
ffdd039884b1400eaf4d6d7cc81d0faba5282014 - src/common/displayport/src/dp_deviceimpl.cpp
c625716e5516a290ac501563e2a73eef9b4f7dd6 - src/common/displayport/src/dp_edid.cpp
af1672e8abb92d8d574d9605285753a8580c5d10 - src/common/displayport/src/dp_groupimpl.cpp
2cda981a5e36285ba4173573d074f8761e74f186 - src/common/displayport/src/dp_qse.cpp
5c7adbdfe295f7e1a1d4899a62bf95b456f84412 - src/common/displayport/src/dp_messageheader.cpp
d3c4c54f96cc02d37fab45521685426e5c38fb4d - src/common/displayport/src/dp_mst_edid.cpp
f56f92e32710b0342805b785d34ba1a9f2a54ed3 - src/common/displayport/src/dp_guid.cpp
eb7e47407bd04e871f891038cc08736d066ffaa9 - src/common/displayport/src/dp_connectorimpl.cpp
a62b774b7c45882b5854b91b600987c343c24966 - src/common/displayport/src/dp_linkconfig.cpp
0a8818da34b5321763c1f60cb8b6ea5e1a2837f1 - src/common/displayport/src/dp_splitter.cpp
24c0787ce5ec691c6b8edb351000265f47e0156a - src/common/displayport/src/dp_buffer.cpp
422a5d3426d5e1cc2346d9d5f86ccde66062ffdc - src/common/displayport/src/dp_merger.cpp
41589d1d5bfa4316d5d066a7201226baed5332db - src/common/displayport/src/dp_configcaps.cpp
a0b68fce10eb0b95518cfd291e2d282872225295 - src/common/displayport/src/dptestutil/dp_testmessage.cpp
f0a73cd173382d8abd4b0c70da8b32e144740bb5 - src/common/modeset/timing/nvt_dmt.c
15d7c508b621c877887962b2c27cdb6c7d1144a0 - src/common/modeset/timing/nvt_util.c
1341b987df8336c882e31d22d2141cadfb67272d - src/common/modeset/timing/nvtiming.h
f8faf3eabd24a1239e1d4faebdc40c0ffa713ff9 - src/common/modeset/timing/nvt_edid.c
c95a1c7914b0d1cba366f2a29e08eb93e0ad033d - src/common/modeset/timing/nvt_edidext_displayid.c
3d3a0889baed7a15c2adce54ba56c1dc783faffd - src/common/modeset/timing/dpsdp.h
ff92b05f8648cb4bc31c0f64707065bb56ff3eb3 - src/common/modeset/timing/nvt_dsc_pps.c
f75b1d98895bdccda0db2d8dd8feba53b88180c5 - src/common/modeset/timing/displayid.h
1997adbf2f6f5be7eb6c7a88e6660391a85d891b - src/common/modeset/timing/nvt_gtf.c
2737ed1d1eccd163f9cd12b1944f96a03c526b31 - src/common/modeset/timing/nvtiming_pvt.h
58b68f1272b069bb7819cbe86fd9e19d8acd0571 - src/common/modeset/timing/edid.h
6d221aad371436ba304448ba2cf04f89148a09bb - src/common/modeset/timing/nvt_edidext_displayid20.c
48761f63bc2794dfbde10492cc53137458cfcd0e - src/common/modeset/timing/nvt_dsc_pps.h
08ef97092899a3dc80251f61cedc73a851d70baa - src/common/modeset/timing/nvt_edidext_861.c
d7cb716eeae50ecfe44fb3c4c4476de598ab78d7 - src/common/modeset/timing/nvt_tv.c
080c1de64d099ecb1aeb9b0b2f176f7be2d609b5 - src/common/modeset/timing/displayid20.h
1c2e163802849848e9ae1586d38c4cd82494217f - src/common/modeset/timing/nvt_ovt.c
54aa88075d9ceb9c6ef99d9c15cb32751a33f8d0 - src/common/modeset/timing/nvt_cvt.c
e13cbe77f864afcddaccff7aeb1923cd02f1482f - src/common/modeset/timing/nvt_displayid20.c
f8911888bdd441666c03fe27381d7730b7dd9131 - src/common/modeset/hdmipacket/nvhdmipkt_internal.h
12118b508a757fd0a162d1e740d93685a67363ea - src/common/modeset/hdmipacket/nvhdmipkt.c
5b541b9ab6fe9333815a760d4043fef725b1c848 - src/common/modeset/hdmipacket/nvhdmipkt_C971.c
83d94f0a5eb7318d00d96115b0139f9f99052ddc - src/common/modeset/hdmipacket/nvhdmipkt_CC71.c
b390bf4f74d690068ff24dce90b79b227769ac2f - src/common/modeset/hdmipacket/nvhdmipkt_C671.c
206727972ab3a5f8a2cde0e153d63aef929b6c01 - src/common/modeset/hdmipacket/nvhdmipkt_0073.c
a71968671ce6b64e235de6902bebc2a06da7ae04 - src/common/modeset/hdmipacket/nvhdmipkt_9171.c
54a1b5e5aaf0848a72befc896ed12f1de433ad4f - src/common/modeset/hdmipacket/nvhdmipkt_9471.c
57dbf547549c6fe24eb51cc54185b321c263108f - src/common/modeset/hdmipacket/nvhdmipkt.h
9be7b7be94a35d1d9a04f269ff560dbbb7860a2a - src/common/modeset/hdmipacket/nvhdmipkt_9571.c
559406ebdbd7f810f1ecbeb3e78b6518834b90fe - src/common/modeset/hdmipacket/nvhdmipkt_class.h
e1df3885cd76f5159801c1f66f20b18537eaecf3 - src/common/modeset/hdmipacket/nvhdmipkt_C871.c
5e12a290fc91202e4ba9e823b6d8457594ed72d3 - src/common/modeset/hdmipacket/nvhdmi_frlInterface.h
67db549636b67a32d646fb7fc6c8db2f13689ecc - src/common/modeset/hdmipacket/nvhdmipkt_9271.c
e6d500269128cbd93790fe68fbcad5ba45c2ba7d - src/common/modeset/hdmipacket/nvhdmipkt_C371.c
764d216e9941d0dcf41e89b2a0ddd8acf55902c8 - src/common/modeset/hdmipacket/nvhdmipkt_common.h
b882497ae393bf66a728dae395b64ac53602a1a5 - src/common/softfloat/nvidia/nv-softfloat.h
be9407a273620c0ba619b53ed72d59d52620c3e4 - src/common/softfloat/nvidia/platform.h
f6d98979ab2d1e2b0d664333104130af6abbcad5 - src/common/softfloat/source/f64_to_i64_r_minMag.c
21a6232d93734b01692689258a3fdfbbf4ff089d - src/common/softfloat/source/s_roundToUI32.c
29321080baa7eab86947ac825561fdcff54a0e43 - src/common/softfloat/source/i32_to_f32.c
dafa667ee5dd52c97fc0c3b7144f6b619406c225 - src/common/softfloat/source/s_mulAddF64.c
108eec2abf1cddb397ce9f652465c2e52f7c143b - src/common/softfloat/source/f64_roundToInt.c
513a7d1c3053fc119efcd8ae1bcc9652edc45315 - src/common/softfloat/source/f32_lt.c
d19ff7dfece53875f2d6c6f7dd9e7772f7b0b7ec - src/common/softfloat/source/f32_to_i64_r_minMag.c
2db07bbb8242bc55a24ef483af6d648db0660de0 - src/common/softfloat/source/f32_add.c
c951c9dffa123e4f77ed235eca49ef9b67f9f3d2 - src/common/softfloat/source/s_subMagsF64.c
5c1026617c588bcf5f1e59230bd5bb900600b9ac - src/common/softfloat/source/f64_mul.c
5c4ee32cc78efc718aaa60ec31d0b00b1bee3c2c - src/common/softfloat/source/f64_to_ui64_r_minMag.c
6fa7493285fe2f7fdc0ac056a6367e90327905c2 - src/common/softfloat/source/f32_sub.c
da3b3f94a817909a3dc93ca5fa7675805c7979e0 - src/common/softfloat/source/f64_isSignalingNaN.c
d701741d8d6a92bb890e53deda1b795f5787f465 - src/common/softfloat/source/f64_le.c
baa7af4eea226140c26ffe6ab02a863d07f729fb - src/common/softfloat/source/f64_eq_signaling.c
2e5c29d842a8ebc5fbf987068dc9394cee609cc7 - src/common/softfloat/source/f32_to_ui64.c
054b23a974fc8d0bab232be433c4e516e6c1250a - src/common/softfloat/source/f64_lt_quiet.c
dde685423af544e5359efdb51b4bf9457c67fa3b - src/common/softfloat/source/f32_sqrt.c
fb062ecbe62a1f5878fd47f0c61490f2bde279dd - src/common/softfloat/source/s_roundToI32.c
8e58f0258218475616ff4e6317516d40ad475626 - src/common/softfloat/source/f32_lt_quiet.c
ab19c6b50c40b8089cb915226d4553d1aa902b0e - src/common/softfloat/source/f64_to_i32_r_minMag.c
86fdc2472526375539216461732d1db6a9f85b55 - src/common/softfloat/source/s_roundPackToF32.c
9266c83f3e50093cc45d7be6ab993a0e72af1685 - src/common/softfloat/source/s_roundPackToF64.c
2e0fec421f4defd293cf55c5f3af7d91f4b7d2cc - src/common/softfloat/source/ui64_to_f32.c
68843a93e1f46195243ef1164f611b759cf19d17 - src/common/softfloat/source/f32_le_quiet.c
00ab2120f71117161d4f6daaa9b90a3036a99841 - src/common/softfloat/source/f32_to_ui32.c
d0f8f08c225b60d88b6358d344404ba9df3038ec - src/common/softfloat/source/s_normSubnormalF32Sig.c
0108fe6f0d394ad72083aff9bb58507f97a0b669 - src/common/softfloat/source/ui32_to_f64.c
7bc81f5bc894118c08bfd52b59e010bc068ed762 - src/common/softfloat/source/ui32_to_f32.c
0adfa7e174cdb488bb22b06642e14e7fc6f49c67 - src/common/softfloat/source/s_roundToI64.c
c3ce12c227d25bc0de48fbcf914fc208e2448741 - src/common/softfloat/source/f64_sub.c
b9fd15957f7ae5effeccb5d8adaa7434b43f44e1 - src/common/softfloat/source/s_roundToUI64.c
29396b7c23941024a59d5ea06698d2fbc7e1a6ca - src/common/softfloat/source/f64_to_i64.c
ae25eea499b3ea5bdd96c905fd0542da11083048 - src/common/softfloat/source/s_normRoundPackToF64.c
b22876b0695f58ee56143c9f461f1dde32fefbf3 - src/common/softfloat/source/f64_to_ui64.c
b8c5ccc1e511637d8b2ba2657de4937b80c01c07 - src/common/softfloat/source/f32_le.c
0126e0fceb1fa7912f4d5b8c3a6ebb4a048eb98a - src/common/softfloat/source/f16_to_f32.c
1ff879eca2a273293b5cd6048419b2d2d8063b93 - src/common/softfloat/source/f64_mulAdd.c
0e9694d551848d88531f5461a9b3b91611652e9a - src/common/softfloat/source/f64_to_ui32_r_minMag.c
5a5e0d9f1ee7e8c0d1d4f9fbcf6eba330a5f1792 - src/common/softfloat/source/f32_isSignalingNaN.c
bc992c88f3de09e3a82447cf06dbde7c6604f7f8 - src/common/softfloat/source/f64_to_f32.c
1a86a6948bf6768bd23a19f1f05d40968c1d2b15 - src/common/softfloat/source/f64_rem.c
50daf9186bc5d0180d1453c957164b136d5ffc89 - src/common/softfloat/source/f64_eq.c
09cb0cdb90eb23b53cd9c1a76ba26021084710d1 - src/common/softfloat/source/s_addMagsF32.c
9f4d355d85fbe998e243fe4c7bbf8ad23062b6e2 - src/common/softfloat/source/i64_to_f64.c
fd40a71c7ebf9d632a384fadf9487cfef4f3ea98 - src/common/softfloat/source/s_shiftRightJam128.c
aaf6ccb77a1a89fa055a0fb63513297b35e2e54b - src/common/softfloat/source/f64_le_quiet.c
38bd00e9c4d2f1354c611404cca6209a6c417669 - src/common/softfloat/source/s_countLeadingZeros64.c
d9a86343e6cc75714f65f690082dd4b0ba724be9 - src/common/softfloat/source/s_roundPackToF16.c
0bf499c0e3a54186fa32b38b310cc9d98ccdcfe3 - src/common/softfloat/source/f32_eq.c
d4b26dc407a891e9ff5324853f1845a99c5d5cd2 - src/common/softfloat/source/f32_to_i32.c
296c40b0589536cb9af3231ad3dcd7f2baaa6887 - src/common/softfloat/source/f64_lt.c
0d8e42636a3409a647291fdb388001c2b11bba07 - src/common/softfloat/source/f32_to_f16.c
9a60700ce25578100d83d529e49f08f71cf35e17 - src/common/softfloat/source/s_normSubnormalF16Sig.c
ec1a797b11f6e846928a4a49a8756f288bda1dfa - src/common/softfloat/source/i32_to_f64.c
729e790328168c64d65a1355e990274c249bbb3a - src/common/softfloat/source/f32_to_i32_r_minMag.c
9a5b93459ace2da23964da98617d6b18006fab86 - src/common/softfloat/source/s_countLeadingZeros8.c
84b0a01ba2a667eb28b166d45bd91352ead83e69 - src/common/softfloat/source/i64_to_f32.c
4b37be398b3e73ae59245f03b2ba2394fc902b4d - src/common/softfloat/source/s_normSubnormalF64Sig.c
6f83fa864007e8227ae09bb36a7fdc18832d4445 - src/common/softfloat/source/f32_mul.c
daeb408588738b3eb4c8b092d7f92ac597cf1fc6 - src/common/softfloat/source/f32_rem.c
a94c8c2bd74633027e52e96f41d24714d8081eb4 - src/common/softfloat/source/s_approxRecipSqrt_1Ks.c
69dc4cc63b2a9873a6eb636ee7cb704cbd502001 - src/common/softfloat/source/f64_to_ui32.c
50b3147f8413f0595a4c3d6e6eeab84c1ffecada - src/common/softfloat/source/s_normRoundPackToF32.c
bbc70102b30f152a560eb98e7a1a4b11b9ede85e - src/common/softfloat/source/f64_sqrt.c
760fd7c257a1f915b61a1089b2acb143c18a082e - src/common/softfloat/source/s_addMagsF64.c
ebb4f674b6213fec29761fc4e05c1e3ddeda6d17 - src/common/softfloat/source/f32_mulAdd.c
4445b1fbbd507144f038fd939311ff95bc2cf5f1 - src/common/softfloat/source/ui64_to_f64.c
871cb1a4037d7b4e73cb20ad18390736eea7ae36 - src/common/softfloat/source/f32_to_ui64_r_minMag.c
ce37cdce572a3b02d42120e81c4969b39d1a67b6 - src/common/softfloat/source/f64_to_i32.c
c29536f617d71fe30accac44b2f1df61c98a97dc - src/common/softfloat/source/f64_div.c
54cbeb5872a86e822bda852ec15d3dcdad4511ce - src/common/softfloat/source/f64_add.c
e7890082ce426d88b4ec93893da32e306478c0d1 - src/common/softfloat/source/s_approxRecipSqrt32_1.c
824383b03952c611154bea0a862da2b9e2a43827 - src/common/softfloat/source/s_subMagsF32.c
00c612847b3bd227a006a4a2697df85866b80315 - src/common/softfloat/source/s_mulAddF32.c
7c8e5ab3f9bf6b2764ce5fffe80b2674be566a12 - src/common/softfloat/source/softfloat_state.c
e4930e155580a0f5aa7f3694a6205bc9aebfe7aa - src/common/softfloat/source/f32_to_f64.c
1484fc96d7731695bda674e99947280a86990997 - src/common/softfloat/source/f32_to_i64.c
2960704c290f29aae36b8fe006884d5c4abcabb4 - src/common/softfloat/source/f32_div.c
23b76c1d0be64e27a6f7e2ea7b8919f1a45a8e7c - src/common/softfloat/source/f32_to_ui32_r_minMag.c
fe06512577e642b09196d46430d038d027491e9f - src/common/softfloat/source/f32_eq_signaling.c
5e6f9e120a17cc73297a35e4d57e4b9cbce01780 - src/common/softfloat/source/s_mul64To128.c
e0ad81cfb5d2c0e74dc4ece9518ca15ffc77beaf - src/common/softfloat/source/f32_roundToInt.c
d8b0c55a49c4fa0b040541db6d5ff634d7d103e7 - src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
a6d5c83f6a0542b33ac9c23ac65ef69002cfff9d - src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
8efb3f7cd3217b5cd25896b4bad058c72fe5b89a - src/common/softfloat/source/8086-SSE/specialize.h
3d0dbc0a672d039a6346e1c21ddf87ffc9181978 - src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
d152bc457b655725185bdff42b36bb96d6e6715e - src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c
1dd1b424087d9c872684df0c1b4063b077992d5f - src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
252c816378fddab616b1f2a61e9fedd549224483 - src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
21a11759ed2afd746a47c4d78b67640c2d052165 - src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c
98a850359fe08a7e39212f89ce96014ba80910da - src/common/softfloat/source/8086-SSE/s_f16UIToCommonNaN.c
0cbae7a5abc336331d460cbd3640d2cda02af434 - src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
4cd1d6cfca3936a39aab9bc0eb622f5c7c848be1 - src/common/softfloat/source/include/softfloat_types.h
1ded4df85ff5fa904fa54c27d681265425be1658 - src/common/softfloat/source/include/primitiveTypes.h
5f589a4d48cc59a0e5762303df9ea4a06ca398da - src/common/softfloat/source/include/softfloat.h
9d8a025889f3ec0e1cca7c4b52308158e1f39226 - src/common/softfloat/source/include/primitives.h
f118cad66d3c8ee17a52cec97cd3dc7e7a1cf2bc - src/common/softfloat/source/include/internals.h
14045fa6330dc6ed20d35eac5b4c5909631bca90 - src/common/src/nv_smg.c
abccf0a8732b881d904d937287ced46edcde45ac - src/nvidia/Makefile
c5f16fdf43ca3d2845d120c219d1da11257072b0 - src/nvidia/nv-kernel.ld
dcf4427b83cce7737f2b784d410291bf7a9612dc - src/nvidia/arch/nvalloc/unix/include/nv-reg.h
4750735d6f3b334499c81d499a06a654a052713d - src/nvidia/arch/nvalloc/unix/include/nv-caps.h
3c61881e9730a8a1686e422358cdfff59616b670 - src/nvidia/arch/nvalloc/unix/include/nv_escape.h
7fc52a43b242a8a921c2707589fa07c8c44da11c - src/nvidia/arch/nvalloc/unix/include/nv.h
81592e5c17bebad04cd11d73672c859baa070329 - src/nvidia/arch/nvalloc/unix/include/nv-chardev-numbers.h
e69045379ed58dc0110d16d17eb39a6f600f0d1d - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-lockless-diag.h
d1b1a1bc1fa30c1a966e95447f7831a06340d2d0 - src/nvidia/arch/nvalloc/unix/include/nv-priv.h
7e0175a8006f06b1d5f5be078d851a4f01648b96 - src/nvidia/arch/nvalloc/unix/include/nv-nb-regs.h
2eb11e523a3ecba2dcd68f3146e1e666a44256ae - src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h
5f004c33f130e6c5cd275f9c85d46185e4e9b757 - src/nvidia/arch/nvalloc/unix/include/os_custom.h
499e72dad20bcc283ee307471f8539b315211da4 - src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h
824ffbe85c591c7423855bee7bf3193473ef2b70 - src/nvidia/arch/nvalloc/unix/include/osapi.h
669bd0c054b00a74e8996c18063fa9bbf5cd7690 - src/nvidia/arch/nvalloc/unix/include/os-interface.h
2ffd0138e1b3425ade16b962c3ff02a82cde2e64 - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numa.h
b3ecb82f142a50bdc37eafaeb86d67f10fbcf73f - src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h
af45762b6eeae912cc2602acf7dc31d30775ade7 - src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h
107d1ecb8a128044260915ea259b1e64de3defea - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h
98a5a3bd7b94e69f4e7d2c3a1769583c17ef5b57 - src/nvidia/arch/nvalloc/unix/src/os.c
a659a503a6fcffdcacd2b76ae6b1f156b4b9216c - src/nvidia/arch/nvalloc/unix/src/osmemdesc.c
b5ae9b8d551a3e5489605c13686fb6cce4579598 - src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c
a17aae37486b325442e447489b64add3694ab8b0 - src/nvidia/arch/nvalloc/unix/src/osunix.c
b5b409625fde1b640e4e93276e35248f0fccfa4c - src/nvidia/arch/nvalloc/unix/src/gcc_helper.c
07f9c0995f1fbbba9eb819321996b57c1d2b86cd - src/nvidia/arch/nvalloc/unix/src/exports-stubs.c
d8815125dbf79831b8fe55367bba60e7115243cc - src/nvidia/arch/nvalloc/unix/src/osinit.c
b1e9f004152562aebd967505fcc1f52b774aef15 - src/nvidia/arch/nvalloc/unix/src/osapi.c
a7383deea9dcab093323d8dde1ede73f85f93343 - src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c
b1a6d0a1ca4307b8e8d9cf136c94ef7c9efbae4c - src/nvidia/arch/nvalloc/unix/src/registry.c
915ee6dbffff92a86d68ac38549b25aa1e146872 - src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c
ffea38efca6a43af9bc61bb6cb8c2b14c3d6fc20 - src/nvidia/arch/nvalloc/unix/src/escape.c
d1089d8ee0ffcdbf73a42d7c4edb90769aa79d8c - src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h
8530e3d1db60647a9132e10c2119a75295f18060 - src/nvidia/arch/nvalloc/common/inc/nv-firmware.h
1cd024cc06bba6f7c3663ca2d03fe25bd77761d3 - src/nvidia/generated/g_gpu_mgmt_api_nvoc.c
0be1c1ff5f200a9aa68cdf3d03bc4780e757a1ea - src/nvidia/generated/g_traceable_nvoc.h
998d18bc2f6e2cdd00cf383000b66be8e8778baa - src/nvidia/generated/g_nv_debug_dump_nvoc.h
4491368ac52cfda834bdd24df3b6f156c32ec3a9 - src/nvidia/generated/g_client_nvoc.c
4eb2331b2f9f8d8c01d62ad771702e9b42f22b65 - src/nvidia/generated/g_lock_stress_nvoc.h
6b5bf7b2f5dd000bfa2949e14642dd582ba4a378 - src/nvidia/generated/g_event_buffer_nvoc.h
cd5f4b0bc23710e5b6277ff214a62c4993e95581 - src/nvidia/generated/g_code_coverage_mgr_nvoc.c
b9903d23010ea9d63117c27d5fe0cfba09849fa4 - src/nvidia/generated/g_context_dma_nvoc.c
4b7aaad308f2f25b07d932fc0fe0c3327db522a9 - src/nvidia/generated/g_objtmr_nvoc.h
7bd355d08dc6f2509db22ed56f1c05ab97f5f620 - src/nvidia/generated/g_allclasses.h
4eea9bd7952613f08af07508e2e9c1c0344940e7 - src/nvidia/generated/g_gpu_mgr_nvoc.h
c5cad88aa7de5a04a3b6f9836f355347448d6a7b - src/nvidia/generated/g_rmconfig_util.h
db1d1e047d00780efbe4c1c1ae6e4fecd3ab49e8 - src/nvidia/generated/g_os_desc_mem_nvoc.h
1ec59322d0874153252a387dcb50bf6d7328d56e - src/nvidia/generated/g_system_mem_nvoc.c
21e57b9c63e847eeb5a29c218db2c5c37db83298 - src/nvidia/generated/g_gpu_nvoc.c
4613f3d42dbc899b278fca71c3aaae79159d7dbe - src/nvidia/generated/g_gpu_user_shared_data_nvoc.c
b55573cb02ff8129aa4f5aa050ac53d1f4fcfdb2 - src/nvidia/generated/g_rs_resource_nvoc.h
16c8d551a3a908ec194d39c88c5603cea436c9b7 - src/nvidia/generated/g_binary_api_nvoc.c
a232e1da560db2322a921a9f0dc260ad703af2b4 - src/nvidia/generated/g_mem_nvoc.h
c503ca5954b8f6ebdba96904a1616a55ce08a2d3 - src/nvidia/generated/g_device_nvoc.c
e7cc58e9f8173583bd253fa73df56324e48aa5ad - src/nvidia/generated/g_io_vaspace_nvoc.h
b93ab0b9e39ca3c5b397cbdba58e4d9894d4130f - src/nvidia/generated/g_rpc-structures.h
afda2b8579ed309e23be0ad1a835ee84fcbe535f - src/nvidia/generated/g_client_nvoc.h
e97edab623386f7d1534b4f053a66fc8659167f6 - src/nvidia/generated/g_event_nvoc.h
f4b2bffbdbb2b0b398e8dfe3420e46b2bf27839c - src/nvidia/generated/g_hal_nvoc.h
4626f4a1a4eadc3695d79454db25bd0153d1165d - src/nvidia/generated/g_resource_fwd_decls_nvoc.h
30035e0fb1ae8b816fc42b78a17eb30462640ce4 - src/nvidia/generated/g_kernel_head_nvoc.h
52ae6273ddf101e9715aed99991506cad8e96859 - src/nvidia/generated/g_disp_inst_mem_nvoc.c
abc769851bd523ee08cf829bf3864cf5475066ec - src/nvidia/generated/g_subdevice_nvoc.h
255c404719b18c2a3aec2a47948c0fbcf4affd4b - src/nvidia/generated/rmconfig.h
c7fda8cbe109ad2736694ce9ec0e2ab93d0e3f2c - src/nvidia/generated/g_mem_list_nvoc.h
f9bdef39159a8475626a0edcbc3a53505a0ff80a - src/nvidia/generated/g_os_hal.h
dc7bbba203ee5ff91b6f14eb3abfad8c15854e1d - src/nvidia/generated/g_mem_desc_nvoc.h
1702c9d021149c0f5c73ebeda7bea29e246af31d - src/nvidia/generated/g_nv_name_released.h
2e0c45e4186d44774286a71daf797c980c2ddf7a - src/nvidia/generated/g_objtmr_nvoc.c
9b78bc02a8fe0ec297167bb4bdb7f8255b94198b - src/nvidia/generated/g_disp_capabilities_nvoc.h
967d8c0d7d5c1271e82f30af992f48322695d367 - src/nvidia/generated/g_eng_state_nvoc.h
831cdf0767703c00918e70ef3933716b201781f1 - src/nvidia/generated/g_syncpoint_mem_nvoc.c
ce74dbd8f88f50af0b3ea3b3034395cd98eb08e8 - src/nvidia/generated/g_gpu_access_nvoc.c
08ad957117efefe2e04448bce1cad2dec0e984af - src/nvidia/generated/g_odb.h
033a6d6bac0829783afe8a582fa6c4f329be7f04 - src/nvidia/generated/g_hypervisor_nvoc.h
c1471919f6c19e1b576b7c636ba5ae7ab9d58177 - src/nvidia/generated/g_gpu_db_nvoc.c
f68b7e209e268d14b0b98686d1766683139b9b5f - src/nvidia/generated/g_system_nvoc.c
cdcab5a0094b9e9664f7a0e62ec31783617de5ab - src/nvidia/generated/g_code_coverage_mgr_nvoc.h
5e614b6db957a0ae77502ca6d5966bca506f8020 - src/nvidia/generated/g_gpu_group_nvoc.h
eb15207a28b8eed41182de6311ec48f5e321729f - src/nvidia/generated/g_gpu_user_shared_data_nvoc.h
ef9def144aaf1b2b292c9815c68a6007eff56dda - src/nvidia/generated/g_rs_server_nvoc.c
eb07ee114f8cfc039978cdb7501c3ea03c879864 - src/nvidia/generated/g_generic_engine_nvoc.c
d2f3d17e05337992bc031c823186583d62c10235 - src/nvidia/generated/g_chips2halspec_nvoc.h
ad94c2430328b91392db363158fa2279b794cc54 - src/nvidia/generated/g_gpu_resource_nvoc.h
c77048521f9c9890f14108c2c5457d78a85fe69d - src/nvidia/generated/g_gpu_access_nvoc.h
38a98487eec65d8807e47f99b013619c1537e983 - src/nvidia/generated/g_dce_client_nvoc.c
d09bde39b1f12490ea0a696d6915d521c9f13953 - src/nvidia/generated/g_rpc-message-header.h
9becba61ba5ff7580b353abfb87cbe0f37817195 - src/nvidia/generated/g_binary_api_nvoc.h
50f70075eac2515b189e2d07a06b13cfa826945f - src/nvidia/generated/g_rs_client_nvoc.h
f8b984c6bc09554753cfe6692dde2eb3171abc57 - src/nvidia/generated/g_disp_channel_nvoc.h
4931b316fc042705a5f094c8c23b0038f980b404 - src/nvidia/generated/g_generic_engine_nvoc.h
2a28557874bd51f567ef42c75fd4e3b09d8ad44d - src/nvidia/generated/g_gpu_arch_nvoc.c
a17058fe665949f1e3861fe092e29b229cefbe62 - src/nvidia/generated/g_mem_mgr_nvoc.h
7aa02b964507a8269d35dc56170955025b98bd1a - src/nvidia/generated/g_gpu_arch_nvoc.h
0b9296f7797325b80ff0900f19a3763b564eb26b - src/nvidia/generated/g_context_dma_nvoc.h
4210ff36876e84e0adf1e9d4afb6654c7e6e5060 - src/nvidia/generated/g_resserv_nvoc.h
3613b4ec9b285a4e29edefa833704789c887c189 - src/nvidia/generated/g_tmr_nvoc.c
517b6b986a3749c9a6dd0f22bbef6569cdb48d97 - src/nvidia/generated/g_rs_client_nvoc.c
7670f19682bcd6224c999a8f80e770368e735632 - src/nvidia/generated/g_lock_stress_nvoc.c
b348b1b465cb359ca3cf10f5e121714ffb95b582 - src/nvidia/generated/g_standard_mem_nvoc.c
54fa23e7cf0f07d625c25d5c08dad9cd1714f851 - src/nvidia/generated/g_standard_mem_nvoc.h
7e528d775caa7ff2bf4159c94fc2c2e4d3aadffc - src/nvidia/generated/g_chips2halspec_nvoc.c
40aa2c65168c893c725c983b2219ceff03d05608 - src/nvidia/generated/g_gpu_halspec_nvoc.h
17c4ce5e67bf8bc8f48a4e2b1b7752d4597703ad - src/nvidia/generated/g_kernel_head_nvoc.c
3ad8329c7f7d63633b7abf2cdd502e4257fa1726 - src/nvidia/generated/g_event_nvoc.c
7aba35752cd4c6447f844cd9432d7dc1bc77b33d - src/nvidia/generated/g_disp_capabilities_nvoc.c
fa3a5418a5d6bd7fb2b375ed7f7b64293fdf5f86 - src/nvidia/generated/g_ioaccess_nvoc.h
3c3961ddf6422294c3322e3b0a3c97ee94bfd010 - src/nvidia/generated/g_gpu_mgr_nvoc.c
b73b22368abf741cc0a5108b6c9585a81de28b57 - src/nvidia/generated/g_hal.h
6e219df1367ce7dc8f5f4a1f2209a7808a927871 - src/nvidia/generated/g_hal_mgr_nvoc.c
279538daf54163a7a53aab1330fba2c00fc3f234 - src/nvidia/generated/g_rmconfig_util.c
49e84272bbce137683232275b4f13a19c644c650 - src/nvidia/generated/g_prereq_tracker_nvoc.h
57eb0772bc280690eade3f5d54f786e252c75099 - src/nvidia/generated/g_object_nvoc.c
113297c44e702cd6535e007c1c5b2dd5e6f809dc - src/nvidia/generated/g_ioaccess_nvoc.c
216040d1883e8c4f1e8b47d9f6b279ec111d094d - src/nvidia/generated/g_hal_mgr_nvoc.h
113b10cf6cef2608ff4a288e2944d56da64f355d - src/nvidia/generated/g_gpu_group_nvoc.c
86bb88ccdfa34510d4acf21684e5b8bd32d820b2 - src/nvidia/generated/g_disp_sf_user_nvoc.h
5c0ed2e135f53ca09fbfb542bea88b304a2e1208 - src/nvidia/generated/g_event_buffer_nvoc.c
979082b8c018eee55d880265f7bfd294360816c6 - src/nvidia/generated/g_hda_codec_api_nvoc.c
f917323efc9429fcea8643eb9a8d5ee46b1b50a5 - src/nvidia/generated/g_eng_state_nvoc.c
437329a9c6e35e4b02945ec035448e704521280e - src/nvidia/generated/g_hda_codec_api_nvoc.h
fba7a2891fe10e837f5897034b8176a7307fbb12 - src/nvidia/generated/g_lock_test_nvoc.h
05269b7e73347b580f11decf0e1b9f467d0cb60c - src/nvidia/generated/g_dce_client_nvoc.h
e175ab2ef1fd5b64c9f0d665a26b2ed6f864b106 - src/nvidia/generated/g_vaspace_nvoc.h
cc7ec616b034ec01da1c5176b6c62759c3f31a06 - src/nvidia/generated/g_subdevice_nvoc.c
93f9738c0e8aa715592306ddf023adf6b548dcc4 - src/nvidia/generated/g_nvh_state.h
1745f3002758556d1b6d11a24d088ef87ba18bd5 - src/nvidia/generated/g_virt_mem_mgr_nvoc.c
8c9f26e959fa9a6a3c4a5cb8875458cc4a9bfe9e - src/nvidia/generated/g_os_nvoc.c
3b0e038829647cfe0d8807579db33416a420d1d2 - src/nvidia/generated/g_chips2halspec.h
a1fad555b8ad36437992afdd6e3e08d236167ac7 - src/nvidia/generated/g_journal_nvoc.h
d210a82e3dda39239201cfc1c2fcb2e971915c1e - src/nvidia/generated/g_device_nvoc.h
836f88914b046eadad9435786e1b474ee6690f5f - src/nvidia/generated/g_gpu_nvoc.h
ea0d27b0f05818e2e44be7d04b31f8843e1d05b7 - src/nvidia/generated/g_io_vaspace_nvoc.c
10529db24fb0501aa7f2aae25e0a87247ab5405c - src/nvidia/generated/g_resource_nvoc.h
5d47bed309c731bfee4144f61093192e7efcaa55 - src/nvidia/generated/g_disp_channel_nvoc.c
8771d8f2cf58f5e1d91ece01c1962677cebc5e4b - src/nvidia/generated/g_rmconfig_private.h
951c1c8969a621344d4d2a3ec61b1ad51b39ea79 - src/nvidia/generated/g_client_resource_nvoc.c
629b6daac6c9215dc982973b6adcf84314d34d57 - src/nvidia/generated/g_gpu_halspec_nvoc.c
29d5ccf874298c8156314a6eb23c209f2920b779 - src/nvidia/generated/g_gpu_resource_nvoc.c
fc26ab853e7c981c271ced30dfd78d95cd9bcdfd - src/nvidia/generated/g_gpu_db_nvoc.h
aa76beb8b33254fae884434b688093f9c7f12c87 - src/nvidia/generated/g_hal_private.h
86739259b5059c9b9ea3061bd8d1846385cb95f4 - src/nvidia/generated/g_sdk-structures.h
41bc858f6aca964a8977ad96911ecf1e8b46385d - src/nvidia/generated/g_hal_archimpl.h
f87916eae53dbea2f6bdbe80a0e53ecc2071d9fd - src/nvidia/generated/g_lock_test_nvoc.c
6b8597803d509372152e3915f15139186294add5 - src/nvidia/generated/g_gpu_class_list.c
2101385d1332db9a2902370a6b3c6117ca8b2737 - src/nvidia/generated/g_kern_disp_nvoc.h
d71ff42bc0fc0faf1999a6cbe88c4492a47e200e - src/nvidia/generated/g_os_nvoc.h
e58abb783f7561d0af925c2fca392c5165fcb199 - src/nvidia/generated/g_kern_disp_nvoc.c
d6a34926ab710156c9c4b2d9f12a44e6dafd43d1 - src/nvidia/generated/g_tmr_nvoc.h
c4c67b0e0284656b32c7b4547e22d521c442124a - src/nvidia/generated/g_disp_objs_nvoc.h
8e49b4d77641c98c6101dbc88a79290ceca6271a - src/nvidia/generated/g_rs_server_nvoc.h
af206c390549eff5d690ad07f3e58cd417f07f5f - src/nvidia/generated/g_hal_register.h
be659882e731b6a2019639265af46239c5c96ebf - src/nvidia/generated/g_hal_nvoc.c
db76e8669776fbfa901c60d9b9908af2fabc4703 - src/nvidia/generated/g_virt_mem_mgr_nvoc.h
797bd0197236fb0afc2c7e052487db803ac5baf0 - src/nvidia/generated/g_rs_resource_nvoc.c
884bed29fb4735ae0b4504fc874702acd29ee541 - src/nvidia/generated/g_mem_mgr_nvoc.c
3168beb42f15591a50339692d502e04977615a7b - src/nvidia/generated/g_prereq_tracker_nvoc.c
8e0071daaf5471a0fb3856705ec993704eaed4b5 - src/nvidia/generated/g_disp_inst_mem_nvoc.h
fb464cf839a1e76ac2a27346c7cd46ca921f1f56 - src/nvidia/generated/g_traceable_nvoc.c
8588d6f88ab5e8682952063fe0e2c840b334c622 - src/nvidia/generated/g_eng_desc_nvoc.h
de99523103dd7df0934cbe7aa21179ec7f241817 - src/nvidia/generated/g_os_desc_mem_nvoc.c
aa43dd8bdbdc71dc64d65e948221c7d5235588e7 - src/nvidia/generated/g_disp_objs_nvoc.c
9b6cc3a5e9e35139e9245cbe753fe9a552a488c0 - src/nvidia/generated/g_syncpoint_mem_nvoc.h
ae311b0968df9e9c9c2cec89e3060c472fc70a4c - src/nvidia/generated/g_mem_nvoc.c
dc7a782be9a0096701771cb9b2dc020c2f814e6d - src/nvidia/generated/g_system_nvoc.h
93a47004dd1c7529c6ee5f8abdf8b49c336fb681 - src/nvidia/generated/g_disp_sf_user_nvoc.c
3b5dfad8fccd7251cc177c7ea1b90265b4b6c901 - src/nvidia/generated/g_gpu_mgmt_api_nvoc.h
b53ec15a1aaf102d42b79881cd1b270afeb7205c - src/nvidia/generated/g_system_mem_nvoc.h
67b2d3ea81ebe7be679bcafc688ced0d64f16edf - src/nvidia/generated/g_object_nvoc.h
b1be7145e70d8811fbdbe07c0e99f32ad0e38429 - src/nvidia/generated/g_client_resource_nvoc.h
0d5b87b117d39b173a2a21a5cd71572bc2b26697 - src/nvidia/generated/g_resource_nvoc.c
51df7972f9932c2a5d800d4e2b3e4828e5aa2038 - src/nvidia/generated/g_vaspace_nvoc.c
0820fa0a975b2474ce0fdf64508cbd7758f60e5c - src/nvidia/generated/g_ref_count_nvoc.h
fff3ebc8527b34f8c463daad4d20ee5e33321344 - src/nvidia/inc/lib/ref_count.h
ec26741397ebd68078e8b5e34da3b3c889681b70 - src/nvidia/inc/lib/base_utils.h
f8d9eb5f6a6883de962b63b4b7de35c01b20182f - src/nvidia/inc/lib/protobuf/prb.h
601edb7333b87349d791d430f1cac84fb6fbb919 - src/nvidia/inc/lib/zlib/inflate.h
671c628ff9d4e8075f953766adcab9bfc54bd67c - src/nvidia/inc/libraries/poolalloc.h
1e8730e4abd210e3c648ef999ccc2b1f1839b94c - src/nvidia/inc/libraries/field_desc.h
8dd7f2d9956278ed036bbc288bff4dde86a9b509 - src/nvidia/inc/libraries/eventbufferproducer.h
1b28bd0ee2e560ca2854a73a3ee5fb1cf713d013 - src/nvidia/inc/libraries/nvoc/utility.h
d3cd73c0c97a291e76e28a6e3834d666e6452172 - src/nvidia/inc/libraries/nvoc/prelude.h
79b556739f0648cec938f281794663433fc5e048 - src/nvidia/inc/libraries/nvoc/runtime.h
91c67f272f0ada6f386e9f4a78fbde70aa5c883d - src/nvidia/inc/libraries/nvoc/object.h
c0f66cf7b2fb6ca24b5d4badede9dcac0e3b8311 - src/nvidia/inc/libraries/nvoc/rtti.h
a3db778e81f7188a700e008e4c5f5b1320ab811e - src/nvidia/inc/libraries/mmu/gmmu_fmt.h
1daea206ab581fa3554ff1811e1253a7d0053ac0 - src/nvidia/inc/libraries/mmu/mmu_fmt.h
56b8bae7756ed36d0831f76f95033f74eaab01db - src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h
b8e52b576e6668e4de7ea65a31e12c2bb491a591 - src/nvidia/inc/libraries/mapping_reuse/mapping_reuse.h
e772583f7fbf994fcf923d527d42372a716b4c57 - src/nvidia/inc/libraries/ioaccess/ioaccess.h
26853c886d848fb88e14da3aceab23f90589c05d - src/nvidia/inc/libraries/utils/nvprintf_level.h
c314121149d3b28e58a62e2ccf81bf6904d1e4bc - src/nvidia/inc/libraries/utils/nvmacro.h
72dcc09b77608263573bd34adf09393328eddf86 - src/nvidia/inc/libraries/utils/nvrange.h
b598ccd2721892b6915d4be432f1fc332477b666 - src/nvidia/inc/libraries/utils/nvbitvector.h
9aa5870d052a45c2489a6ea1a4f2e30fbc52d6be - src/nvidia/inc/libraries/utils/nv_enum.h
357a7fd76af55cebcbc96a230064e23e50c03f57 - src/nvidia/inc/libraries/utils/nvprintf.h
1b265cb4fcc628862e4b27ae63a897871987eb76 - src/nvidia/inc/libraries/utils/nvassert.h
39113db75fdab5a42f9d8653ed1c90018b8b1df4 - src/nvidia/inc/libraries/containers/map.h
11ce1423312f4c34df19672e45678d0531cc299d - src/nvidia/inc/libraries/containers/ringbuf.h
5f116730f8b7a46e9875850e9b6ffb2a908ad6c2 - src/nvidia/inc/libraries/containers/btree.h
fc211c8276ebcee194080140b5f3c30fba3dfe49 - src/nvidia/inc/libraries/containers/queue.h
661b551f4795f076d7d4c4dab8a2ae2f52b0af06 - src/nvidia/inc/libraries/containers/list.h
47c69b04f95664e742f1a0a02711eeb1fb71000b - src/nvidia/inc/libraries/containers/eheap_old.h
5da20ecad3ff8405dea782792c6397d21ba76f7c - src/nvidia/inc/libraries/containers/vector.h
bcfc41a04576a4244c9dc3fe2a32c8e582f16c3e - src/nvidia/inc/libraries/containers/type_safety.h
5cabf8b70c3bb188022db16f6ff96bcae7d7fe21 - src/nvidia/inc/libraries/containers/multimap.h
4e26106c9c758c9e48418451ac01cf591ed74a31 - src/nvidia/inc/libraries/nvlog/nvlog_printf.h
41843197a5c11abc93df89b8f10a5f815e7fe6af - src/nvidia/inc/libraries/nvlog/nvlog.h
13aedc8ccf6acdd71be71b2219f79cd1af411273 - src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h
7c9c9456aaacbeffa11a9af54fe2250095ebbb00 - src/nvidia/inc/libraries/tls/tls.h
87a130551593551380ac3e408f8044cc0423c01a - src/nvidia/inc/libraries/nvport/nvport.h
2487ffc1eb1e50b27ba07e0581da543d80bdaa72 - src/nvidia/inc/libraries/nvport/safe.h
4bf45849bc1c6b89d7a79d761cce84a1d5026eac - src/nvidia/inc/libraries/nvport/debug.h
147d47ef4bd860394d1d8ae82c68d97887e2898b - src/nvidia/inc/libraries/nvport/core.h
6d698ca4fc5e48c525f214a57e1de0cc4aa9e36b - src/nvidia/inc/libraries/nvport/thread.h
6065fa9a525d80f9b61acb19e476066823df0700 - src/nvidia/inc/libraries/nvport/sync.h
a1d93b6ec8ff01a3c2651e772a826ee11a7781d7 - src/nvidia/inc/libraries/nvport/util.h
fb5a011275328b7c1edc55abc62e604462b37673 - src/nvidia/inc/libraries/nvport/atomic.h
16a35b2b6fd6eb855acd64d72480b285795f54b2 - src/nvidia/inc/libraries/nvport/memory.h
f31ed19d0588861b8c2b1489dd4e70d430110db5 - src/nvidia/inc/libraries/nvport/crypto.h
96c7c30c9f6503675f0903a16207a0ac06a6963d - src/nvidia/inc/libraries/nvport/cpu.h
53d843988669f61528cd45099ced749defa4cf7e - src/nvidia/inc/libraries/nvport/string.h
d1863efe7b8a63f1c5a7f47856b95ad31fd1a561 - src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h
9596b274389ea56acff6ca81db8201f41f2dd39d - src/nvidia/inc/libraries/nvport/inline/atomic_clang.h
a8c9b83169aceb5f97d9f7a411db449496dc18f6 - src/nvidia/inc/libraries/nvport/inline/util_generic.h
bbece45965ffbc85fbd383a8a7c30890c6074b21 - src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h
a7cb79bf7ac48e0f5642ecfd2e430bb85587dddf - src/nvidia/inc/libraries/nvport/inline/memory_tracking.h
1d6a239ed6c8dab1397f056a81ff456141ec7f9c - src/nvidia/inc/libraries/nvport/inline/util_valist.h
f267235fd8690e1b1d7485d3a815841607683671 - src/nvidia/inc/libraries/nvport/inline/safe_generic.h
645734ed505a4d977490e54b26cdf49657e20506 - src/nvidia/inc/libraries/nvport/inline/sync_tracking.h
a902e0f4265bd3dbd251afefa8ceb0389464d886 - src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h
2dec1c73507f66736674d203cc4a00813ccb11bc - src/nvidia/inc/libraries/resserv/rs_domain.h
fa5a5d8fa07cae6b8ef9d9135dc5d7e7624533d2 - src/nvidia/inc/libraries/resserv/resserv.h
972165721958839bc1d510fda9409d35ff89ec21 - src/nvidia/inc/libraries/resserv/rs_server.h
883bf7295d707014278e035f670d151275975d18 - src/nvidia/inc/libraries/resserv/rs_resource.h
2ad85ddca7cd230cea917e249871277ef1e59db1 - src/nvidia/inc/libraries/resserv/rs_client.h
cd033fe116a41285a979e629a2ee7b11ec99369f - src/nvidia/inc/libraries/resserv/rs_access_rights.h
df174d6b4f718ef699ca6f38c16aaeffa111ad3c - src/nvidia/inc/libraries/resserv/rs_access_map.h
5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - src/nvidia/inc/os/dce_rm_client_ipc.h
4aa45a3755ef172aa35279e87dd5cd83cab1bc2e - src/nvidia/inc/kernel/vgpu/rpc_hal_stubs.h
f2fd94a00e5debf1dc7f7ad4c00d417552fb0554 - src/nvidia/inc/kernel/vgpu/rpc.h
37598b6c25aac1a07cbc2bc5c76ebecdbca56eb6 - src/nvidia/inc/kernel/vgpu/rm_plugin_shared_code.h
fea4bbeb739723d3b80b5b3d8943e746e58fae07 - src/nvidia/inc/kernel/vgpu/dev_vgpu.h
f64d3723d0c475558bed799da8d2c5ec32a7d3a8 - src/nvidia/inc/kernel/vgpu/vgpuapi.h
8bf8282ce6112a2afb2e7f64d138d6ce90cf37c0 - src/nvidia/inc/kernel/vgpu/rpc_global_enums.h
69360faa428e157580fac445bcf601f44f7646c0 - src/nvidia/inc/kernel/vgpu/rpc_headers.h
b9af629ab29b527f7830b78f52b55b8535b8dbfd - src/nvidia/inc/kernel/vgpu/vgpu_util.h
e33b5b8c324c23d28e91324a87b47a24823dc5f5 - src/nvidia/inc/kernel/vgpu/rpc_vgpu.h
af9d17b204fdddc6f97280fdafd5a414ee8274dc - src/nvidia/inc/kernel/diagnostics/code_coverage_mgr.h
c6efd51b8b8447829a0867cd7fb7a5a5a2fb1e3d - src/nvidia/inc/kernel/diagnostics/traceable.h
fd780f85cb1cd0fd3914fa31d1bd4933437b791d - src/nvidia/inc/kernel/diagnostics/tracer.h
7e75b5d99376fba058b31996d49449f8fe62d3f0 - src/nvidia/inc/kernel/diagnostics/profiler.h
7615ac3a83d0ad23b2160ff8ad90bec9eb1f3c6c - src/nvidia/inc/kernel/diagnostics/journal.h
b259f23312abe56d34a8f0da36ef549ef60ba5b0 - src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h
7f3f19ed69089ba05f5cac44982547718dbf4662 - src/nvidia/inc/kernel/diagnostics/xid_context.h
3a28bf1692efb34d2161907c3781401951cc2d4f - src/nvidia/inc/kernel/diagnostics/journal_structs.h
8ef620afdf720259cead00d20fae73d31e59c2f7 - src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h
701375e96d771b4105f5fe4949ed4a542be4f3d7 - src/nvidia/inc/kernel/os/os_stub.h
408c0340350b813c3cba17fd36171075e156df72 - src/nvidia/inc/kernel/os/os.h
c8496199cd808ed4c79d8e149961e721ad96714e - src/nvidia/inc/kernel/os/capability.h
cda75171ca7d8bf920aab6d56ef9aadec16fd15d - src/nvidia/inc/kernel/os/nv_memory_type.h
70b67003fda6bdb8a01fa1e41c3b0e25136a856c - src/nvidia/inc/kernel/os/nv_memory_area.h
497492340cea19a93b62da69ca2000b811c8f5d6 - src/nvidia/inc/kernel/rmapi/event_buffer.h
499c3d0d76276ee9441d57948ea97877c48b1daa - src/nvidia/inc/kernel/rmapi/rmapi.h
b4bae9ea958b4d014908459e08c93319784c47dd - src/nvidia/inc/kernel/rmapi/event.h
0500c41247fdecd66f25428d279c6dab72bab13e - src/nvidia/inc/kernel/rmapi/binary_api.h
61e3704cd51161c9804cb168d5ce4553b7311973 - src/nvidia/inc/kernel/rmapi/resource.h
2baec15f4c68a9c59dd107a0db288e39914e6737 - src/nvidia/inc/kernel/rmapi/client.h
ac9288d75555180c1d5dd6dd7e0e11fb57a967f2 - src/nvidia/inc/kernel/rmapi/exports.h
835f193521f216d29c678a6018cd9791914b6c01 - src/nvidia/inc/kernel/rmapi/lock_stress.h
b9ff9b201bf2df8651f0c408158aa617638868f6 - src/nvidia/inc/kernel/rmapi/rmapi_specific.h
20adc296ffe79f27d5c24c70716c972a2e0c9a5d - src/nvidia/inc/kernel/rmapi/control.h
deed1715907c1dab8e3304bd4f63b688b72104b7 - src/nvidia/inc/kernel/rmapi/mapping_list.h
4453fe6463e3155063f2bdbf36f44697606a80a5 - src/nvidia/inc/kernel/rmapi/client_resource.h
6cc2de07b21fb21cef1b5b87fb2f1c935782262c - src/nvidia/inc/kernel/rmapi/rs_utils.h
35a65c31c6dcc2824011245ff6e2d5a30f95525c - src/nvidia/inc/kernel/rmapi/rmapi_utils.h
a92dbf2870fe0df245ea8967f2f6a68f5075ecaf - src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h
23e243f9abcb2a4f2d10d141303cd55677b04436 - src/nvidia/inc/kernel/rmapi/rmapi_cache_handlers.h
2724476b61b1790f1b7c293cc86e8a268125e11c - src/nvidia/inc/kernel/rmapi/param_copy.h
15f788614e08d805e963653460858cf013fe0178 - src/nvidia/inc/kernel/rmapi/lock_test.h
2b23f2dbd8f3f63a17a1b63ebb40a2fd7fd8801a - src/nvidia/inc/kernel/rmapi/alloc_size.h
893ec596aab365c2ff393bf2b96aea57f37d01f8 - src/nvidia/inc/kernel/platform/nvpcf.h
5e9928552086947b10092792db4a8c4c57a84adf - src/nvidia/inc/kernel/platform/acpi_common.h
e762205698aff945603324331b443bb2f20cf778 - src/nvidia/inc/kernel/platform/sli/sli.h
15754215ec49815f547dd999b2262a34670dde0b - src/nvidia/inc/kernel/core/locks.h
bdc4ab675c6f6c4bd77c3aaf08aa5c865b186802 - src/nvidia/inc/kernel/core/hal.h
ad378b09a277fba0efd3291d167e1d21071bdf1b - src/nvidia/inc/kernel/core/printf.h
a054be86a4476ba7b9a97052dfcfa4155e059cb9 - src/nvidia/inc/kernel/core/info_block.h
bffae4da6a1f9b7dc7c879587fd674b49b46dac1 - src/nvidia/inc/kernel/core/core.h
37f267155ddfc3db38f110dbb0397f0463d055ff - src/nvidia/inc/kernel/core/strict.h
b00302aec7e4f4e3b89a2f699f8b1f18fc17b1ba - src/nvidia/inc/kernel/core/hal_mgr.h
2d741243a6ae800052ddd478cc6aa7ad0b18f112 - src/nvidia/inc/kernel/core/prelude.h
ebc7c06d9e94218af4cf6b0c03e83650e391e5bc - src/nvidia/inc/kernel/core/thread_state.h
b5859c7862fb3eeb266f7213845885789801194a - src/nvidia/inc/kernel/core/system.h
07f45cd5fab5814e21b9e84425564b43776118fd - src/nvidia/inc/kernel/gpu/gpu_resource_desc.h
7010ff346c27b6453c091f5577672b8b1821808d - src/nvidia/inc/kernel/gpu/gpu_access.h
10ba0b9d4c67c8027b391073dab8dc4388f32fd7 - src/nvidia/inc/kernel/gpu/nvbitmask.h
59f72837997cb0c8ffc491d9a61c61e61b9dca94 - src/nvidia/inc/kernel/gpu/gpu_shared_data_map.h
bca121fb72d54afd714654f1a50eb7192da3135f - src/nvidia/inc/kernel/gpu/gpu_uuid.h
3f0f23a15201105779f3d25dc7628b42990c4b7e - src/nvidia/inc/kernel/gpu/gpu_timeout.h
1ac9c8bf155d1f25f790032b2b6306223199d9ff - src/nvidia/inc/kernel/gpu/gpu_arch.h
f17b704f2489ffedcc057d4a6da77c42ece42923 - src/nvidia/inc/kernel/gpu/gpu_resource.h
28d0d82b58ef13662e8896d3bbc42d340836294e - src/nvidia/inc/kernel/gpu/gpu_user_shared_data.h
e33e4d1537839e41898ff0fab8949e90ee1aed46 - src/nvidia/inc/kernel/gpu/gpu_device_mapping.h
426c6ab6cecc3b1ba540b01309d1603301a86db1 - src/nvidia/inc/kernel/gpu/eng_desc.h
5f5677bee452c64a1b890c3eb65e81fda66ddbaa - src/nvidia/inc/kernel/gpu/error_cont.h
d624e0c45cc8ad24e8c0b2fb5281c0c8a1c7a6d3 - src/nvidia/inc/kernel/gpu/gpu_engine_type.h
c33ab6494c9423c327707fce2bcb771328984a3c - src/nvidia/inc/kernel/gpu/gpu_halspec.h
145b1bc37e6c36b466ea33dd0579d22b530d8dd3 - src/nvidia/inc/kernel/gpu/kern_gpu_power.h
c771936af1de030194894db1312d847038ddb0cb - src/nvidia/inc/kernel/gpu/gpu_child_list.h
0e8353854e837f0ef0fbf0d5ff5d7a25aa1eef7c - src/nvidia/inc/kernel/gpu/eng_state.h
76b24227c65570898c19e16bf35b2cad143f3d05 - src/nvidia/inc/kernel/gpu/gpu.h
0a0c9a8f27feec3e90e15ce9879532ec77450de5 - src/nvidia/inc/kernel/gpu/gpu_acpi_data.h
9ed922ffed4454a10c5e2d8b3123ed653ec653e4 - src/nvidia/inc/kernel/gpu/gpu_ecc.h
f2947fefcaf0611cd80c2c88ce3fdea70953c1ed - src/nvidia/inc/kernel/gpu/gpu_child_class_defs.h
efc50bb2ff6ccf1b7715fd413ca680034920758e - src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h
24d01769b39a6dd62574a95fad64443b05872151 - src/nvidia/inc/kernel/gpu/subdevice/subdevice.h
576216219d27aa887beeccefc22bcead4d1234d7 - src/nvidia/inc/kernel/gpu/disp/kern_disp.h
277a2719f8c063037c6a9ed55ade2b1cb17f48ae - src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h
51a209575d3e3fe8feb7269ece7df0846e18ca2a - src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h
d0899f0e55e6675e267d4c72577be52e39b66121 - src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h
be7da8d1106ee14ff808d86abffb86794299b2df - src/nvidia/inc/kernel/gpu/disp/disp_objs.h
74bc902cd00b17da3a1dfa7fd3ebc058de439b76 - src/nvidia/inc/kernel/gpu/disp/disp_channel.h
b39826404d84e0850aa3385691d8dde6e30d70d4 - src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h
24397d051c941427e54cefc1062d8cd977a8725e - src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h
9a33a37c6cea9bad513aa14c942c689f28f7c0d8 - src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h
5179f01acf7e9e251552dc17c0dcd84f7d341d82 - src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h
22fc153d91a3917ac8e3f2aa94f0d52bfb11f7c2 - src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_commands_responses.h
173e9ecd2224a5259c79f2491302ba4415e82f70 - src/nvidia/inc/kernel/gpu/hfrp/kernel_hfrp.h
3118f2e9b47cfac98a92d195ce67ea63e50bf3ab - src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_common.h
1feab39692ea8796ac7675f4780dfd51e6e16326 - src/nvidia/inc/kernel/gpu/timer/objtmr.h
0cff83f4fdcc8d025cd68e0a12faaeead09fa03b - src/nvidia/inc/kernel/gpu/timer/tmr.h
71dd4fccd3b601508230a2b8b720aaf531a160ff - src/nvidia/inc/kernel/gpu/gsp/gsp_trace_rats_macro.h
e1979c71f3d5ffc92bf2306f9360b70bca0edf1f - src/nvidia/inc/kernel/gpu/gsp/message_queue.h
23d38dc3e66affac9342a839f5ba0d79a40f63ba - src/nvidia/inc/kernel/gpu/gsp/kernel_gsp_trace_rats.h
bb9b8ec9840109b15c174da02e7ac85c1e2c0c70 - src/nvidia/inc/kernel/gpu/rpc/objrpc.h
1cc21ad9136024f7437ef745db6652343588c50a - src/nvidia/inc/kernel/gpu/rpc/objrpcstructurecopy.h
7b7cf3b6459711065d1b849bf5acaea10b6400ca - src/nvidia/inc/kernel/gpu/intr/intr_common.h
1e3bebe46b7f2f542eedace554a4156b3afb51f1 - src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h
97d0a067e89251672f191788abe81cf26dcb335f - src/nvidia/inc/kernel/gpu/device/device.h
889ba18a43cc2b5c5e970a90ddcb770ce873b785 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h
6756126ddd616d6393037bebf371fceacaf3a9f1 - src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h
e4c67260b5cb693d695ad3d8aa96aaed45688322 - src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h
20416f7239833dcaa743bbf988702610e9251289 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h
407cad27681bde8235305464150e275a4a93b5d5 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h
5be45f3abdbb65a8eea959d98499ea8ff9a79de9 - src/nvidia/inc/kernel/gpu/mem_mgr/rm_page_size.h
76de30ac7b722cc5d59fc834d6b9c795ec14d7a5 - src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h
ce4e0f7177f46f4fc507a68b635e5395a3f7dde6 - src/nvidia/inc/kernel/gpu/dce_client/dce_client.h
2c48d7335bdb0b7ea88b78216c0aeab2e11e00c1 - src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h
5b151d0d97b83c9fb76b76c476947f9e15e774ad - src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h
e188d9f2d042ffe029b96d8fbb16c79a0fc0fb01 - src/nvidia/inc/kernel/gpu_mgr/gpu_db.h
ea32018e3464bb1ac792e39227badf482fa2dc67 - src/nvidia/inc/kernel/gpu_mgr/gpu_group.h
02d6a37ef1bb057604cb98a905fa02429f200c96 - src/nvidia/inc/kernel/mem_mgr/mem.h
a5f49a031db4171228a27482d091283e84632ace - src/nvidia/inc/kernel/mem_mgr/system_mem.h
d15991bc770c5ab41fe746995294c5213efa056b - src/nvidia/inc/kernel/mem_mgr/io_vaspace.h
5ae08b2077506cbc41e40e1b3672e615ce9d910f - src/nvidia/inc/kernel/mem_mgr/vaspace.h
0ce5d6370c086d2944b2e8d31ff72a510d98dc8f - src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h
4c386104eaead66c66df11258c3f1182b46e96ee - src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h
1a08e83fd6f0a072d6887c60c529e29211bcd007 - src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h
2d4afabd63699feec3aea5e89601db009fc51a08 - src/nvidia/inc/kernel/mem_mgr/standard_mem.h
24928c8b4e8b238f1921a1699f3af59bcff994ed - src/nvidia/src/lib/base_utils.c
a6134d6f5f3e3b0b4c274eb3b2d0a146644c842b - src/nvidia/src/lib/zlib/inflate.c
2e57601af217d0d8c4986abb593e8864e53e7e0b - src/nvidia/src/libraries/nvoc/src/runtime.c
9ea8bf51c44e500c9963a12a1e2a71ebffe6c4e8 - src/nvidia/src/libraries/nvbitvector/nvbitvector.c
0e7a9b9c697f260438ca5fda8527b0f4edc2de13 - src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c
e5ead344020dfc973ee7c7383e0f687a29642683 - src/nvidia/src/libraries/mapping_reuse/mapping_reuse.c
3c885d2c0e6cfb3f8585bddcba128b02e0196167 - src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c
ee7ea17829dfbbf9e6cd8d6c6fb2ada086b5d36e - src/nvidia/src/libraries/ioaccess/ioaccess.c
ca2ba7f19b705e39dbb8890a84ce84d34fbd8aa4 - src/nvidia/src/libraries/utils/nvassert.c
864bd314450490b687a652335a44fb407835152c - src/nvidia/src/libraries/containers/ringbuf.c
eb919a9e8711830c1c3f7fe71273e0a39862292e - src/nvidia/src/libraries/containers/vector.c
53aa343682f721f57058c7a17b1e872ca6fe7cea - src/nvidia/src/libraries/containers/map.c
7f58f03ec069ad5f5c64fedf4a484cc93473bd04 - src/nvidia/src/libraries/containers/queue.c
23c328fc27ad0317efe6ccd2da71cfd9db9da236 - src/nvidia/src/libraries/containers/multimap.c
ae669a466f1fecf67746a9fafc8c1119294c93d7 - src/nvidia/src/libraries/containers/list.c
9c80df385a47834da4f92dc11053ca40a37a7fe7 - src/nvidia/src/libraries/containers/btree/btree.c
a0e23ad69d805a7de439f0fbf79241c6466efdc2 - src/nvidia/src/libraries/containers/eheap/eheap_old.c
cccb1fedee02a240692688090e00ac1e289dec9e - src/nvidia/src/libraries/tls/tls.c
a045a19d750d48387640ab659bb30f724c34b8c8 - src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c
d047abe66dd8a459c15224cc056fc6f2176b0c6a - src/nvidia/src/libraries/nvport/util/util_gcc_clang.c
f0c486c1ad0f7d9516b13a02d52b4d857d8865b1 - src/nvidia/src/libraries/nvport/util/util_compiler_switch.c
9b69fbf3efea6ba58f9ba7cb0189c9264c994657 - src/nvidia/src/libraries/nvport/sync/sync_common.h
eb8b5fcab51c47f58a37958ddb38ff90991bcbbe - src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c
b2ae1406c94779f575d3e2233a7ab248ac10e74f - src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h
e2fec1a305dfec07456faec8ea5e75f601d76b5e - src/nvidia/src/libraries/nvport/memory/memory_tracking.c
c5a16e5bb7d304ffe5e83d7b27226cbecdbc7ce1 - src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c
db01179ad5e6333844bd3e31b62d0dc262c98875 - src/nvidia/src/libraries/nvport/memory/memory_generic.h
2c00bd224d17c0cc5469b5140f3be3d23b494922 - src/nvidia/src/libraries/nvport/string/string_generic.c
b387005657f81538fab5962d4aabbc5dc681aa1b - src/nvidia/src/libraries/nvport/core/core.c
702c73446bba35f88249cfe609ac0ca39dbd80ff - src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c
9ca28a5af5663dec54b4cd35f48a8a3d8e52e25f - src/nvidia/src/libraries/nvport/cpu/cpu_common.c
a305654bafc883ad28a134a04e83bbd409e0fc06 - src/nvidia/src/libraries/nvport/cpu/cpu_common.h
099c17e5931d5d881d8248ec68041fa0bbc2a9bc - src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c
1f2e9d09e658474b36d0b0ecd9380d0d2bcc86b2 - src/nvidia/src/libraries/resserv/src/rs_domain.c
f9cb28c60e7063ddb5b2a2af4a053a477c95c74b - src/nvidia/src/libraries/resserv/src/rs_server.c
dac54d97b38ad722198ec918668f175dc5122e4e - src/nvidia/src/libraries/resserv/src/rs_access_map.c
ede517ff5f53666a23ad2edec7e9fcd85c6ef7d1 - src/nvidia/src/libraries/resserv/src/rs_client.c
26d872a8495e38065af34aed9a60ab9a08898d40 - src/nvidia/src/libraries/resserv/src/rs_resource.c
408e1e5430e5e507e7e59adc292175150e50b825 - src/nvidia/src/libraries/resserv/src/rs_access_rights.c
304e2fb9bbf6d37358779d4e321f33ac76efcd39 - src/nvidia/src/kernel/diagnostics/nvlog.c
b3a29311cc22e2dae686f8ed2df6bc828aa826cf - src/nvidia/src/kernel/diagnostics/profiler.c
439543a41a36b0959b5f4c099f4adaa379b9f912 - src/nvidia/src/kernel/diagnostics/code_coverage_mgr.c
c1e5733847085bede6eb128eff3bad14549a31db - src/nvidia/src/kernel/diagnostics/nvlog_printf.c
d10c5031c3bc00ae1243729c39496df38d2c9ae3 - src/nvidia/src/kernel/os/os_init.c
2255d1ae2d942c3fed9a4b0a41020d0e49cb8648 - src/nvidia/src/kernel/os/os_timer.c
b887b661ffbe6c223c60f544b1fab32690cd8c75 - src/nvidia/src/kernel/os/os_sanity.c
f228bc86fd9149675cb554d6f596d81fdd4c3770 - src/nvidia/src/kernel/os/os_stubs.c
8800bf3ec679a1c3d36b89992b3f2f95365ec834 - src/nvidia/src/kernel/rmapi/entry_points.c
348c34e13f006f1320536876cb7393d8232e61de - src/nvidia/src/kernel/rmapi/rpc_common.c
8f033323f3ae264a79f779abb163442deb17e88a - src/nvidia/src/kernel/rmapi/rmapi.c
bc7c0b5bd06a1c58714b782d85f740632c6e152f - src/nvidia/src/kernel/rmapi/rmapi_cache_handlers.c
ac6a5b3adf15eac4a7bd9ae24981f6f5fc727097 - src/nvidia/src/kernel/rmapi/deprecated_context.h
b1e57ee17d6641412a4065317be3b81e5db94824 - src/nvidia/src/kernel/rmapi/event_notification.c
a965c5f028c1d47d7da0dd03dabbf8aebc817523 - src/nvidia/src/kernel/rmapi/rs_utils.c
a2ad052692006f70e97fd3d186f19c7ddfe80c4c - src/nvidia/src/kernel/rmapi/deprecated_context.c
7a0a8914b407f836627d8262de2de6cab2dd691d - src/nvidia/src/kernel/rmapi/rmapi_specific.c
d915b65380b59e557e5043f839c42d4105caa111 - src/nvidia/src/kernel/rmapi/rmapi_utils.c
2c5b12d5eb17c313138262cd1e42eb940a4d9ed8 - src/nvidia/src/kernel/rmapi/client.c
ab24efdee819d113fe72ec12c0e359c514151336 - src/nvidia/src/kernel/rmapi/resource_desc_flags.h
1745523e56fc0ff5a45d4b2473e13f0cc6f2afb1 - src/nvidia/src/kernel/rmapi/event_buffer.c
f70b6d7e8f21bf26d9c8171d62cbdf934fe3a30e - src/nvidia/src/kernel/rmapi/rmapi_stubs.c
09fc97bd7daa74a0b2e55fc5632b2f25464412dc - src/nvidia/src/kernel/rmapi/client_resource.c
c21223701bd7afd09e706616105f3f5f365afa5d - src/nvidia/src/kernel/rmapi/rmapi_finn.c
433c6091b3b986151e27ea952cef1dc83ff3095c - src/nvidia/src/kernel/rmapi/lock_test.c
682977753c878ccee6279e539cf11bee2b548752 - src/nvidia/src/kernel/rmapi/resource_desc.c
6dc3f6642c450043cc9b361037f4cb2091e7cb58 - src/nvidia/src/kernel/rmapi/sharing.c
00a6ef509ed8484d038c54b47642bc1a00125077 - src/nvidia/src/kernel/rmapi/lock_stress.c
3b53d6b8ef183702327b4bc3a96aa06f67475ddc - src/nvidia/src/kernel/rmapi/param_copy.c
1c9b26108c6b7f27c5f4fe84e10d83cfb32c9b5b - src/nvidia/src/kernel/rmapi/resource_list.h
3b9809740d88ab4b5b9c9d1adbd3ec304f6f6c7e - src/nvidia/src/kernel/rmapi/resource.c
41c397e2cc8c8b1c9c734c435d2d4c17cf709e63 - src/nvidia/src/kernel/rmapi/mapping_cpu.c
58ed3486109a54829f1afdf214c15529eaed678b - src/nvidia/src/kernel/rmapi/mapping.c
0172aa3770ca55bbfbd5e66f48f4e4820a4d5576 - src/nvidia/src/kernel/rmapi/event.c
e26021985ccfa2fb94c96310d9700df405817889 - src/nvidia/src/kernel/rmapi/control.c
6ee3cc915f68b5b70274eec219b7fd6799479459 - src/nvidia/src/kernel/rmapi/rmapi_cache.c
7a4abc27bdbcbb758545783f4182f200587ae3bd - src/nvidia/src/kernel/rmapi/binary_api.c
f821719c449e0300a3c27ebeaa3f4d6791ddaf60 - src/nvidia/src/kernel/rmapi/alloc_free.c
b7561ece996380512992736f947ddea0ba7f075e - src/nvidia/src/kernel/rmapi/resource_desc.h
72a6ae5bcae8eb4197047aaa5c1780b689544c87 - src/nvidia/src/kernel/rmapi/entry_points.h
4fbbb955e617d7b014e201a5263915939c87f884 - src/nvidia/src/kernel/rmapi/resource_list_required_includes.h
a16bffcad38862470b4424fa9a1b0d4013304600 - src/nvidia/src/kernel/core/hal_mgr.c
4d3f32dbc4cbe3d4d1301079eaf21005f74dea90 - src/nvidia/src/kernel/core/locks_common.c
e7195ca43692b6fbf6a3533437650c596cee88db - src/nvidia/src/kernel/core/locks_minimal.c
ee0bf4f81d33e9a7b6bbb2be27bb3973c8cb5b18 - src/nvidia/src/kernel/core/system.c
905a0f08067503374c757ed34d1ea87379ab4a71 - src/nvidia/src/kernel/core/thread_state.c
afa03f17393b28b9fc791bf09c4d35833447808d - src/nvidia/src/kernel/core/hal/hal.c
d3922085d63a7edf02b582fe0b6e3acba6124c25 - src/nvidia/src/kernel/core/hal/hals_all.c
8eac3ea49f9a53063f7106211e5236372d87bdaf - src/nvidia/src/kernel/core/hal/info_block.c
1f258d22d361a8902c27a4329e553a73b3fbe6e9 - src/nvidia/src/kernel/gpu/device.c
f520afc43afd9e40f779d2bdf3acc48ff7419625 - src/nvidia/src/kernel/gpu/eng_state.c
7ed54a614b756e32a61366d2009db26d1ef5fcc4 - src/nvidia/src/kernel/gpu/gpu_arch.c
1b2a50c873087a28cc4edd4a65945bcafc84bcf0 - src/nvidia/src/kernel/gpu/gpu_uuid.c
5bbac8b7323fe7f048e54b2ebc3ebe4f30655181 - src/nvidia/src/kernel/gpu/gpu.c
c7f5b73c217a181f5ff28886bf691ec7d528cb86 - src/nvidia/src/kernel/gpu/gpu_resource.c
2408846a2a5c24a102df13919f384c6675f56f29 - src/nvidia/src/kernel/gpu/device_ctrl.c
2b40a86a112c7643a69b094194c2ee1dd294f16a - src/nvidia/src/kernel/gpu/gpu_gspclient.c
261a5b014b3869c3ce5e830cf8b9529fa0b8a09d - src/nvidia/src/kernel/gpu/gpu_resource_desc.c
4e1be780ac696a61f056933e5550040a2d42c6bd - src/nvidia/src/kernel/gpu/gpu_device_mapping.c
57941830e179d534a7329608658c82fd91ff4a57 - src/nvidia/src/kernel/gpu/gpu_timeout.c
89a6229720a7d5276d73ad51a210ce6f60cedb08 - src/nvidia/src/kernel/gpu/gpu_user_shared_data.c
bc508781e640dbf756d9c9e43e75227d05b413c7 - src/nvidia/src/kernel/gpu/device_share.c
84c2c6a59313d36aa70c8a01cfedf1d1e7a3d931 - src/nvidia/src/kernel/gpu/gpu_access.c
d0d744c416a52404a52c35ede015629990934003 - src/nvidia/src/kernel/gpu/gpu_engine_type.c
12c1f9494317c34b1b9bfcc58bf7bee81b08c98e - src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c
f4b1e5dfefeae6021e6ca20e73d0fcaec1b5e95e - src/nvidia/src/kernel/gpu/gpu_rmapi.c
099da8d641fb4481f9a4c625588dd4aa4ce20bcd - src/nvidia/src/kernel/gpu/subdevice/subdevice.c
6fab19f1f68bdb8d2b969efc6f030e2066bc6b5e - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c
b4e503b320119fecdb22dfda1268ce31e1a7ecd7 - src/nvidia/src/kernel/gpu/subdevice/generic_engine.c
9afe5cedd5e7d535ee56f4f5b3cc549f154d8be2 - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c
796d1368584a9318a39ed313dcb86bbcca40ad83 - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c
4c363a34fe12b9bb0d428c3d90974d7085d0366f - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_internal_kernel.c
fcf79cf10019193a9e57f8d19b5a37bac6120365 - src/nvidia/src/kernel/gpu/arch/t25x/kern_gpu_t256d.c
095d4a87b067038bd2d80a1c4b2d9407810b0e66 - src/nvidia/src/kernel/gpu/arch/t26x/kern_gpu_t264d.c
c20ed8bd9fda88b036c6ff677b7c25ebd171434f - src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_arch_t234d.c
b09af17437a01e63e960414a4534074da240dc59 - src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c
ceb516c8064e1df2d18897f98f5c8ea58e907973 - src/nvidia/src/kernel/gpu/disp/disp_capabilities.c
c67baeb5df33080d99f322786759fc3f5436301d - src/nvidia/src/kernel/gpu/disp/disp_channel.c
8fafebf746bfcde2c53435be386a8a0846973b0c - src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c
6437dd659a38c62cd81fb59f229bd94e59f37e71 - src/nvidia/src/kernel/gpu/disp/disp_sf_user.c
0fbfb9dd91147f04bea1060788efc1121078c159 - src/nvidia/src/kernel/gpu/disp/kern_disp.c
5aa67b54fcd16f648d7a72b9c2c4ff3fb6d3a5be - src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c
56027ec220553e1febe42f37fd70757cbb034dcb - src/nvidia/src/kernel/gpu/disp/disp_objs.c
b95080033ecc8736a0cdf9476cec7563c4a2af0f - src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank.c
caba45a10f43e7817f491e7856ef30dd49782f6e - src/nvidia/src/kernel/gpu/disp/head/kernel_head.c
f59763139d9993ae545ded8057706cc4d65afc0c - src/nvidia/src/kernel/gpu/disp/head/arch/v04/kernel_head_0401.c
eb00ffa5a892558d39db15f473e2c308acfd86d9 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0404.c
2b19caf7def14190c99dc4e41983b4a3e3334f22 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0401.c
6d99d644a8294d08b0fdebf183306bbdadf819e3 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c
57fec208154cd0d25838a688f6457598baf2de7a - src/nvidia/src/kernel/gpu/disp/arch/v02/kern_disp_0204.c
64aa574198449e9556328d1c08f08b3bde5bfad0 - src/nvidia/src/kernel/gpu/disp/arch/v05/kern_disp_0501.c
d911e6ae9f7b96e6f441208d38701a8d833e7455 - src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c
ae5ef73d6e74026e0b847977c41b92cbf0f30a62 - src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c
4cfab589176c432463859f148ad32c7dac2c83d3 - src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c
60e8d1fa9cd375be783c4575baa2e99ac2b22a88 - src/nvidia/src/kernel/gpu/timer/timer.c
f6e518524581b772f8fdbc80418a2018570940ca - src/nvidia/src/kernel/gpu/timer/timer_ostimer.c
1f4d15f959df38f4f6ea48c7b10fc859c6e04b12 - src/nvidia/src/kernel/gpu/audio/hda_codec_api.c
10a8bfd47ce609763c07a0d61be2f71f9f91889e - src/nvidia/src/kernel/gpu/mem_mgr/mem_ctrl.c
bfc82499a8b9b8ce10411f6c391b0e575dc7c0d6 - src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c
a62f423d6cf69e96b0523a233ec00353d63ee8bd - src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c
92611eb4f3bed31064a9efbb54a1ece7ffcfc2af - src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c
4a95b73f744807d96510b0ad7181eae5b12839ce - src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c
ce09583697a98a2d0e8466dd45764f15945f55c2 - src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c
cebb9eee63e23bb934881b3313e422b50fb38abb - src/nvidia/src/kernel/gpu/dce_client/dce_client.c
d5d8ff429d3bda7103bafcb2dca94678efc8ddd8 - src/nvidia/src/kernel/gpu_mgr/gpu_group.c
2b49d8a3413a1731bc4fb0bab3f32ff272a71a8c - src/nvidia/src/kernel/gpu_mgr/gpu_db.c
37d1e3dd86e6409b8e461f90386e013194c9e4d1 - src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c
fe618e428d9a172a0fd9412f5a20df64d7270418 - src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c
593bbc5b93b620019144fadf1281a180ec050012 - src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c
54c1d1a44474a7027c5290551e60f13678226301 - src/nvidia/src/kernel/mem_mgr/standard_mem.c
44069d6ebbd94a11267e6cc0179ab167f91faec4 - src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c
5a5e689cf264134ae8c4300d986c209c04167743 - src/nvidia/src/kernel/mem_mgr/vaspace.c
5b9048e62581a3fbb0227d1a46c4ee8d8397bf5b - src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h
630200d06b6588d7fa8c5b1ea16146e8281163d7 - src/nvidia/src/kernel/mem_mgr/io_vaspace.c
04876ed2dedf0ac3228ec6261a0f3f79609e44a5 - src/nvidia/src/kernel/mem_mgr/system_mem.c
873de51b330501a86ec7656fcf3f615034c49f8e - src/nvidia/src/kernel/mem_mgr/os_desc_mem.c
ed8376f04af08af8da7d47c6340ff38a8910de87 - src/nvidia/src/kernel/mem_mgr/mem.c
08762b3172f6309f1aeab895761193fa19cb176f - src/nvidia/interface/nv_sriov_defines.h
024b112ea410ee1b1badb585b03fdbabb64ade34 - src/nvidia/interface/nvrm_registry.h
3f7b20e27e6576ee1f2f0557d269697a0b8af7ec - src/nvidia/interface/nv-firmware-registry.h
d02ee5bb3f19dffd8b5c30dc852cea243bcdf399 - src/nvidia/interface/acpidsmguids.h
60c7cafce7bd5240e8409e3c5b71214262347efc - src/nvidia/interface/acpigenfuncs.h
bff92c9767308a13df1d0858d5f9c82af155679a - src/nvidia/interface/nvacpitypes.h
7790849d0d261e84d04ab5a481bb57309de6409a - src/nvidia/interface/deprecated/rmapi_deprecated_utils.c
82f65de514ef7e2204cfb618d398cf3af8c12778 - src/nvidia/interface/deprecated/rmapi_deprecated.h
49e299b7257e179b701747e061b6b0214d5565f0 - src/nvidia/interface/rmapi/src/g_finn_rm_api.c
7b8431767b7c4b3861582ddab27a079568bf0660 - src/nvidia-modeset/Makefile
7e1249c1d187aec5891eabe5bacae2189d33dc55 - src/nvidia-modeset/lib/nvkms-sync.c
c3ab6005d7083e90145cac66addf815c4f93d9a0 - src/nvidia-modeset/lib/nvkms-format.c
f69ac0ec080036b8abc7f1ae7b857989f5c9df4a - src/nvidia-modeset/include/nvkms-headsurface-3d.h
b8854261256a801af52d1201081afa9c17486a96 - src/nvidia-modeset/include/nvkms-3dvision.h
3212e81bcde5a5dcec5dbba4155a41ca52dd2304 - src/nvidia-modeset/include/nvkms-prealloc.h
24aaf3a4cb16be7a5aaa8317090142743e3dd797 - src/nvidia-modeset/include/nvkms-flip-workarea.h
be6cff078fcf66221762a4af1515e01d294dd2f6 - src/nvidia-modeset/include/nvkms-push.h
4361f10ff446c401c3f52bf36aed52ca24706d49 - src/nvidia-modeset/include/nvkms-vrr.h
08aa0dd2f18a8cf74539ea8b25ef3f3646567a0c - src/nvidia-modeset/include/nvkms-evo1.h
9bfb2d12ecdaecaba7eaaffa3040ab142d37f892 - src/nvidia-modeset/include/nvkms-prealloc-types.h
0bd9cf097cfa373f0bed7be8fe5299e2ea4bf669 - src/nvidia-modeset/include/g_nvkms-evo-states.h
708e037052ea0b3d6309fa44a205282b7a69a331 - src/nvidia-modeset/include/nvkms-difr.h
412d8028a548e67e9ef85cb7d3f88385e70c56f9 - src/nvidia-modeset/include/nvkms-console-restore.h
52b6d1a1a6793d232571e6366709436b018ae3b7 - src/nvidia-modeset/include/nvkms-dpy.h
81fcc817dfb8ae1f98b63d2c1acacc303fedb554 - src/nvidia-modeset/include/nvkms-dpy-override.h
0f251b41b076bb80eeebf7d54e6fd6c764404c28 - src/nvidia-modeset/include/nvkms-evo-states.h
70d9251f331bbf28f5c5bbdf939ebad94db9362d - src/nvidia-modeset/include/nvkms-softfloat.h
6e3681d5caa36312804c91630eaaf510eda897d2 - src/nvidia-modeset/include/nvkms-dma.h
eb5248c4b0b51e7aecd2de87e496253b3b235c70 - src/nvidia-modeset/include/nvkms-utils-flip.h
377dd4a29b2ea5937a9b8fc3fba0c9e4ef92992e - src/nvidia-modeset/include/nvkms-cursor.h
e1225d674a0e6e58110750868c45a4655110a4d8 - src/nvidia-modeset/include/nvkms-headsurface-swapgroup.h
9e3d50761d3a27c1db3085ff82b7d194ff47bf34 - src/nvidia-modeset/include/nvkms-rm.h
fd9fa6da0fc28b00be524b0bed25a68c56278363 - src/nvidia-modeset/include/nvkms-modeset.h
be6e0e97c1e7ffc0daa2f14ef7b05b9f9c11dc16 - src/nvidia-modeset/include/nvkms-attributes.h
e30d9c286263051d14a1862f0c630295a78abde7 - src/nvidia-modeset/include/nvkms-headsurface-priv.h
3fd0822b8b44d13685ecde9d02300e6cfbb123db - src/nvidia-modeset/include/nvkms-hdmi.h
6b21a68e254becdd2641bc456f194f54c23abe51 - src/nvidia-modeset/include/nvkms-framelock.h
53122264a19ea00ef26e6accde3a3a7570e46b15 - src/nvidia-modeset/include/nvkms-vblank-sem-control.h
1b21352fd9d0b1c5708cb8512acf20ba2e13955d - src/nvidia-modeset/include/nvkms-headsurface.h
59d20eff40e4e488eb3ab7c97b5e171142dcdbcf - src/nvidia-modeset/include/nvkms-modeset-workarea.h
933f9b359a1c3807771e2719c6dd80d71beff3c8 - src/nvidia-modeset/include/nvkms-utils.h
f5f3b11c78a8b0eef40c09e1751615a47f516edb - src/nvidia-modeset/include/nvkms-hal.h
03f3fd4c2fb7db83441805a5c350b121bd3117b4 - src/nvidia-modeset/include/nvkms-setlut-workarea.h
31acf6af2a4c82e3429efa77d110cb346c11905f - src/nvidia-modeset/include/nvkms-lut.h
e4bae9a0df729119071902f7ad59704c97adee0e - src/nvidia-modeset/include/nvkms-private.h
fbe2cbfd32b40d8188c6b25716fb360720ab5760 - src/nvidia-modeset/include/nvkms-evo.h
04f2e01c7f798a615319accc2dd713f617a81172 - src/nvidia-modeset/include/nvkms-headsurface-config.h
4a94381bd8c24b09193577d3f05d6d61f178e1cf - src/nvidia-modeset/include/nvkms-ctxdma.h
b4d53599736b03ee1bc149abe7b602336f40295c - src/nvidia-modeset/include/nvkms-flip.h
46fc0e138ba7be5fa3ea0ada3ee0a78656950c80 - src/nvidia-modeset/include/nvkms-modeset-types.h
260b6ef87c755e55a803adad4ce49f2d57315f9a - src/nvidia-modeset/include/nvkms-event.h
35fa1444c57f7adbbddddc612237f3ad38cdd78f - src/nvidia-modeset/include/nvkms-rmapi.h
8782df838ea3d2617e9842c89389f51137b19a73 - src/nvidia-modeset/include/nvkms-headsurface-matrix.h
881d7e4187ff9c7e9d02672aedafc1605f3055ec - src/nvidia-modeset/include/nvkms-modepool.h
60c01e29aa91aa80bf3750a1b11fe61a6cdfde58 - src/nvidia-modeset/include/nvkms-types.h
cc3dc4021b76782434efd2aa81d3ffdd1f3b1f0a - src/nvidia-modeset/include/nvkms-headsurface-ioctl.h
3dc2113c55970fa70b7afb4fd30f2f1e777ebc12 - src/nvidia-modeset/include/nvkms-surface.h
aa43ad7f970331c56378b7797f66b0a77d8e99dd - src/nvidia-modeset/include/nvkms-evo3.h
8c7e0e15c1038fe518e98d8f86fafb250b10a1d2 - src/nvidia-modeset/include/nvkms-stereo.h
9deeeae9081fd828a14f3b0df5fbf17a81161786 - src/nvidia-modeset/include/nvkms-hw-flip.h
6460f8427fdb375d659975c7f6eaadaca0ed2b2c - src/nvidia-modeset/include/dp/nvdp-device.h
1912d523f567c4fc36075942cf8acaf5d5478232 - src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h
a233bdcd5daa0582acf2cd5b0f339ad54d09bf13 - src/nvidia-modeset/include/dp/nvdp-timer.h
2b91423ff88ca398324088d4f910e81f6944123a - src/nvidia-modeset/include/dp/nvdp-connector.h
aa8aa13c6fc48ff5ef621f243e94dcc01a46dea3 - src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h
c0de6efe1d5c57da324118f108ea0570a6923036 - src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h
1ec3df41f465a332cae8d3bb97e575c9a2b936f4 - src/nvidia-modeset/kapi/src/nvkms-kapi.c
a4d52bb238ce94f3427f25bd169e58d5d5f4abd1 - src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c
ce42ceac4c4cf9d249d66ab57ae2f435cd9623fc - src/nvidia-modeset/kapi/src/nvkms-kapi-sync.c
80c2c9a2a05beb0202239db8b0dd7080ff21c194 - src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h
29309411e2bf1c2e6492a104dcb9f53705c2e9aa - src/nvidia-modeset/kapi/interface/nvkms-kapi.h
11af2aeea97398b58f628fe4685b5dfcfda5791b - src/nvidia-modeset/src/nvkms-modeset.c
016fd1b111731c6d323425d52bfe1a04d8bcade7 - src/nvidia-modeset/src/nvkms-headsurface-swapgroup.c
37a6d00e8721a9c4134810f8be3e7168f8cbb226 - src/nvidia-modeset/src/nvkms-evo.c
4758c601621603597bd2387c4f08b3fdc17e375d - src/nvidia-modeset/src/nvkms-hw-flip.c
5e3188c2d9b580ff69e45842f841f5c92c0c6edb - src/nvidia-modeset/src/nvkms-headsurface-ioctl.c
e1a3c31638416a0132c5301fe5dd4b1c93f14376 - src/nvidia-modeset/src/nvkms-cursor3.c
d48ff2da5fac6f8cd0522a25b947b5b8c01812ba - src/nvidia-modeset/src/nvkms-rm.c
30ad7839985dea46e6b6d43499210a3056da51ad - src/nvidia-modeset/src/nvkms-utils-flip.c
2c24667a18374ae967917df219f3775d9a79ae04 - src/nvidia-modeset/src/nvkms-headsurface-3d.c
fb8b4aa1e36f23e1927be3dbd351ab0357aeb735 - src/nvidia-modeset/src/nvkms-evo3.c
9ce404d122bbdcd5f626f2c2b7ff08a9bfcf4045 - src/nvidia-modeset/src/nvkms-flip.c
e5c96eb6b9884daf4a8d0d467b009008a45065b9 - src/nvidia-modeset/src/g_nvkms-evo-states.c
094c2169412cb577a6e9db9420da084264119284 - src/nvidia-modeset/src/nvkms-hal.c
1e0bf57319954911ddd2fe87b0cd05e257f1439e - src/nvidia-modeset/src/nvkms-surface.c
bd2e4a6102432d4ac1faf92b5d3db29e9e3cfafc - src/nvidia-modeset/src/nvkms-utils.c
6d41c9f84cc9ce2d16812e94a3fba055b3fc7308 - src/nvidia-modeset/src/nvkms-conf.c
05bfe67d8cb956a666804b8f27e507bbd35e2c2d - src/nvidia-modeset/src/nvkms-difr.c
9a8746ee4a4e772b8ac13f06dc0de8a250fdb4c7 - src/nvidia-modeset/src/nvkms-ctxdma.c
382141f251ce64e2d33add3b89225c373da9ea7d - src/nvidia-modeset/src/nvkms-hdmi.c
2e1644a912e7a27ec04288e000c3fa5439eecb60 - src/nvidia-modeset/src/nvkms-headsurface-matrix.c
127a3f77febf09d56b6fe3534bc62ff0ffa535d8 - src/nvidia-modeset/src/nvkms-dpy.c
e0756f45732035b1000a03bd8a995a46041904ae - src/nvidia-modeset/src/nvkms-vblank-sem-control.c
e4044bb85de59d662d0d579771c076cbe9b10bbb - src/nvidia-modeset/src/nvkms.c
12cbc57714f458b5673115bb5c4d380509d05277 - src/nvidia-modeset/src/nvkms-cursor.c
5c93bc35d8f93330dd7a1f7808e39c6001ee83e8 - src/nvidia-modeset/src/nvkms-headsurface-config.c
ed78249de63139ec2629bde58b616cef649281f1 - src/nvidia-modeset/src/nvkms-evo2.c
c51c4f2e3ac11bf86d4549ce5e9d9010199e37dd - src/nvidia-modeset/src/nvkms-prealloc.c
9d38d5147d06a293a272087d78d0b96b6003f11e - src/nvidia-modeset/src/nvkms-attributes.c
65b02b48caff2a9100b8c5614f91d42fb20da9c0 - src/nvidia-modeset/src/nvkms-dpy-override.c
a62b617aa5c89056c19a5f3c91402df8cfcc1103 - src/nvidia-modeset/src/nvkms-push.c
9fea40b7b55d6ebf3f73b5d469751c873ffbe7c0 - src/nvidia-modeset/src/nvkms-dma.c
da726d20eea99a96af4c10aace88f419e8ee2a34 - src/nvidia-modeset/src/nvkms-event.c
a1c7c3c1191762c0a1038674dee0075d532ccd2d - src/nvidia-modeset/src/nvkms-headsurface.c
2fabe1c14116a2b07f24d01710394ee84a6e3914 - src/nvidia-modeset/src/nvkms-3dvision.c
89b58b1e67ff7ed43c889fe7d85329d7f4762b91 - src/nvidia-modeset/src/nvkms-hw-states.c
c799d52bdc792efc377fb5cd307b0eb445c44d6a - src/nvidia-modeset/src/nvkms-cursor2.c
dd6c86b5557b02dd15a8ea0f10bde9770d90874e - src/nvidia-modeset/src/nvkms-evo4.c
be49ea18102a44914e0d7686c51430df18336383 - src/nvidia-modeset/src/nvkms-framelock.c
6bdb90474b5d31c53104f7b29b447b3f798aaa0e - src/nvidia-modeset/src/nvkms-vrr.c
05ca4acdfeb9b99eccc7e222846fc688473322ae - src/nvidia-modeset/src/nvkms-rmapi-dgpu.c
f754a27436fd1e1fa103de6110224c21ad7ea9f4 - src/nvidia-modeset/src/nvkms-pow.c
e8c6d2eedfba19f8f06dd57f629588615cf1a2e9 - src/nvidia-modeset/src/nvkms-evo1.c
d15f314bea66574e0ffc72966b86bae8366412f5 - src/nvidia-modeset/src/nvkms-console-restore.c
0699860902369359e5ff1a0ef46b87e955d4bb7a - src/nvidia-modeset/src/nvkms-modepool.c
403e6dbff0a607c2aecf3204c56633bd7b612ae2 - src/nvidia-modeset/src/nvkms-stereo.c
fd6ecacc4f273c88960148c070dd17d93f49909b - src/nvidia-modeset/src/nvkms-lut.c
771fee54d1123871e380db6f3227b4946b6be647 - src/nvidia-modeset/src/dp/nvdp-timer.cpp
6b985fc50b5040ce1a81418bed73a60edb5d3289 - src/nvidia-modeset/src/dp/nvdp-timer.hpp
dcf9f99e79a13b109a8665597f0fc7c00ec37957 - src/nvidia-modeset/src/dp/nvdp-connector.cpp
e0e50fc1c526ecf0fe2f60689a25adda1257e2b3 - src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp
16081091156a813977dfdd0718d55ea4a66a0686 - src/nvidia-modeset/src/dp/nvdp-device.cpp
6e17f81da1b94414c1cbf18c3ea92f25352d8bf5 - src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp
81065db63fda6468fdf56d853781fca8af610798 - src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp
e1f003a64cec57f299e65567d29e69951a62f44a - src/nvidia-modeset/src/dp/nvdp-host.cpp
ca07b8e8f507de47694ac7b3b1719b0931da02c6 - src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp
2b49249a135293d01e82ef11ee520596c9825875 - src/nvidia-modeset/src/shaders/g_pascal_shaders
09cb78322cc8465d42a4be6a1c3682566c66462d - src/nvidia-modeset/src/shaders/g_maxwell_shaders
a62c80e00077041d38d84e06c5834dca527e8a55 - src/nvidia-modeset/src/shaders/g_volta_shader_info.h
21cf709a8717d43c4abc6b66c8faad141592b7ce - src/nvidia-modeset/src/shaders/g_nvidia-headsurface-shader-info.h
fec9074463a5505e300f9feb77b60ec77b781bb7 - src/nvidia-modeset/src/shaders/g_turing_shader_info.h
cad54ab33c1132ba7453f54e9a02d34504e4fd5c - src/nvidia-modeset/src/shaders/g_pascal_shader_info.h
f3bdeb7d46fdc9c31940ea799ce4a0d328fe1844 - src/nvidia-modeset/src/shaders/g_ampere_shaders
0ba4739302e0938b5599afb7e7ad281b21e25cec - src/nvidia-modeset/src/shaders/g_maxwell_shader_info.h
1c02043d31faf4f79c4a54dd5a622e87ee276be8 - src/nvidia-modeset/src/shaders/g_volta_shaders
f540d144503d00941a1b32fb1a3d13061065b24e - src/nvidia-modeset/src/shaders/g_hopper_shader_info.h
74824b796722071bc3d90e4dacfed245dcda28cd - src/nvidia-modeset/src/shaders/g_turing_shaders
ce728856b76bfa428b199fd3b97e0cbc24ef54cd - src/nvidia-modeset/src/shaders/g_hopper_shaders
02bb8bc0f5d228d4a9a383d797daffd8936c4ad7 - src/nvidia-modeset/src/shaders/g_ampere_shader_info.h
9f35175e44247d4facb26a60614d40fcdb74416f - src/nvidia-modeset/src/shaders/g_shader_names.h
ca86fee8bd52e6c84e376199c5f3890078bc2031 - src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h
885370cd7fc5f4d9d685d9d8d3e21460fd30cb38 - src/nvidia-modeset/os-interface/include/nvkms.h
51b367a6e289cc8957388745988315024f97506e - src/nvidia-modeset/interface/nvkms-api.h
b986bc6591ba17a74ad81ec4c93347564c6d5165 - src/nvidia-modeset/interface/nvkms-format.h
2ea1436104463c5e3d177e8574c3b4298976d37e - src/nvidia-modeset/interface/nvkms-ioctl.h
3bf4a2d1fec120ef5313c8bf119bc22fb3cf0cc5 - src/nvidia-modeset/interface/nvkms-modetimings.h
c54c62de441828282db9a4f5b35c2fa5c97d94f1 - src/nvidia-modeset/interface/nvkms-api-types.h
8e3e74d2b3f45381e7b0012d930cf451cbd1728f - src/nvidia-modeset/interface/nvkms-sync.h

Change-Id: Ifb2180253a3f7d7b847ae6067701eddc8c1acc85
This commit is contained in:
svcmobrel-release
2025-10-10 11:32:33 -07:00
parent 0872bd5b3b
commit 0678317669
1519 changed files with 723068 additions and 0 deletions

View File

@@ -0,0 +1,47 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __DETECT_SELF_HOSTED_H__
#define __DETECT_SELF_HOSTED_H__
/*
 * Return nonzero when the PCI device ID belongs to a self-hosted Hopper
 * (GH100) part. The GH100 self-hosted IDs occupy [0x2340, 0x237f].
 */
static inline int pci_devid_is_self_hosted_hopper(unsigned short devid)
{
    const unsigned short gh100_first = 0x2340; /* GH100 Self-Hosted range start */
    const unsigned short gh100_last  = 0x237f; /* GH100 Self-Hosted range end   */

    return (gh100_first <= devid) && (devid <= gh100_last);
}
/*
 * Return nonzero when the PCI device ID belongs to a self-hosted Blackwell
 * part. Two ranges qualify: GB100 [0x2940, 0x297f] and GB110 [0x31c0, 0x31ff].
 */
static inline int pci_devid_is_self_hosted_blackwell(unsigned short devid)
{
    if (devid >= 0x2940 && devid <= 0x297f) /* GB100 Self-Hosted */
        return 1;

    return devid >= 0x31c0 && devid <= 0x31ff; /* GB110 Self-Hosted */
}
/*
 * Return nonzero when the PCI device ID belongs to any self-hosted GPU:
 * currently the Hopper (GH100) or Blackwell (GB100/GB110) families.
 */
static inline int pci_devid_is_self_hosted(unsigned short devid)
{
    if (pci_devid_is_self_hosted_hopper(devid))
        return 1;

    return pci_devid_is_self_hosted_blackwell(devid);
}
#endif

View File

@@ -0,0 +1,211 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_AEAD_H
#define CRYPTLIB_AEAD_H
/*=====================================================================================
* Authenticated Encryption with Associated data (AEAD) Cryptography Primitives
*=====================================================================================
*/
#if LIBSPDM_AEAD_GCM_SUPPORT
/**
* Performs AEAD AES-GCM authenticated encryption on a data buffer and additional authenticated
* data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 16 or 32, otherwise false is returned.
* tag_size must be 12, 13, 14, 15, 16, otherwise false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be encrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[out] tag_out Pointer to a buffer that receives the authentication tag output.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the encryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD AES-GCM authenticated encryption succeeded.
* @retval false AEAD AES-GCM authenticated encryption failed.
**/
extern bool libspdm_aead_aes_gcm_encrypt(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag_out, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
/**
* Performs AEAD AES-GCM authenticated decryption on a data buffer and additional authenticated
* data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 16 or 32, otherwise false is returned.
* tag_size must be 12, 13, 14, 15, 16, otherwise false is returned.
*
* If data verification fails, false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be decrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[in] tag Pointer to a buffer that contains the authentication tag.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the decryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD AES-GCM authenticated decryption succeeded.
* @retval false AEAD AES-GCM authenticated decryption failed.
**/
extern bool libspdm_aead_aes_gcm_decrypt(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
#endif /* LIBSPDM_AEAD_GCM_SUPPORT */
#if LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT
/**
* Performs AEAD ChaCha20Poly1305 authenticated encryption on a data buffer and additional
* authenticated data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 32, otherwise false is returned.
* tag_size must be 16, otherwise false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be encrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[out] tag_out Pointer to a buffer that receives the authentication tag output.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the encryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD ChaCha20Poly1305 authenticated encryption succeeded.
* @retval false AEAD ChaCha20Poly1305 authenticated encryption failed.
**/
extern bool libspdm_aead_chacha20_poly1305_encrypt(
const uint8_t *key, size_t key_size, const uint8_t *iv,
size_t iv_size, const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size, uint8_t *tag_out,
size_t tag_size, uint8_t *data_out, size_t *data_out_size);
/**
* Performs AEAD ChaCha20Poly1305 authenticated decryption on a data buffer and additional authenticated data (AAD).
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 32, otherwise false is returned.
* tag_size must be 16, otherwise false is returned.
*
* If data verification fails, false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be decrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[in] tag Pointer to a buffer that contains the authentication tag.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the decryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD ChaCha20Poly1305 authenticated decryption succeeded.
* @retval false AEAD ChaCha20Poly1305 authenticated decryption failed.
*
**/
extern bool libspdm_aead_chacha20_poly1305_decrypt(
const uint8_t *key, size_t key_size, const uint8_t *iv,
size_t iv_size, const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size, const uint8_t *tag,
size_t tag_size, uint8_t *data_out, size_t *data_out_size);
#endif /* LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT */
#if LIBSPDM_AEAD_SM4_SUPPORT
/**
* Performs AEAD SM4-GCM authenticated encryption on a data buffer and additional authenticated
* data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 16, otherwise false is returned.
* tag_size must be 16, otherwise false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be encrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[out] tag_out Pointer to a buffer that receives the authentication tag output.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the encryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD SM4-GCM authenticated encryption succeeded.
* @retval false AEAD SM4-GCM authenticated encryption failed.
**/
extern bool libspdm_aead_sm4_gcm_encrypt(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag_out, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
/**
* Performs AEAD SM4-GCM authenticated decryption on a data buffer and additional authenticated
* data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 16, otherwise false is returned.
* tag_size must be 16, otherwise false is returned.
*
* If data verification fails, false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be decrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[in] tag Pointer to a buffer that contains the authentication tag.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the decryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD SM4-GCM authenticated decryption succeeded.
* @retval false AEAD SM4-GCM authenticated decryption failed.
**/
extern bool libspdm_aead_sm4_gcm_decrypt(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
#endif /* LIBSPDM_AEAD_SM4_SUPPORT */
#endif /* CRYPTLIB_AEAD_H */

View File

@@ -0,0 +1,416 @@
/**
* Copyright Notice:
* Copyright 2021-2024 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_CERT_H
#define CRYPTLIB_CERT_H
#if LIBSPDM_CERT_PARSE_SUPPORT
/**
* Retrieve the tag and length of the tag.
*
* @param ptr The position in the ASN.1 data.
* @param end End of data.
* @param length The variable that will receive the length.
* @param tag The expected tag.
*
* @retval true Get tag successful.
* @retval false Failed to get tag or tag not match.
**/
extern bool libspdm_asn1_get_tag(uint8_t **ptr, const uint8_t *end, size_t *length, uint32_t tag);
/**
* Retrieve the subject bytes from one X.509 certificate.
*
* If cert is NULL, then return false.
* If subject_size is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] cert_subject Pointer to the retrieved certificate subject bytes.
* @param[in, out] subject_size The size in bytes of the cert_subject buffer on input,
* and the size of buffer returned cert_subject on output.
*
* @retval true If the subject_size is not equal 0. The certificate subject retrieved successfully.
* @retval true If the subject_size is equal 0. The certificate parse successful. But the cert doesn't have subject.
* @retval false If the subject_size is not equal 0. The certificate subject was retrieved successfully, but the subject_size is too small for the result.
* @retval false If the subject_size is equal 0. Invalid certificate.
**/
extern bool libspdm_x509_get_subject_name(const uint8_t *cert, size_t cert_size,
uint8_t *cert_subject,
size_t *subject_size);
/**
* Retrieve the version from one X.509 certificate.
*
* If cert is NULL, then return false.
* If cert_size is 0, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] version Pointer to the retrieved version integer.
*
* @retval true
* @retval false
**/
extern bool libspdm_x509_get_version(const uint8_t *cert, size_t cert_size, size_t *version);
/**
* Retrieve the serialNumber from one X.509 certificate.
*
* If cert is NULL, then return false.
* If cert_size is 0, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] serial_number Pointer to the retrieved certificate serial_number bytes.
* @param[in, out] serial_number_size The size in bytes of the serial_number buffer on input,
* and the size of buffer returned serial_number on output.
*
* @retval true
* @retval false
**/
extern bool libspdm_x509_get_serial_number(const uint8_t *cert, size_t cert_size,
uint8_t *serial_number,
size_t *serial_number_size);
#if LIBSPDM_ADDITIONAL_CHECK_CERT
/**
* Retrieve the signature algorithm from one X.509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] oid Signature algorithm Object identifier buffer.
* @param[in,out] oid_size Signature algorithm Object identifier buffer size.
*
* @retval true if the oid_size is equal 0, the cert parse successfully, but cert doesn't have signature algo.
* @retval true if the oid_size is not equal 0, the cert parse and get signature algo successfully.
* @retval false if the oid_size is equal 0, the cert parse failed.
* @retval false if the oid_size is not equal 0, the cert parse and get signature algo successfully, but the input buffer size is small.
**/
extern bool libspdm_x509_get_signature_algorithm(const uint8_t *cert,
size_t cert_size, uint8_t *oid,
size_t *oid_size);
#endif /* LIBSPDM_ADDITIONAL_CHECK_CERT */
/**
* Retrieve the issuer bytes from one X.509 certificate.
*
* If cert is NULL, then return false.
* If issuer_size is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] cert_issuer Pointer to the retrieved certificate subject bytes.
* @param[in, out] issuer_size The size in bytes of the cert_issuer buffer on input,
* and the size of buffer returned cert_issuer on output.
*
* @retval true If the issuer_size is not equal 0. The certificate issuer retrieved successfully.
* @retval true If the issuer_size is equal 0. The certificate parse successful. But the cert doesn't have issuer.
* @retval false If the issuer_size is not equal 0. The certificate issuer retrieved successfully. But the issuer_size is too small for the result.
* @retval false If the issuer_size is equal 0. Invalid certificate.
**/
extern bool libspdm_x509_get_issuer_name(const uint8_t *cert, size_t cert_size,
uint8_t *cert_issuer,
size_t *issuer_size);
/**
* Retrieve Extension data from one X.509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[in] oid Object identifier buffer
* @param[in] oid_size Object identifier buffer size
* @param[out] extension_data Extension bytes.
* @param[in, out] extension_data_size Extension bytes size.
*
* @retval true If the returned extension_data_size == 0, it means that cert and oid are valid, but the oid extension is not found;
* If the returned extension_data_size != 0, it means that cert and oid are valid, and the oid extension is found;
* @retval false If the returned extension_data_size == 0, it means that cert or oid are invalid;
* If the returned extension_data_size != 0, it means that cert and oid are valid, and the oid extension is found,
* but the store buffer is too small.
**/
extern bool libspdm_x509_get_extension_data(const uint8_t *cert, size_t cert_size,
const uint8_t *oid, size_t oid_size,
uint8_t *extension_data,
size_t *extension_data_size);
/**
* Retrieve the Validity from one X.509 certificate
*
* If cert is NULL, then return false.
* If from_size or to_size is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] from notBefore Pointer to date_time object.
* @param[in,out] from_size notBefore date_time object size.
* @param[out] to notAfter Pointer to date_time object.
* @param[in,out] to_size notAfter date_time object size.
*
* Note: libspdm_x509_compare_date_time to compare date_time object
* x509SetDateTime to get a date_time object from a date_time_str
*
* @retval true if the from_size and to_size are not equal 0.
* The certificate Validity retrieved successfully.
* @retval true if the from_size and to_size are equal 0.
* The certificate Validity does not exist.
* @retval false if the from_size and to_size are not equal 0.
* The certificate Validity retrieved successfully, but the input buffer size is small.
* @retval false if the from_size and to_size are equal 0.
* Invalid certificate, or Validity retrieve failed.
**/
extern bool libspdm_x509_get_validity(const uint8_t *cert, size_t cert_size,
uint8_t *from, size_t *from_size, uint8_t *to,
size_t *to_size);
/**
* Format a date_time object into DataTime buffer
*
* If date_time_str is NULL, then return false.
* If date_time_size is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] date_time_str date_time string like YYYYMMDDhhmmssZ
* Ref: https://www.w3.org/TR/NOTE-datetime
* Z stand for UTC time
* @param[out] date_time Pointer to a date_time object.
* @param[in,out] date_time_size date_time object buffer size.
*
* @retval true
* @retval false
**/
extern bool libspdm_x509_set_date_time(const char *date_time_str, void *date_time,
size_t *date_time_size);
/**
* Compare date_time1 object and date_time2 object.
*
* If date_time1 is NULL, then return -2.
* If date_time2 is NULL, then return -2.
* If date_time1 == date_time2, then return 0
* If date_time1 > date_time2, then return 1
* If date_time1 < date_time2, then return -1
*
* @param[in] date_time1 Pointer to a date_time Object
* @param[in] date_time2 Pointer to a date_time Object
*
* @retval 0 If date_time1 == date_time2
* @retval 1 If date_time1 > date_time2
* @retval -1 If date_time1 < date_time2
**/
extern int32_t libspdm_x509_compare_date_time(const void *date_time1, const void *date_time2);
/**
* Retrieve the key usage from one X.509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] usage Key usage (LIBSPDM_CRYPTO_X509_KU_*)
*
* @retval true if the usage is not equal 0. The certificate key usage retrieved successfully.
* @retval true if the usage is equal 0. The certificate parse successfully, but the cert doesn't have key usage.
* @retval false Invalid certificate, or usage is NULL.
**/
extern bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size, size_t *usage);
/**
* Retrieve the Extended key usage from one X.509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] usage Key usage bytes.
* @param[in, out] usage_size Key usage buffer size in bytes.
*
* @retval true If the returned usage_size == 0, it means that cert and oid are valid, but the Extended key usage is not found;
* If the returned usage_size != 0, it means that cert and oid are valid, and the Extended key usage is found;
* @retval false If the returned usage_size == 0, it means that cert or oid are invalid;
* If the returned usage_size != 0, it means that cert and oid are valid, and the Extended key usage is found,
* but the store buffer is too small.
**/
extern bool libspdm_x509_get_extended_key_usage(const uint8_t *cert,
size_t cert_size, uint8_t *usage,
size_t *usage_size);
/**
* Retrieve the basic constraints from one X.509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] basic_constraints Basic constraints bytes.
* @param[in, out] basic_constraints_size Basic constraints buffer size in bytes.
*
* @retval true If the returned basic_constraints_size == 0, it means that cert and oid are valid, but the basic_constraints is not found;
* If the returned basic_constraints_size != 0, it means that cert and oid are valid, and the basic_constraints is found;
* @retval false If the returned basic_constraints_size == 0, it means that cert or oid are invalid;
* If the returned basic_constraints_size != 0, it means that cert and oid are valid, and the basic_constraints is found,
* but the store buffer is too small.
**/
extern bool libspdm_x509_get_extended_basic_constraints(const uint8_t *cert,
size_t cert_size,
uint8_t *basic_constraints,
size_t *basic_constraints_size);
/**
* Verify one X509 certificate was issued by the trusted CA.
*
* If cert is NULL, then return false.
* If ca_cert is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate to be verified.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[in] ca_cert Pointer to the DER-encoded trusted CA certificate.
* @param[in] ca_cert_size Size of the CA Certificate in bytes.
*
* @retval true The certificate was issued by the trusted CA.
* @retval false Invalid certificate or the certificate was not issued by the given
* trusted CA.
* @retval false This interface is not supported.
*
**/
extern bool libspdm_x509_verify_cert(const uint8_t *cert, size_t cert_size,
const uint8_t *ca_cert, size_t ca_cert_size);
/**
* Verify one X509 certificate was issued by the trusted CA.
*
* @param[in] cert_chain One or more ASN.1 DER-encoded X.509 certificates
* where the first certificate is signed by the Root
* Certificate or is the Root Certificate itself. and
* subsequent certificate is signed by the preceding
* certificate.
* @param[in] cert_chain_length Total length of the certificate chain, in bytes.
*
* @param[in] root_cert Trusted Root Certificate buffer.
*
* @param[in] root_cert_length Trusted Root Certificate buffer length.
*
* @retval true All certificates were issued by the first certificate in X509Certchain.
* @retval false Invalid certificate or the certificate was not issued by the given
* trusted CA.
**/
extern bool libspdm_x509_verify_cert_chain(const uint8_t *root_cert, size_t root_cert_length,
const uint8_t *cert_chain,
size_t cert_chain_length);
/**
* Get one X509 certificate from cert_chain.
*
* @param[in] cert_chain One or more ASN.1 DER-encoded X.509 certificates
* where the first certificate is signed by the Root
* Certificate or is the Root Certificate itself. and
* subsequent certificate is signed by the preceding
* certificate.
* @param[in] cert_chain_length Total length of the certificate chain, in bytes.
*
* @param[in] cert_index Index of certificate. If index is -1 indicates the
* last certificate in cert_chain.
*
* @param[out] cert The certificate at the index of cert_chain.
* @param[out] cert_length The length certificate at the index of cert_chain.
*
* @retval true Success.
* @retval false Failed to get certificate from certificate chain.
**/
extern bool libspdm_x509_get_cert_from_cert_chain(const uint8_t *cert_chain,
size_t cert_chain_length,
const int32_t cert_index, const uint8_t **cert,
size_t *cert_length);
#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT)
/**
* Retrieve the RSA public key from one DER-encoded X509 certificate.
*
* If cert is NULL, then return false.
* If rsa_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] rsa_context Pointer to newly generated RSA context which contain the retrieved
* RSA public key component. Use libspdm_rsa_free() function to free the
* resource.
*
* @retval true RSA public key was retrieved successfully.
* @retval false Fail to retrieve RSA public key from X509 certificate.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **rsa_context);
#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */
#if LIBSPDM_ECDSA_SUPPORT
/**
* Retrieve the EC public key from one DER-encoded X509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] ec_context Pointer to newly generated EC DSA context which contain the retrieved
* EC public key component. Use libspdm_ec_free() function to free the
* resource.
*
* If cert is NULL, then return false.
* If ec_context is NULL, then return false.
*
* @retval true EC public key was retrieved successfully.
* @retval false Fail to retrieve EC public key from X509 certificate.
*
**/
extern bool libspdm_ec_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **ec_context);
#endif /* LIBSPDM_ECDSA_SUPPORT */
#if (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT)
/**
* Retrieve the Ed public key from one DER-encoded X509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] ecd_context Pointer to newly generated Ed DSA context which contain the retrieved
* Ed public key component. Use libspdm_ecd_free() function to free the
* resource.
*
* If cert is NULL, then return false.
* If ecd_context is NULL, then return false.
*
* @retval true Ed public key was retrieved successfully.
* @retval false Fail to retrieve Ed public key from X509 certificate.
*
**/
extern bool libspdm_ecd_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **ecd_context);
#endif /* (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT) */
#if LIBSPDM_SM2_DSA_SUPPORT
/**
* Retrieve the sm2 public key from one DER-encoded X509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] sm2_context Pointer to newly generated sm2 context which contain the retrieved
* sm2 public key component. Use sm2_free() function to free the
* resource.
*
* If cert is NULL, then return false.
* If sm2_context is NULL, then return false.
*
* @retval true sm2 public key was retrieved successfully.
* @retval false Fail to retrieve sm2 public key from X509 certificate.
*
**/
extern bool libspdm_sm2_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **sm2_context);
#endif /* LIBSPDM_SM2_DSA_SUPPORT */
#endif /* LIBSPDM_CERT_PARSE_SUPPORT */
#endif /* CRYPTLIB_CERT_H */

View File

@@ -0,0 +1,98 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_DH_H
#define CRYPTLIB_DH_H
/*=====================================================================================
* Diffie-Hellman Key Exchange Primitives
*=====================================================================================
*/
#if LIBSPDM_FFDHE_SUPPORT
/**
* Allocates and initializes one Diffie-Hellman context for subsequent use with the NID.
*
* @param nid cipher NID
*
* @return Pointer to the Diffie-Hellman context that has been initialized.
* If the allocation fails, libspdm_dh_new_by_nid() returns NULL.
* If the interface is not supported, libspdm_dh_new_by_nid() returns NULL.
**/
extern void *libspdm_dh_new_by_nid(size_t nid);
/**
* Release the specified DH context.
*
* @param[in] dh_context Pointer to the DH context to be released.
**/
void libspdm_dh_free(void *dh_context);
/**
* Generates DH public key.
*
* This function generates random secret exponent, and computes the public key, which is
* returned via parameter public_key and public_key_size. DH context is updated accordingly.
* If the public_key buffer is too small to hold the public key, false is returned and
* public_key_size is set to the required buffer size to obtain the public key.
*
* If dh_context is NULL, then return false.
* If public_key_size is NULL, then return false.
* If public_key_size is large enough but public_key is NULL, then return false.
* If this interface is not supported, then return false.
*
* For FFDHE2048, the public_size is 256.
* For FFDHE3072, the public_size is 384.
* For FFDHE4096, the public_size is 512.
*
* @param[in, out] dh_context Pointer to the DH context.
* @param[out] public_key Pointer to the buffer to receive generated public key.
* @param[in, out] public_key_size On input, the size of public_key buffer in bytes.
* On output, the size of data returned in public_key buffer in
* bytes.
*
* @retval true DH public key generation succeeded.
* @retval false DH public key generation failed.
* @retval false public_key_size is not large enough.
* @retval false This interface is not supported.
**/
extern bool libspdm_dh_generate_key(void *dh_context, uint8_t *public_key, size_t *public_key_size);
/**
* Computes exchanged common key.
*
* Given peer's public key, this function computes the exchanged common key, based on its own
* context including value of prime modulus and random secret exponent.
*
* If dh_context is NULL, then return false.
* If peer_public_key is NULL, then return false.
* If key_size is NULL, then return false.
* If key is NULL, then return false.
* If key_size is not large enough, then return false.
* If this interface is not supported, then return false.
*
* For FFDHE2048, the peer_public_size and key_size is 256.
* For FFDHE3072, the peer_public_size and key_size is 384.
* For FFDHE4096, the peer_public_size and key_size is 512.
*
* @param[in, out] dh_context Pointer to the DH context.
* @param[in] peer_public_key Pointer to the peer's public key.
* @param[in] peer_public_key_size size of peer's public key in bytes.
* @param[out] key Pointer to the buffer to receive generated key.
* @param[in, out] key_size On input, the size of key buffer in bytes.
* On output, the size of data returned in key buffer in
* bytes.
*
* @retval true DH exchanged key generation succeeded.
* @retval false DH exchanged key generation failed.
* @retval false key_size is not large enough.
* @retval false This interface is not supported.
**/
extern bool libspdm_dh_compute_key(void *dh_context, const uint8_t *peer_public_key,
size_t peer_public_key_size, uint8_t *key,
size_t *key_size);
#endif /* LIBSPDM_FFDHE_SUPPORT */
#endif /* CRYPTLIB_DH_H */

View File

@@ -0,0 +1,246 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_EC_H
#define CRYPTLIB_EC_H
/*=====================================================================================
* Elliptic Curve Primitives
*=====================================================================================*/
#if (LIBSPDM_ECDHE_SUPPORT) || (LIBSPDM_ECDSA_SUPPORT)
/**
* Allocates and Initializes one Elliptic Curve context for subsequent use with the NID.
*
* @param nid cipher NID
*
* @return Pointer to the Elliptic Curve context that has been initialized.
* If the allocations fails, libspdm_ec_new_by_nid() returns NULL.
**/
extern void *libspdm_ec_new_by_nid(size_t nid);
/**
* Release the specified EC context.
*
* @param[in] ec_context Pointer to the EC context to be released.
**/
extern void libspdm_ec_free(void *ec_context);
#if LIBSPDM_FIPS_MODE
/**
* Sets the private key component into the established EC context.
*
* For P-256, the private_key_size is 32 byte.
* For P-384, the private_key_size is 48 byte.
* For P-521, the private_key_size is 66 byte.
*
* @param[in, out] ec_context Pointer to EC context being set.
* @param[in] private_key Pointer to the private key buffer.
* @param[in] private_key_size The size of private key buffer in bytes.
*
* @retval true EC private key component was set successfully.
* @retval false Invalid EC private key component.
*
**/
extern bool libspdm_ec_set_priv_key(void *ec_context, const uint8_t *private_key,
size_t private_key_size);
/**
 * Sets the public key component into the established EC context.
 *
 * For P-256, the public_key_size is 64. first 32-byte is X, second 32-byte is Y.
 * For P-384, the public_key_size is 96. first 48-byte is X, second 48-byte is Y.
 * For P-521, the public_key_size is 132. first 66-byte is X, second 66-byte is Y.
 *
 * @param[in, out] ec_context      Pointer to EC context being set.
 * @param[in]      public_key      Pointer to the buffer containing the public X,Y.
 * @param[in]      public_key_size The size of the public_key buffer in bytes.
 *
 * @retval true  EC public key component was set successfully.
 * @retval false Invalid EC public key component.
 **/
extern bool libspdm_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
size_t public_key_size);
#endif /* LIBSPDM_FIPS_MODE */
#endif /* (LIBSPDM_ECDHE_SUPPORT) || (LIBSPDM_ECDSA_SUPPORT) */
#if LIBSPDM_ECDHE_SUPPORT
/**
 * Generates EC key and returns EC public key (X, Y).
 *
 * This function generates a random secret, and computes the public key (X, Y), which is
 * returned via parameters public_key, public_key_size.
 * X is the first half of public_key with size being public_key_size / 2,
 * Y is the second half of public_key with size being public_key_size / 2.
 * EC context is updated accordingly.
 * If the public_key buffer is too small to hold the public X, Y, false is returned and
 * public_key_size is set to the required buffer size to obtain the public X, Y.
 *
 * For P-256, the public_key_size is 64. first 32-byte is X, second 32-byte is Y.
 * For P-384, the public_key_size is 96. first 48-byte is X, second 48-byte is Y.
 * For P-521, the public_key_size is 132. first 66-byte is X, second 66-byte is Y.
 *
 * If ec_context is NULL, then return false.
 * If public_key_size is NULL, then return false.
 * If public_key_size is large enough but public_key is NULL, then return false.
 *
 * @param[in, out] ec_context      Pointer to the EC context.
 * @param[out]     public_key      Pointer to the buffer to receive generated public X,Y.
 * @param[in, out] public_key_size On input, the size of public_key buffer in bytes.
 *                                 On output, the size of data returned in public_key buffer in
 *                                 bytes.
 *
 * @retval true  EC public X,Y generation succeeded.
 * @retval false EC public X,Y generation failed.
 * @retval false public_key_size is not large enough.
 **/
extern bool libspdm_ec_generate_key(void *ec_context, uint8_t *public_key, size_t *public_key_size);
/**
* Computes exchanged common key.
*
* Given peer's public key (X, Y), this function computes the exchanged common key,
* based on its own context including value of curve parameter and random secret.
* X is the first half of peer_public with size being peer_public_size / 2,
* Y is the second half of peer_public with size being peer_public_size / 2.
*
* If ec_context is NULL, then return false.
* If peer_public is NULL, then return false.
* If peer_public_size is 0, then return false.
* If key is NULL, then return false.
* If key_size is not large enough, then return false.
*
* For P-256, the peer_public_size is 64. first 32-byte is X, second 32-byte is Y.
* The key_size is 32.
* For P-384, the peer_public_size is 96. first 48-byte is X, second 48-byte is Y.
* The key_size is 48.
* For P-521, the peer_public_size is 132. first 66-byte is X, second 66-byte is Y.
* The key_size is 66.
*
* @param[in, out] ec_context Pointer to the EC context.
* @param[in] peer_public Pointer to the peer's public X,Y.
* @param[in] peer_public_size Size of peer's public X,Y in bytes.
* @param[out] key Pointer to the buffer to receive generated key.
* @param[in, out] key_size On input, the size of key buffer in bytes.
* On output, the size of data returned in key buffer in bytes.
*
* @retval true EC exchanged key generation succeeded.
* @retval false EC exchanged key generation failed.
* @retval false key_size is not large enough.
**/
extern bool libspdm_ec_compute_key(void *ec_context, const uint8_t *peer_public,
size_t peer_public_size, uint8_t *key,
size_t *key_size);
#endif /* LIBSPDM_ECDHE_SUPPORT */
#if LIBSPDM_ECDSA_SUPPORT
/**
* Generates Elliptic Curve context from DER-encoded public key data.
*
* The public key is ASN.1 DER-encoded as RFC7250 describes,
* namely, the SubjectPublicKeyInfo structure of a X.509 certificate.
*
* @param[in] der_data Pointer to the DER-encoded public key data.
* @param[in] der_size Size of the DER-encoded public key data in bytes.
* @param[out] ec_context Pointer to newly generated EC context which contains the
* EC public key component.
* Use libspdm_ec_free() function to free the resource.
*
* If der_data is NULL, then return false.
* If ec_context is NULL, then return false.
*
* @retval true EC context was generated successfully.
* @retval false Invalid DER public key data.
*
**/
extern bool libspdm_ec_get_public_key_from_der(const uint8_t *der_data,
size_t der_size,
void **ec_context);
/**
 * Carries out the EC-DSA signature.
 *
 * This function carries out the EC-DSA signature.
 * If the signature buffer is too small to hold the contents of signature, false
 * is returned and sig_size is set to the required buffer size to obtain the signature.
 *
 * If ec_context is NULL, then return false.
 * If message_hash is NULL, then return false.
 * hash_size must match the digest size of hash_nid. hash_nid may be SHA256, SHA384, SHA512,
 * SHA3_256, SHA3_384, SHA3_512.
 * If sig_size is large enough but signature is NULL, then return false.
 *
 * For P-256, the sig_size is 64. first 32-byte is R, second 32-byte is S.
 * For P-384, the sig_size is 96. first 48-byte is R, second 48-byte is S.
 * For P-521, the sig_size is 132. first 66-byte is R, second 66-byte is S.
 *
 * @param[in]      ec_context   Pointer to EC context for signature generation.
 * @param[in]      hash_nid     hash NID
 * @param[in]      message_hash Pointer to octet message hash to be signed.
 * @param[in]      hash_size    Size of the message hash in bytes.
 * @param[out]     signature    Pointer to buffer to receive EC-DSA signature.
 * @param[in, out] sig_size     On input, the size of signature buffer in bytes.
 *                              On output, the size of data returned in signature buffer in bytes.
 *
 * @retval true  signature successfully generated in EC-DSA.
 * @retval false signature generation failed.
 * @retval false sig_size is too small.
 **/
extern bool libspdm_ecdsa_sign(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size);
#if LIBSPDM_FIPS_MODE
/**
* Carries out the EC-DSA signature with caller input random function. This API can be used for FIPS test.
*
* @param[in] ec_context Pointer to EC context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be signed.
* @param[in] hash_size Size of the message hash in bytes.
* @param[out] signature Pointer to buffer to receive EC-DSA signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
* @param[in] random_func random number function
*
* @retval true signature successfully generated in EC-DSA.
* @retval false signature generation failed.
* @retval false sig_size is too small.
**/
extern bool libspdm_ecdsa_sign_ex(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size,
int (*random_func)(void *, unsigned char *, size_t));
#endif/*LIBSPDM_FIPS_MODE*/
/**
 * Verifies the EC-DSA signature.
 *
 * If ec_context is NULL, then return false.
 * If message_hash is NULL, then return false.
 * If signature is NULL, then return false.
 * hash_size must match the digest size of hash_nid. hash_nid may be SHA256, SHA384, SHA512,
 * SHA3_256, SHA3_384, SHA3_512.
 *
 * For P-256, the sig_size is 64. first 32-byte is R, second 32-byte is S.
 * For P-384, the sig_size is 96. first 48-byte is R, second 48-byte is S.
 * For P-521, the sig_size is 132. first 66-byte is R, second 66-byte is S.
 *
 * @param[in] ec_context   Pointer to EC context for signature verification.
 * @param[in] hash_nid     hash NID
 * @param[in] message_hash Pointer to octet message hash to be checked.
 * @param[in] hash_size    Size of the message hash in bytes.
 * @param[in] signature    Pointer to EC-DSA signature to be verified.
 * @param[in] sig_size     Size of signature in bytes.
 *
 * @retval true  Valid signature encoded in EC-DSA.
 * @retval false Invalid signature or invalid EC context.
 **/
extern bool libspdm_ecdsa_verify(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size);
#endif /* LIBSPDM_ECDSA_SUPPORT */
#endif /* CRYPTLIB_EC_H */

View File

@@ -0,0 +1,173 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_ECD_H
#define CRYPTLIB_ECD_H
/*=====================================================================================
* Edwards-Curve Primitives
*=====================================================================================*/
#if (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT)
/**
* Allocates and Initializes one Edwards-Curve context for subsequent use with the NID.
*
* @param nid cipher NID
*
* @return Pointer to the Edwards-Curve context that has been initialized.
* If the allocations fails, libspdm_ecd_new_by_nid() returns NULL.
**/
extern void *libspdm_ecd_new_by_nid(size_t nid);
/**
 * Generates Edwards-Curve context from DER-encoded public key data.
 *
 * The public key is ASN.1 DER-encoded as RFC7250 describes,
 * namely, the SubjectPublicKeyInfo structure of a X.509 certificate.
 *
 * If der_data is NULL, then return false.
 * If ecd_context is NULL, then return false.
 *
 * @param[in]  der_data    Pointer to the DER-encoded public key data.
 * @param[in]  der_size    Size of the DER-encoded public key data in bytes.
 * @param[out] ecd_context Pointer to newly generated Ed context which contains the
 *                         Ed public key component.
 *                         Use libspdm_ecd_free() function to free the resource.
 *
 * @retval true  Ed context was generated successfully.
 * @retval false Invalid DER public key data.
 *
 **/
extern bool libspdm_ecd_get_public_key_from_der(const uint8_t *der_data,
size_t der_size,
void **ecd_context);
/**
* Release the specified Ed context.
*
* @param[in] ecd_context Pointer to the Ed context to be released.
**/
extern void libspdm_ecd_free(void *ecd_context);
/**
 * Sets the public key component into the established Ed context.
 *
 * For ed25519, the public_key_size is 32.
 * For ed448, the public_key_size is 57.
 *
 * @param[in, out] ecd_context     Pointer to Ed context being set.
 * @param[in]      public_key      Pointer to the buffer containing the public key.
 * @param[in]      public_key_size The size of the public_key buffer in bytes.
 *
 * @retval true  Ed public key component was set successfully.
 * @retval false Invalid Ed public key component.
 **/
extern bool libspdm_ecd_set_pub_key(void *ecd_context, const uint8_t *public_key,
size_t public_key_size);
/**
 * Sets the private key component into the established Ed context.
 *
 * For ed25519, the private_key_size is 32.
 * For ed448, the private_key_size is 57.
 *
 * @param[in, out] ecd_context      Pointer to Ed context being set.
 * @param[in]      private_key      Pointer to the buffer containing the private key.
 * @param[in]      private_key_size The size of the private_key buffer in bytes.
 *
 * @retval true  Ed private key component was set successfully.
 * @retval false Invalid Ed private key component.
 *
 **/
extern bool libspdm_ecd_set_pri_key(void *ecd_context, const uint8_t *private_key,
size_t private_key_size);
/**
 * Gets the public key component from the established Ed context.
 *
 * For ed25519, the public_key_size is 32.
 * For ed448, the public_key_size is 57.
 *
 * @param[in, out] ecd_context     Pointer to Ed context being queried.
 * @param[out]     public_key      Pointer to the buffer to receive the public key.
 * @param[in, out] public_key_size On input, the size of public_key buffer in bytes.
 *                                 On output, the size of data returned in public_key buffer
 *                                 in bytes.
 *
 * @retval true  Ed key component was retrieved successfully.
 * @retval false Invalid Ed public key component.
 **/
extern bool libspdm_ecd_get_pub_key(void *ecd_context, uint8_t *public_key,
size_t *public_key_size);
/**
 * Carries out the Ed-DSA signature.
 *
 * This function carries out the Ed-DSA signature.
 * If the signature buffer is too small to hold the contents of signature, false
 * is returned and sig_size is set to the required buffer size to obtain the signature.
 *
 * If ecd_context is NULL, then return false.
 * If message is NULL, then return false.
 * hash_nid must indicate "no hash" since EdDSA hashes the message internally.
 * NOTE(review): original text read "hash_nid must be NULL" although hash_nid is an integer
 * NID - confirm the exact sentinel value expected by the implementation.
 * If sig_size is large enough but signature is NULL, then return false.
 *
 * For ed25519, context must be NULL and context_size must be 0.
 * For ed448, context must be maximum of 255 octets.
 *
 * For ed25519, the sig_size is 64. first 32-byte is R, second 32-byte is S.
 * For ed448, the sig_size is 114. first 57-byte is R, second 57-byte is S.
 *
 * @param[in]      ecd_context  Pointer to Ed context for signature generation.
 * @param[in]      hash_nid     hash NID
 * @param[in]      context      The EDDSA signing context.
 * @param[in]      context_size Size of EDDSA signing context.
 * @param[in]      message      Pointer to octet message to be signed (before hash).
 * @param[in]      size         Size of the message in bytes.
 * @param[out]     signature    Pointer to buffer to receive Ed-DSA signature.
 * @param[in, out] sig_size     On input, the size of signature buffer in bytes.
 *                              On output, the size of data returned in signature buffer in bytes.
 *
 * @retval true  signature successfully generated in Ed-DSA.
 * @retval false signature generation failed.
 * @retval false sig_size is too small.
 **/
extern bool libspdm_eddsa_sign(const void *ecd_context, size_t hash_nid,
const uint8_t *context, size_t context_size,
const uint8_t *message, size_t size, uint8_t *signature,
size_t *sig_size);
/**
 * Verifies the Ed-DSA signature.
 *
 * If ecd_context is NULL, then return false.
 * If message is NULL, then return false.
 * If signature is NULL, then return false.
 * hash_nid must indicate "no hash" since EdDSA hashes the message internally.
 * NOTE(review): original text read "hash_nid must be NULL" although hash_nid is an integer
 * NID - confirm the exact sentinel value expected by the implementation.
 *
 * For ed25519, context must be NULL and context_size must be 0.
 * For ed448, context must be maximum of 255 octets.
 *
 * For ed25519, the sig_size is 64. first 32-byte is R, second 32-byte is S.
 * For ed448, the sig_size is 114. first 57-byte is R, second 57-byte is S.
 *
 * @param[in] ecd_context  Pointer to Ed context for signature verification.
 * @param[in] hash_nid     hash NID
 * @param[in] context      The EDDSA signing context.
 * @param[in] context_size Size of EDDSA signing context.
 * @param[in] message      Pointer to octet message to be checked (before hash).
 * @param[in] size         Size of the message in bytes.
 * @param[in] signature    Pointer to Ed-DSA signature to be verified.
 * @param[in] sig_size     Size of signature in bytes.
 *
 * @retval true  Valid signature encoded in Ed-DSA.
 * @retval false Invalid signature or invalid Ed context.
 **/
extern bool libspdm_eddsa_verify(const void *ecd_context, size_t hash_nid,
const uint8_t *context, size_t context_size,
const uint8_t *message, size_t size,
const uint8_t *signature, size_t sig_size);
#endif /* (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT) */
#endif /* CRYPTLIB_ECD_H */

View File

@@ -0,0 +1,772 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_HASH_H
#define CRYPTLIB_HASH_H
/* SHA-256 digest size in bytes. */
#define LIBSPDM_SHA256_DIGEST_SIZE 32
/* SHA-384 digest size in bytes. */
#define LIBSPDM_SHA384_DIGEST_SIZE 48
/* SHA-512 digest size in bytes. */
#define LIBSPDM_SHA512_DIGEST_SIZE 64
/* SHA3-256 digest size in bytes. */
#define LIBSPDM_SHA3_256_DIGEST_SIZE 32
/* SHA3-384 digest size in bytes. */
#define LIBSPDM_SHA3_384_DIGEST_SIZE 48
/* SHA3-512 digest size in bytes. */
#define LIBSPDM_SHA3_512_DIGEST_SIZE 64
/* SM3_256 digest size in bytes. */
#define LIBSPDM_SM3_256_DIGEST_SIZE 32
/*=====================================================================================
* One-way cryptographic hash SHA2 primitives.
*=====================================================================================
*/
#if LIBSPDM_SHA256_SUPPORT
/**
 * Allocates and initializes one HASH_CTX context for subsequent SHA-256 use.
 *
 * @return Pointer to the HASH_CTX context that has been initialized.
 *         If the allocation fails, libspdm_sha256_new() returns NULL.
 **/
extern void *libspdm_sha256_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha256_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha256_free(void *sha256_context);
/**
* Initializes user-supplied memory pointed to by sha256_context as SHA-256 hash context for
* subsequent use.
*
* If sha256_context is NULL, then return false.
*
* @param[out] sha256_context Pointer to SHA-256 context being initialized.
*
* @retval true SHA-256 context initialization succeeded.
* @retval false SHA-256 context initialization failed.
**/
extern bool libspdm_sha256_init(void *sha256_context);
/**
* Makes a copy of an existing SHA-256 context.
*
* If sha256_context is NULL, then return false.
* If new_sha256_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha256_context Pointer to SHA-256 context being copied.
* @param[out] new_sha256_context Pointer to new SHA-256 context.
*
* @retval true SHA-256 context copy succeeded.
* @retval false SHA-256 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha256_duplicate(const void *sha256_context, void *new_sha256_context);
/**
* Digests the input data and updates SHA-256 context.
*
* This function performs SHA-256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA-256 context should be already correctly initialized by libspdm_sha256_init(), and must not
* have been finalized by libspdm_sha256_final(). Behavior with invalid context is undefined.
*
* If sha256_context is NULL, then return false.
*
* @param[in, out] sha256_context Pointer to the SHA-256 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA-256 data digest succeeded.
* @retval false SHA-256 data digest failed.
**/
extern bool libspdm_sha256_update(void *sha256_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA-256 digest value.
*
* This function completes SHA-256 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA-256 context cannot
* be used again. SHA-256 context should be already correctly initialized by libspdm_sha256_init(),
* and must not have been finalized by libspdm_sha256_final(). Behavior with invalid SHA-256 context
* is undefined.
*
* If sha256_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha256_context Pointer to the SHA-256 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA-256 digest
* value (32 bytes).
*
* @retval true SHA-256 digest computation succeeded.
* @retval false SHA-256 digest computation failed.
**/
extern bool libspdm_sha256_final(void *sha256_context, uint8_t *hash_value);
/**
* Computes the SHA-256 message digest of an input data buffer.
*
* This function performs the SHA-256 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA-256 digest value (32 bytes).
*
* @retval true SHA-256 digest computation succeeded.
* @retval false SHA-256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA256_SUPPORT */
#if LIBSPDM_SHA384_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA-384 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
* If the allocations fails, libspdm_sha384_new() returns NULL.
**/
extern void *libspdm_sha384_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha384_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha384_free(void *sha384_context);
/**
* Initializes user-supplied memory pointed to by sha384_context as SHA-384 hash context for
* subsequent use.
*
* If sha384_context is NULL, then return false.
*
* @param[out] sha384_context Pointer to SHA-384 context being initialized.
*
* @retval true SHA-384 context initialization succeeded.
* @retval false SHA-384 context initialization failed.
**/
extern bool libspdm_sha384_init(void *sha384_context);
/**
* Makes a copy of an existing SHA-384 context.
*
* If sha384_context is NULL, then return false.
* If new_sha384_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha384_context Pointer to SHA-384 context being copied.
* @param[out] new_sha384_context Pointer to new SHA-384 context.
*
* @retval true SHA-384 context copy succeeded.
* @retval false SHA-384 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha384_duplicate(const void *sha384_context, void *new_sha384_context);
/**
* Digests the input data and updates SHA-384 context.
*
* This function performs SHA-384 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA-384 context should be already correctly initialized by libspdm_sha384_init(), and must not
* have been finalized by libspdm_sha384_final(). Behavior with invalid context is undefined.
*
* If sha384_context is NULL, then return false.
*
* @param[in, out] sha384_context Pointer to the SHA-384 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA-384 data digest succeeded.
* @retval false SHA-384 data digest failed.
**/
extern bool libspdm_sha384_update(void *sha384_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA-384 digest value.
*
* This function completes SHA-384 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA-384 context cannot
* be used again. SHA-384 context should be already correctly initialized by libspdm_sha384_init(),
* and must not have been finalized by libspdm_sha384_final(). Behavior with invalid SHA-384 context
* is undefined.
*
* If sha384_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha384_context Pointer to the SHA-384 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA-384 digest
* value (48 bytes).
*
* @retval true SHA-384 digest computation succeeded.
* @retval false SHA-384 digest computation failed.
**/
extern bool libspdm_sha384_final(void *sha384_context, uint8_t *hash_value);
/**
* Computes the SHA-384 message digest of an input data buffer.
*
* This function performs the SHA-384 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA-384 digest value (48 bytes).
*
* @retval true SHA-384 digest computation succeeded.
* @retval false SHA-384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha384_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA384_SUPPORT */
#if LIBSPDM_SHA512_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA-512 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
* If the allocations fails, libspdm_sha512_new() returns NULL.
**/
extern void *libspdm_sha512_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha512_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha512_free(void *sha512_context);
/**
* Initializes user-supplied memory pointed by sha512_context as SHA-512 hash context for
* subsequent use.
*
* If sha512_context is NULL, then return false.
*
* @param[out] sha512_context Pointer to SHA-512 context being initialized.
*
* @retval true SHA-512 context initialization succeeded.
* @retval false SHA-512 context initialization failed.
**/
extern bool libspdm_sha512_init(void *sha512_context);
/**
* Makes a copy of an existing SHA-512 context.
*
* If sha512_context is NULL, then return false.
* If new_sha512_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha512_context Pointer to SHA-512 context being copied.
* @param[out] new_sha512_context Pointer to new SHA-512 context.
*
* @retval true SHA-512 context copy succeeded.
* @retval false SHA-512 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha512_duplicate(const void *sha512_context, void *new_sha512_context);
/**
* Digests the input data and updates SHA-512 context.
*
* This function performs SHA-512 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA-512 context should be already correctly initialized by libspdm_sha512_init(), and must not
* have been finalized by libspdm_sha512_final(). Behavior with invalid context is undefined.
*
* If sha512_context is NULL, then return false.
*
* @param[in, out] sha512_context Pointer to the SHA-512 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA-512 data digest succeeded.
* @retval false SHA-512 data digest failed.
**/
extern bool libspdm_sha512_update(void *sha512_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA-512 digest value.
*
* This function completes SHA-512 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA-512 context cannot
* be used again. SHA-512 context should be already correctly initialized by libspdm_sha512_init(),
* and must not have been finalized by libspdm_sha512_final(). Behavior with invalid SHA-512 context
* is undefined.
*
* If sha512_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha512_context Pointer to the SHA-512 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA-512 digest
* value (64 bytes).
*
* @retval true SHA-512 digest computation succeeded.
* @retval false SHA-512 digest computation failed.
**/
extern bool libspdm_sha512_final(void *sha512_context, uint8_t *hash_value);
/**
* Computes the SHA-512 message digest of an input data buffer.
*
* This function performs the SHA-512 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA-512 digest value (64 bytes).
*
* @retval true SHA-512 digest computation succeeded.
* @retval false SHA-512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha512_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA512_SUPPORT */
/*=====================================================================================
* One-way cryptographic hash SHA3 primitives.
*=====================================================================================
*/
#if LIBSPDM_SHA3_256_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA3-256 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
* If the allocations fails, libspdm_sha3_256_new() returns NULL.
**/
extern void *libspdm_sha3_256_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha3_256_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha3_256_free(void *sha3_256_context);
/**
* Initializes user-supplied memory pointed by sha3_256_context as SHA3-256 hash context for
* subsequent use.
*
* If sha3_256_context is NULL, then return false.
*
* @param[out] sha3_256_context Pointer to SHA3-256 context being initialized.
*
* @retval true SHA3-256 context initialization succeeded.
* @retval false SHA3-256 context initialization failed.
**/
extern bool libspdm_sha3_256_init(void *sha3_256_context);
/**
* Makes a copy of an existing SHA3-256 context.
*
* If sha3_256_context is NULL, then return false.
* If new_sha3_256_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha3_256_context Pointer to SHA3-256 context being copied.
* @param[out] new_sha3_256_context Pointer to new SHA3-256 context.
*
* @retval true SHA3-256 context copy succeeded.
* @retval false SHA3-256 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_256_duplicate(const void *sha3_256_context, void *new_sha3_256_context);
/**
* Digests the input data and updates SHA3-256 context.
*
* This function performs SHA3-256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA3-256 context should be already correctly initialized by libspdm_sha3_256_init(), and must not
* have been finalized by libspdm_sha3_256_final(). Behavior with invalid context is undefined.
*
* If sha3_256_context is NULL, then return false.
*
* @param[in, out] sha3_256_context Pointer to the SHA3-256 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size size of data buffer in bytes.
*
* @retval true SHA3-256 data digest succeeded.
* @retval false SHA3-256 data digest failed.
**/
extern bool libspdm_sha3_256_update(void *sha3_256_context, const void *data, size_t data_size);
/**
 * Completes computation of the SHA3-256 digest value.
 *
 * This function completes SHA3-256 hash computation and populates the digest value into
 * the specified memory. After this function has been called, the SHA3-256 context cannot
 * be used again. SHA3-256 context should be already correctly initialized by
 * libspdm_sha3_256_init(), and must not have been finalized by libspdm_sha3_256_final().
 * Behavior with invalid SHA3-256 context is undefined.
 *
 * If sha3_256_context is NULL, then return false.
 * If hash_value is NULL, then return false.
 *
 * @param[in, out] sha3_256_context Pointer to the SHA3-256 context.
 * @param[out]     hash_value       Pointer to a buffer that receives the SHA3-256 digest
 *                                  value (32 bytes).
 *
 * @retval true  SHA3-256 digest computation succeeded.
 * @retval false SHA3-256 digest computation failed.
 **/
extern bool libspdm_sha3_256_final(void *sha3_256_context, uint8_t *hash_value);
/**
* Computes the SHA3-256 message digest of an input data buffer.
*
* This function performs the SHA3-256 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-256 digest value (32 bytes).
*
* @retval true SHA3-256 digest computation succeeded.
* @retval false SHA3-256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA3_256_SUPPORT */
#if LIBSPDM_SHA3_384_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA3-384 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
* If the allocations fails, libspdm_sha3_384_new() returns NULL.
**/
extern void *libspdm_sha3_384_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha3_384_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha3_384_free(void *sha3_384_context);
/**
* Initializes user-supplied memory pointed by sha3_384_context as SHA3-384 hash context for
* subsequent use.
*
* If sha3_384_context is NULL, then return false.
*
* @param[out] sha3_384_context Pointer to SHA3-384 context being initialized.
*
* @retval true SHA3-384 context initialization succeeded.
* @retval false SHA3-384 context initialization failed.
**/
extern bool libspdm_sha3_384_init(void *sha3_384_context);
/**
* Makes a copy of an existing SHA3-384 context.
*
* If sha3_384_context is NULL, then return false.
* If new_sha3_384_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha3_384_context Pointer to SHA3-384 context being copied.
* @param[out] new_sha3_384_context Pointer to new SHA3-384 context.
*
* @retval true SHA3-384 context copy succeeded.
* @retval false SHA3-384 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_384_duplicate(const void *sha3_384_context, void *new_sha3_384_context);
/**
* Digests the input data and updates SHA3-384 context.
*
* This function performs SHA3-384 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA3-384 context should be already correctly initialized by libspdm_sha3_384_init(), and must not
* have been finalized by libspdm_sha3_384_final(). Behavior with invalid context is undefined.
*
* If sha3_384_context is NULL, then return false.
*
* @param[in, out] sha3_384_context Pointer to the SHA3-384 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA3-384 data digest succeeded.
* @retval false SHA3-384 data digest failed.
**/
extern bool libspdm_sha3_384_update(void *sha3_384_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA3-384 digest value.
*
* This function completes SHA3-384 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA3-384 context cannot
* be used again. SHA3-384 context should be already correctly initialized by
* libspdm_sha3_384_init(), and must not have been finalized by libspdm_sha3_384_final().
* Behavior with invalid SHA3-384 context is undefined.
*
* If sha3_384_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha3_384_context Pointer to the SHA3-384 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-384 digest
* value (48 bytes).
*
* @retval true SHA3-384 digest computation succeeded.
* @retval false SHA3-384 digest computation failed.
*
**/
extern bool libspdm_sha3_384_final(void *sha3_384_context, uint8_t *hash_value);
/**
* Computes the SHA3-384 message digest of an input data buffer.
*
* This function performs the SHA3-384 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-384 digest value (48 bytes).
*
* @retval true SHA3-384 digest computation succeeded.
* @retval false SHA3-384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_384_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA3_384_SUPPORT */
#if LIBSPDM_SHA3_512_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA3-512 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
* If the allocation fails, libspdm_sha3_512_new() returns NULL.
**/
extern void *libspdm_sha3_512_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha3_512_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha3_512_free(void *sha3_512_context);
/**
* Initializes user-supplied memory pointed by sha3_512_context as SHA3-512 hash context for
* subsequent use.
*
* If sha3_512_context is NULL, then return false.
*
* @param[out] sha3_512_context Pointer to SHA3-512 context being initialized.
*
* @retval true SHA3-512 context initialization succeeded.
* @retval false SHA3-512 context initialization failed.
**/
extern bool libspdm_sha3_512_init(void *sha3_512_context);
/**
* Makes a copy of an existing SHA3-512 context.
*
* If sha3_512_context is NULL, then return false.
* If new_sha3_512_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha3_512_context Pointer to SHA3-512 context being copied.
* @param[out] new_sha3_512_context Pointer to new SHA3-512 context.
*
* @retval true SHA3-512 context copy succeeded.
* @retval false SHA3-512 context copy failed.
* @retval false This interface is not supported.
*
**/
extern bool libspdm_sha3_512_duplicate(const void *sha3_512_context, void *new_sha3_512_context);
/**
* Digests the input data and updates SHA3-512 context.
*
* This function performs SHA3-512 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA3-512 context should be already correctly initialized by libspdm_sha3_512_init(), and must not
* have been finalized by libspdm_sha3_512_final(). Behavior with invalid context is undefined.
*
* If sha3_512_context is NULL, then return false.
*
* @param[in, out] sha3_512_context Pointer to the SHA3-512 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA3-512 data digest succeeded.
* @retval false SHA3-512 data digest failed.
**/
extern bool libspdm_sha3_512_update(void *sha3_512_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA3-512 digest value.
*
* This function completes SHA3-512 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA3-512 context cannot
* be used again. SHA3-512 context should be already correctly initialized by
* libspdm_sha3_512_init(), and must not have been finalized by libspdm_sha3_512_final().
* Behavior with invalid SHA3-512 context is undefined.
*
* If sha3_512_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha3_512_context Pointer to the SHA3-512 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-512 digest
* value (64 bytes).
*
* @retval true SHA3-512 digest computation succeeded.
* @retval false SHA3-512 digest computation failed.
**/
extern bool libspdm_sha3_512_final(void *sha3_512_context, uint8_t *hash_value);
/**
* Computes the SHA3-512 message digest of an input data buffer.
*
* This function performs the SHA3-512 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-512 digest value (64 bytes).
*
* @retval true SHA3-512 digest computation succeeded.
* @retval false SHA3-512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_512_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA3_512_SUPPORT */
/*=====================================================================================
* One-Way Cryptographic hash SM3 Primitives
*=====================================================================================
*/
#if LIBSPDM_SM3_256_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SM3-256 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
* If the allocation fails, libspdm_sm3_256_new() returns NULL.
**/
extern void *libspdm_sm3_256_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sm3_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sm3_256_free(void *sm3_context);
/**
* Initializes user-supplied memory pointed by sm3_context as SM3 hash context for
* subsequent use.
*
* If sm3_context is NULL, then return false.
*
* @param[out] sm3_context Pointer to SM3 context being initialized.
*
* @retval true SM3 context initialization succeeded.
* @retval false SM3 context initialization failed.
**/
extern bool libspdm_sm3_256_init(void *sm3_context);
/**
* Makes a copy of an existing SM3 context.
*
* If sm3_context is NULL, then return false.
* If new_sm3_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sm3_context Pointer to SM3 context being copied.
* @param[out] new_sm3_context Pointer to new SM3 context.
*
* @retval true SM3 context copy succeeded.
* @retval false SM3 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sm3_256_duplicate(const void *sm3_context, void *new_sm3_context);
/**
* Digests the input data and updates SM3 context.
*
* This function performs SM3 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SM3 context should be already correctly initialized by sm3_init(), and should not be finalized
* by sm3_final(). Behavior with invalid context is undefined.
*
* If sm3_context is NULL, then return false.
*
* @param[in, out] sm3_context Pointer to the SM3 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SM3 data digest succeeded.
* @retval false SM3 data digest failed.
**/
extern bool libspdm_sm3_256_update(void *sm3_context, const void *data, size_t data_size);
/**
* Completes computation of the SM3 digest value.
*
* This function completes SM3 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the SM3 context cannot
* be used again. SM3 context should be already correctly initialized by sm3_init(), and should not
* be finalized by sm3_final(). Behavior with invalid SM3 context is undefined.
*
* If sm3_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sm3_context Pointer to the SM3 context.
* @param[out] hash_value Pointer to a buffer that receives the SM3 digest value (32 bytes).
*
* @retval true SM3 digest computation succeeded.
* @retval false SM3 digest computation failed.
**/
extern bool libspdm_sm3_256_final(void *sm3_context, uint8_t *hash_value);
/**
* Computes the SM3 message digest of an input data buffer.
*
* This function performs the SM3 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SM3 digest value (32 bytes).
*
* @retval true SM3 digest computation succeeded.
* @retval false SM3 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sm3_256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SM3_256_SUPPORT */
#endif /* CRYPTLIB_HASH_H */

/* ===== End of cryptlib_hash.h — start of cryptlib_hkdf.h (266 lines).
 * (Replaced web-viewer "View File" label and diff-hunk header left by extraction.) */
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_HKDF_H
#define CRYPTLIB_HKDF_H
/*=====================================================================================
* Key Derivation Function Primitives
*=====================================================================================*/
#if LIBSPDM_SHA256_SUPPORT
/**
* Derive SHA-256 HMAC-based Extract key Derivation Function (HKDF).
*
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive prk value.
* @param[in] prk_out_size Size of prk bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha256_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
* Derive SHA256 HMAC-based Expand key Derivation Function (HKDF).
*
* @param[in] prk Pointer to the user-supplied key.
* @param[in] prk_size Key size in bytes.
* @param[in] info Pointer to the application specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive hkdf value.
* @param[in] out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha256_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA256_SUPPORT */
#if LIBSPDM_SHA384_SUPPORT
/**
* Derive SHA384 HMAC-based Extract key Derivation Function (HKDF).
*
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive hkdf value.
* @param[in] prk_out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha384_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
* Derive SHA384 HMAC-based Expand key Derivation Function (HKDF).
*
* @param[in] prk Pointer to the user-supplied key.
* @param[in] prk_size Key size in bytes.
* @param[in] info Pointer to the application specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive hkdf value.
* @param[in] out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha384_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA384_SUPPORT */
#if LIBSPDM_SHA512_SUPPORT
/**
* Derive SHA512 HMAC-based Extract key Derivation Function (HKDF).
*
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive hkdf value.
* @param[in] prk_out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha512_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
* Derive SHA512 HMAC-based Expand key Derivation Function (HKDF).
*
* @param[in] prk Pointer to the user-supplied key.
* @param[in] prk_size Key size in bytes.
* @param[in] info Pointer to the application specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive hkdf value.
* @param[in] out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha512_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA512_SUPPORT */
#if LIBSPDM_SHA3_256_SUPPORT
/**
* Derive SHA3_256 HMAC-based Extract key Derivation Function (HKDF).
*
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive hkdf value.
* @param[in] prk_out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha3_256_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
* Derive SHA3_256 HMAC-based Expand key Derivation Function (HKDF).
*
* @param[in] prk Pointer to the user-supplied key.
* @param[in] prk_size Key size in bytes.
* @param[in] info Pointer to the application specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive hkdf value.
* @param[in] out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha3_256_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA3_256_SUPPORT */
#if LIBSPDM_SHA3_384_SUPPORT
/**
* Derive SHA3_384 HMAC-based Extract key Derivation Function (HKDF).
*
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive hkdf value.
* @param[in] prk_out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha3_384_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
* Derive SHA3_384 HMAC-based Expand key Derivation Function (HKDF).
*
* @param[in] prk Pointer to the user-supplied key.
* @param[in] prk_size Key size in bytes.
* @param[in] info Pointer to the application specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive hkdf value.
* @param[in] out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha3_384_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA3_384_SUPPORT */
#if LIBSPDM_SHA3_512_SUPPORT
/**
* Derive SHA3_512 HMAC-based Extract key Derivation Function (HKDF).
*
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive hkdf value.
* @param[in] prk_out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha3_512_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
* Derive SHA3_512 HMAC-based Expand key Derivation Function (HKDF).
*
* @param[in] prk Pointer to the user-supplied key.
* @param[in] prk_size Key size in bytes.
* @param[in] info Pointer to the application specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive hkdf value.
* @param[in] out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha3_512_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA3_512_SUPPORT */
#if LIBSPDM_SM3_256_SUPPORT
/**
* Derive SM3_256 HMAC-based Extract key Derivation Function (HKDF).
*
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive hkdf value.
* @param[in] prk_out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sm3_256_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
* Derive SM3_256 HMAC-based Expand key Derivation Function (HKDF).
*
* @param[in] prk Pointer to the user-supplied key.
* @param[in] prk_size Key size in bytes.
* @param[in] info Pointer to the application specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive hkdf value.
* @param[in] out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sm3_256_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SM3_256_SUPPORT */
#endif /* CRYPTLIB_HKDF_H */

/* ===== End of cryptlib_hkdf.h — start of cryptlib_mac.h (833 lines).
 * (Replaced web-viewer "View File" label and diff-hunk header left by extraction.) */
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_MAC_H
#define CRYPTLIB_MAC_H
/*=====================================================================================
* Message Authentication Code (MAC) Primitives
*=====================================================================================
*/
#if LIBSPDM_SHA256_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA256 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
* If the allocation fails, libspdm_hmac_sha256_new() returns NULL.
**/
extern void *libspdm_hmac_sha256_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha256_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha256_free(void *hmac_sha256_ctx);
/**
* Set user-supplied key for subsequent use. It must be done before any
* calling to libspdm_hmac_sha256_update().
*
* If hmac_sha256_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha256_ctx Pointer to HMAC-SHA256 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key is set successfully.
* @retval false The key is set unsuccessfully.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_set_key(void *hmac_sha256_ctx, const uint8_t *key, size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA256 context.
*
* If hmac_sha256_ctx is NULL, then return false.
* If new_hmac_sha256_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha256_ctx Pointer to HMAC-SHA256 context being copied.
* @param[out] new_hmac_sha256_ctx Pointer to new HMAC-SHA256 context.
*
* @retval true HMAC-SHA256 context copy succeeded.
* @retval false HMAC-SHA256 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_duplicate(const void *hmac_sha256_ctx, void *new_hmac_sha256_ctx);
/**
* Digests the input data and updates HMAC-SHA256 context.
*
* This function performs HMAC-SHA256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA256 context should be initialized by libspdm_hmac_sha256_new(), and should not be
* finalized by libspdm_hmac_sha256_final(). Behavior with invalid context is undefined.
*
* If hmac_sha256_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha256_ctx Pointer to the HMAC-SHA256 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA256 data digest succeeded.
* @retval false HMAC-SHA256 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_update(void *hmac_sha256_ctx, const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA256 digest value.
*
* This function completes HMAC-SHA256 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA256 context cannot
* be used again. HMAC-SHA256 context should be initialized by libspdm_hmac_sha256_new(), and should
* not be finalized by libspdm_hmac_sha256_final(). Behavior with invalid HMAC-SHA256 context is
* undefined.
*
* If hmac_sha256_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha256_ctx Pointer to the HMAC-SHA256 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA256 digest
* value (32 bytes).
*
* @retval true HMAC-SHA256 digest computation succeeded.
* @retval false HMAC-SHA256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_final(void *hmac_sha256_ctx, uint8_t *hmac_value);
/**
* Computes the HMAC-SHA256 digest of an input data buffer.
*
* This function performs the HMAC-SHA256 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA256 digest
* value (32 bytes).
*
* @retval true HMAC-SHA256 digest computation succeeded.
* @retval false HMAC-SHA256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA256_SUPPORT */
#if LIBSPDM_SHA384_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA384 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
* If the allocation fails, libspdm_hmac_sha384_new() returns NULL.
**/
extern void *libspdm_hmac_sha384_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha384_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha384_free(void *hmac_sha384_ctx);
/**
* Set user-supplied key for subsequent use. It must be done before any
* calling to libspdm_hmac_sha384_update().
*
* If hmac_sha384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha384_ctx Pointer to HMAC-SHA384 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size key size in bytes.
*
* @retval true The key is set successfully.
* @retval false The key is set unsuccessfully.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_set_key(void *hmac_sha384_ctx, const uint8_t *key, size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA384 context.
*
* If hmac_sha384_ctx is NULL, then return false.
* If new_hmac_sha384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha384_ctx Pointer to HMAC-SHA384 context being copied.
* @param[out] new_hmac_sha384_ctx Pointer to new HMAC-SHA384 context.
*
* @retval true HMAC-SHA384 context copy succeeded.
* @retval false HMAC-SHA384 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_duplicate(const void *hmac_sha384_ctx, void *new_hmac_sha384_ctx);
/**
* Digests the input data and updates HMAC-SHA384 context.
*
* This function performs HMAC-SHA384 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA384 context should be initialized by libspdm_hmac_sha384_new(), and should not be
* finalized by libspdm_hmac_sha384_final(). Behavior with invalid context is undefined.
*
* If hmac_sha384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha384_ctx Pointer to the HMAC-SHA384 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA384 data digest succeeded.
* @retval false HMAC-SHA384 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_update(void *hmac_sha384_ctx, const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA384 digest value.
*
* This function completes HMAC-SHA384 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA384 context cannot
* be used again. HMAC-SHA384 context should be initialized by libspdm_hmac_sha384_new(), and should
* not be finalized by libspdm_hmac_sha384_final(). Behavior with invalid HMAC-SHA384 context is
* undefined.
*
* If hmac_sha384_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha384_ctx Pointer to the HMAC-SHA384 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA384 digest
* value (48 bytes).
*
* @retval true HMAC-SHA384 digest computation succeeded.
* @retval false HMAC-SHA384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_final(void *hmac_sha384_ctx, uint8_t *hmac_value);
/**
* Computes the HMAC-SHA384 digest of an input data buffer.
*
* This function performs the HMAC-SHA384 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA384 digest
* value (48 bytes).
*
* @retval true HMAC-SHA384 digest computation succeeded.
* @retval false HMAC-SHA384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA384_SUPPORT */
#if LIBSPDM_SHA512_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA512 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
* If the allocation fails, libspdm_hmac_sha512_new() returns NULL.
**/
extern void *libspdm_hmac_sha512_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha512_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha512_free(void *hmac_sha512_ctx);
/**
* Set user-supplied key for subsequent use. It must be done before any
* call to libspdm_hmac_sha512_update().
*
* If hmac_sha512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha512_ctx Pointer to HMAC-SHA512 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key is set successfully.
* @retval false The key is set unsuccessfully.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha512_set_key(void *hmac_sha512_ctx, const uint8_t *key, size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA512 context.
*
* If hmac_sha512_ctx is NULL, then return false.
* If new_hmac_sha512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha512_ctx Pointer to HMAC-SHA512 context being copied.
* @param[out] new_hmac_sha512_ctx Pointer to new HMAC-SHA512 context.
*
* @retval true HMAC-SHA512 context copy succeeded.
* @retval false HMAC-SHA512 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha512_duplicate(const void *hmac_sha512_ctx, void *new_hmac_sha512_ctx);
/**
* Digests the input data and updates HMAC-SHA512 context.
*
* This function performs HMAC-SHA512 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA512 context should be initialized by libspdm_hmac_sha512_new(), and should not be
* finalized by libspdm_hmac_sha512_final(). Behavior with invalid context is undefined.
*
* If hmac_sha512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha512_ctx Pointer to the HMAC-SHA512 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA512 data digest succeeded.
* @retval false HMAC-SHA512 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha512_update(void *hmac_sha512_ctx, const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA512 digest value.
*
* This function completes HMAC-SHA512 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA512 context cannot
* be used again. The HMAC-SHA512 context should have been initialized by libspdm_hmac_sha512_new(),
* and must not already have been finalized by libspdm_hmac_sha512_final(). Behavior with an
* invalid HMAC-SHA512 context is undefined.
*
* If hmac_sha512_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha512_ctx Pointer to the HMAC-SHA512 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA512 digest
* value (64 bytes).
*
* @retval true HMAC-SHA512 digest computation succeeded.
* @retval false HMAC-SHA512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha512_final(void *hmac_sha512_ctx, uint8_t *hmac_value);
/**
* Computes the HMAC-SHA512 digest of an input data buffer.
*
* This function performs the HMAC-SHA512 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA512 digest
* value (64 bytes).
*
* @retval true HMAC-SHA512 digest computation succeeded.
* @retval false HMAC-SHA512 digest computation failed.
* @retval false This interface is not supported.
*
**/
extern bool libspdm_hmac_sha512_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA512_SUPPORT */
#if LIBSPDM_SHA3_256_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-256 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
* If the allocation fails, libspdm_hmac_sha3_256_new() returns NULL.
**/
extern void *libspdm_hmac_sha3_256_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha3_256_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha3_256_free(void *hmac_sha3_256_ctx);
/**
* Set user-supplied key for subsequent use. It must be done before any
* call to libspdm_hmac_sha3_256_update().
*
* If hmac_sha3_256_ctx is NULL, then return false.
*
* @param[out] hmac_sha3_256_ctx Pointer to HMAC-SHA3-256 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key is set successfully.
* @retval false The key is set unsuccessfully.
**/
extern bool libspdm_hmac_sha3_256_set_key(void *hmac_sha3_256_ctx,
const uint8_t *key,
size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA3-256 context.
*
* If hmac_sha3_256_ctx is NULL, then return false.
* If new_hmac_sha3_256_ctx is NULL, then return false.
*
* @param[in] hmac_sha3_256_ctx Pointer to HMAC-SHA3-256 context being copied.
* @param[out] new_hmac_sha3_256_ctx Pointer to new HMAC-SHA3-256 context.
*
* @retval true HMAC-SHA3-256 context copy succeeded.
* @retval false HMAC-SHA3-256 context copy failed.
**/
extern bool libspdm_hmac_sha3_256_duplicate(const void *hmac_sha3_256_ctx,
void *new_hmac_sha3_256_ctx);
/**
* Digests the input data and updates HMAC-SHA3-256 context.
*
* This function performs HMAC-SHA3-256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA3-256 context should be initialized by libspdm_hmac_sha3_256_new(), and should not be
* finalized by libspdm_hmac_sha3_256_final(). Behavior with invalid context is undefined.
*
* If hmac_sha3_256_ctx is NULL, then return false.
*
* @param[in, out] hmac_sha3_256_ctx Pointer to the HMAC-SHA3-256 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA3-256 data digest succeeded.
* @retval false HMAC-SHA3-256 data digest failed.
**/
extern bool libspdm_hmac_sha3_256_update(void *hmac_sha3_256_ctx,
const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA3-256 digest value.
*
* This function completes HMAC-SHA3-256 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA3-256 context cannot
* be used again. The HMAC-SHA3-256 context should have been initialized by
* libspdm_hmac_sha3_256_new(), and must not already have been finalized by
* libspdm_hmac_sha3_256_final(). Behavior with an invalid HMAC-SHA3-256 context is undefined.
*
* If hmac_sha3_256_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
*
* @param[in, out] hmac_sha3_256_ctx Pointer to the HMAC-SHA3-256 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-256 digest
* value (32 bytes).
*
* @retval true HMAC-SHA3-256 digest computation succeeded.
* @retval false HMAC-SHA3-256 digest computation failed.
**/
extern bool libspdm_hmac_sha3_256_final(void *hmac_sha3_256_ctx, uint8_t *hmac_value);
/**
* Computes the HMAC-SHA3-256 digest of an input data buffer.
*
* This function performs the HMAC-SHA3-256 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-256 digest
* value (32 bytes).
*
* @retval true HMAC-SHA3-256 digest computation succeeded.
* @retval false HMAC-SHA3-256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_256_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA3_256_SUPPORT */
#if LIBSPDM_SHA3_384_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-384 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
* If the allocation fails, libspdm_hmac_sha3_384_new() returns NULL.
**/
extern void *libspdm_hmac_sha3_384_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha3_384_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha3_384_free(void *hmac_sha3_384_ctx);
/**
* Set user-supplied key for subsequent use. It must be done before any
* call to libspdm_hmac_sha3_384_update().
*
* If hmac_sha3_384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha3_384_ctx Pointer to HMAC-SHA3-384 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key is set successfully.
* @retval false The key is set unsuccessfully.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_set_key(void *hmac_sha3_384_ctx,
const uint8_t *key,
size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA3-384 context.
*
* If hmac_sha3_384_ctx is NULL, then return false.
* If new_hmac_sha3_384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha3_384_ctx Pointer to HMAC-SHA3-384 context being copied.
* @param[out] new_hmac_sha3_384_ctx Pointer to new HMAC-SHA3-384 context.
*
* @retval true HMAC-SHA3-384 context copy succeeded.
* @retval false HMAC-SHA3-384 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_duplicate(const void *hmac_sha3_384_ctx,
void *new_hmac_sha3_384_ctx);
/**
* Digests the input data and updates HMAC-SHA3-384 context.
*
* This function performs HMAC-SHA3-384 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA3-384 context should be initialized by libspdm_hmac_sha3_384_new(), and should not be
* finalized by libspdm_hmac_sha3_384_final(). Behavior with invalid context is undefined.
*
* If hmac_sha3_384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha3_384_ctx Pointer to the HMAC-SHA3-384 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA3-384 data digest succeeded.
* @retval false HMAC-SHA3-384 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_update(void *hmac_sha3_384_ctx, const void *data,
size_t data_size);
/**
* Completes computation of the HMAC-SHA3-384 digest value.
*
* This function completes HMAC-SHA3-384 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA3-384 context cannot
* be used again. The HMAC-SHA3-384 context should have been initialized by
* libspdm_hmac_sha3_384_new(), and must not already have been finalized by
* libspdm_hmac_sha3_384_final(). Behavior with an invalid HMAC-SHA3-384 context is undefined.
*
* If hmac_sha3_384_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha3_384_ctx Pointer to the HMAC-SHA3-384 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-384 digest
* value (48 bytes).
*
* @retval true HMAC-SHA3-384 digest computation succeeded.
* @retval false HMAC-SHA3-384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_final(void *hmac_sha3_384_ctx, uint8_t *hmac_value);
/**
* Computes the HMAC-SHA3-384 digest of an input data buffer.
*
* This function performs the HMAC-SHA3-384 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-384 digest
* value (48 bytes).
*
* @retval true HMAC-SHA3-384 digest computation succeeded.
* @retval false HMAC-SHA3-384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA3_384_SUPPORT */
#if LIBSPDM_SHA3_512_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-512 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
* If the allocation fails, libspdm_hmac_sha3_512_new() returns NULL.
**/
extern void *libspdm_hmac_sha3_512_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha3_512_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha3_512_free(void *hmac_sha3_512_ctx);
/**
* Set user-supplied key for subsequent use. It must be done before any
* call to libspdm_hmac_sha3_512_update().
*
* If hmac_sha3_512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha3_512_ctx Pointer to HMAC-SHA3-512 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key is set successfully.
* @retval false The key is set unsuccessfully.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_set_key(void *hmac_sha3_512_ctx,
const uint8_t *key,
size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA3-512 context.
*
* If hmac_sha3_512_ctx is NULL, then return false.
* If new_hmac_sha3_512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha3_512_ctx Pointer to HMAC-SHA3-512 context being copied.
* @param[out] new_hmac_sha3_512_ctx Pointer to new HMAC-SHA3-512 context.
*
* @retval true HMAC-SHA3-512 context copy succeeded.
* @retval false HMAC-SHA3-512 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_duplicate(const void *hmac_sha3_512_ctx,
void *new_hmac_sha3_512_ctx);
/**
* Digests the input data and updates HMAC-SHA3-512 context.
*
* This function performs HMAC-SHA3-512 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA3-512 context should be initialized by libspdm_hmac_sha3_512_new(), and should not be
* finalized by libspdm_hmac_sha3_512_final(). Behavior with invalid context is undefined.
*
* If hmac_sha3_512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha3_512_ctx Pointer to the HMAC-SHA3-512 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA3-512 data digest succeeded.
* @retval false HMAC-SHA3-512 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_update(void *hmac_sha3_512_ctx,
const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA3-512 digest value.
*
* This function completes HMAC-SHA3-512 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA3-512 context cannot
* be used again. The HMAC-SHA3-512 context should have been initialized by
* libspdm_hmac_sha3_512_new(), and must not already have been finalized by
* libspdm_hmac_sha3_512_final(). Behavior with an invalid HMAC-SHA3-512 context is undefined.
*
* If hmac_sha3_512_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha3_512_ctx Pointer to the HMAC-SHA3-512 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-512 digest
* value (64 bytes).
*
* @retval true HMAC-SHA3-512 digest computation succeeded.
* @retval false HMAC-SHA3-512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_final(void *hmac_sha3_512_ctx, uint8_t *hmac_value);
/**
* Computes the HMAC-SHA3-512 digest of an input data buffer.
*
* This function performs the HMAC-SHA3-512 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-512 digest
* value (64 bytes).
*
* @retval true HMAC-SHA3-512 digest computation succeeded.
* @retval false HMAC-SHA3-512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA3_512_SUPPORT */
#if LIBSPDM_SM3_256_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SM3-256 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
* If the allocation fails, libspdm_hmac_sm3_256_new() returns NULL.
**/
extern void *libspdm_hmac_sm3_256_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sm3_256_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sm3_256_free(void *hmac_sm3_256_ctx);
/**
* Set user-supplied key for subsequent use. It must be done before any
* call to libspdm_hmac_sm3_256_update().
*
* If hmac_sm3_256_ctx is NULL, then return false.
*
* @param[out] hmac_sm3_256_ctx Pointer to HMAC-SM3-256 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key is set successfully.
* @retval false The key is set unsuccessfully.
**/
extern bool libspdm_hmac_sm3_256_set_key(void *hmac_sm3_256_ctx,
const uint8_t *key, size_t key_size);
/**
* Makes a copy of an existing HMAC-SM3-256 context.
*
* If hmac_sm3_256_ctx is NULL, then return false.
* If new_hmac_sm3_256_ctx is NULL, then return false.
*
* @param[in] hmac_sm3_256_ctx Pointer to HMAC-SM3-256 context being copied.
* @param[out] new_hmac_sm3_256_ctx Pointer to new HMAC-SM3-256 context.
*
* @retval true HMAC-SM3-256 context copy succeeded.
* @retval false HMAC-SM3-256 context copy failed.
**/
extern bool libspdm_hmac_sm3_256_duplicate(const void *hmac_sm3_256_ctx,
void *new_hmac_sm3_256_ctx);
/**
* Digests the input data and updates HMAC-SM3-256 context.
*
* This function performs HMAC-SM3-256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SM3-256 context should be initialized by libspdm_hmac_sm3_256_new(), and should not be
* finalized by libspdm_hmac_sm3_256_final(). Behavior with invalid context is undefined.
*
* If hmac_sm3_256_ctx is NULL, then return false.
*
* @param[in, out] hmac_sm3_256_ctx Pointer to the HMAC-SM3-256 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SM3-256 data digest succeeded.
* @retval false HMAC-SM3-256 data digest failed.
**/
extern bool libspdm_hmac_sm3_256_update(void *hmac_sm3_256_ctx, const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SM3-256 digest value.
*
* This function completes HMAC-SM3-256 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SM3-256 context cannot
* be used again. The HMAC-SM3-256 context should have been initialized by
* libspdm_hmac_sm3_256_new(), and must not already have been finalized by
* libspdm_hmac_sm3_256_final(). Behavior with an invalid HMAC-SM3-256 context is undefined.
*
* If hmac_sm3_256_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
*
* @param[in, out] hmac_sm3_256_ctx Pointer to the HMAC-SM3-256 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SM3-256 digest
* value (32 bytes).
*
* @retval true HMAC-SM3-256 digest computation succeeded.
* @retval false HMAC-SM3-256 digest computation failed.
**/
extern bool libspdm_hmac_sm3_256_final(void *hmac_sm3_256_ctx, uint8_t *hmac_value);
/**
* Computes the HMAC-SM3-256 digest of an input data buffer.
*
* This function performs the HMAC-SM3-256 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SM3-256 digest
* value (32 bytes).
*
* @retval true HMAC-SM3-256 digest computation succeeded.
* @retval false HMAC-SM3-256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sm3_256_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SM3_256_SUPPORT */
#endif /* CRYPTLIB_MAC_H */

/* ==== Next header in this concatenated listing: cryptlib_rng.h (new file, 30 lines) ==== */
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_RNG_H
#define CRYPTLIB_RNG_H
/*=====================================================================================
* Random Number Generation Primitive
*=====================================================================================*/
/**
* Generates a random byte stream of the specified size. If initialization, testing, or seeding of
* the (pseudo)random number generator is required it should be done before this function is called.
*
* If output is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] output Pointer to buffer to receive random value.
* @param[in] size Size of random bytes to generate.
*
* @retval true Random byte stream generated successfully.
* @retval false Generation of random byte stream failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_random_bytes(uint8_t *output, size_t size);
#endif /* CRYPTLIB_RNG_H */

/* ==== Next header in this concatenated listing: cryptlib_rsa.h (new file, 274 lines) ==== */
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_RSA_H
#define CRYPTLIB_RSA_H
/*=====================================================================================
* RSA Cryptography Primitives
*=====================================================================================
*/
#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT)
/* RSA key tags used by the libspdm_rsa_set_key() function to identify which
* key component is being set.
*/
typedef enum {
LIBSPDM_RSA_KEY_N, /*< RSA public modulus (N) */
LIBSPDM_RSA_KEY_E, /*< RSA public exponent (e) */
LIBSPDM_RSA_KEY_D, /*< RSA private exponent (d) */
LIBSPDM_RSA_KEY_P, /*< RSA secret prime factor of the modulus (p) */
LIBSPDM_RSA_KEY_Q, /*< RSA secret prime factor of the modulus (q) */
LIBSPDM_RSA_KEY_DP, /*< p's CRT exponent (== d mod (p - 1)) */
LIBSPDM_RSA_KEY_DQ, /*< q's CRT exponent (== d mod (q - 1)) */
LIBSPDM_RSA_KEY_Q_INV /*< The CRT coefficient (== 1/q mod p) */
} libspdm_rsa_key_tag_t;
/**
* Allocates and initializes one RSA context for subsequent use.
*
* @return Pointer to the RSA context that has been initialized.
* If the allocation fails, libspdm_rsa_new() returns NULL.
**/
extern void *libspdm_rsa_new(void);
/**
* Generates RSA context from DER-encoded public key data.
*
* The public key is ASN.1 DER-encoded as RFC7250 describes,
* namely, the SubjectPublicKeyInfo structure of a X.509 certificate.
*
* @param[in] der_data Pointer to the DER-encoded public key data.
* @param[in] der_size Size of the DER-encoded public key data in bytes.
* @param[out] rsa_context Pointer to newly generated RSA context which contains the
* RSA public key component.
* Use libspdm_rsa_free() function to free the resource.
*
* If der_data is NULL, then return false.
* If rsa_context is NULL, then return false.
*
* @retval true RSA context was generated successfully.
* @retval false Invalid DER public key data.
**/
extern bool libspdm_rsa_get_public_key_from_der(const uint8_t *der_data,
size_t der_size,
void **rsa_context);
/**
* Release the specified RSA context.
*
* If rsa_context is NULL, then return false.
*
* @param[in] rsa_context Pointer to the RSA context to be released.
**/
extern void libspdm_rsa_free(void *rsa_context);
/**
* Sets the tag-designated key component into the established RSA context.
*
* This function sets the tag-designated RSA key component into the established
* RSA context from the user-specified non-negative integer (octet string format
* represented in RSA PKCS#1).
* If big_number is NULL, then the specified key component in RSA context is cleared.
* If rsa_context is NULL, then return false.
*
* @param[in, out] rsa_context Pointer to RSA context being set.
* @param[in] key_tag tag of RSA key component being set.
* @param[in] big_number Pointer to octet integer buffer.
* If NULL, then the specified key component in RSA
* context is cleared.
* @param[in] bn_size Size of big number buffer in bytes.
* If big_number is NULL, then it is ignored.
*
* @retval true RSA key component was set successfully.
* @retval false Invalid RSA key component tag.
**/
extern bool libspdm_rsa_set_key(void *rsa_context, const libspdm_rsa_key_tag_t key_tag,
const uint8_t *big_number, size_t bn_size);
#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */
#if LIBSPDM_RSA_SSA_SUPPORT
/**
* Carries out the RSA-SSA signature generation with EMSA-PKCS1-v1_5 encoding scheme.
*
* This function carries out the RSA-SSA signature generation with EMSA-PKCS1-v1_5 encoding scheme
* defined in RSA PKCS#1. If the signature buffer is too small to hold the contents of signature,
* false is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
* hash_size must match the digest size of hash_nid. hash_nid may be SHA256, SHA384, SHA512,
* SHA3_256, SHA3_384, or SHA3_512.
* If sig_size is large enough but signature is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] rsa_context Pointer to RSA context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be signed.
* @param[in] hash_size Size of the message hash in bytes.
* @param[out] signature Pointer to buffer to receive RSA PKCS1-v1_5 signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in PKCS1-v1_5.
* @retval false signature generation failed.
* @retval false sig_size is too small.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_pkcs1_sign_with_nid(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash,
size_t hash_size, uint8_t *signature,
size_t *sig_size);
/**
* Verifies the RSA-SSA signature with EMSA-PKCS1-v1_5 encoding scheme defined in RSA PKCS#1.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
* If signature is NULL, then return false.
* hash_size must match the digest size of hash_nid. hash_nid may be SHA256, SHA384, SHA512,
* SHA3_256, SHA3_384, or SHA3_512.
*
* @param[in] rsa_context Pointer to RSA context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be checked.
* @param[in] hash_size Size of the message hash in bytes.
* @param[in] signature Pointer to RSA PKCS1-v1_5 signature to be verified.
* @param[in] sig_size Size of signature in bytes.
*
* @retval true Valid signature encoded in PKCS1-v1_5.
* @retval false Invalid signature or invalid RSA context.
**/
extern bool libspdm_rsa_pkcs1_verify_with_nid(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash,
size_t hash_size, const uint8_t *signature,
size_t sig_size);
#endif /* LIBSPDM_RSA_SSA_SUPPORT */
#if LIBSPDM_RSA_PSS_SUPPORT
/**
* Carries out the RSA-SSA signature generation with EMSA-PSS encoding scheme.
*
* This function carries out the RSA-SSA signature generation with EMSA-PSS encoding scheme defined
* in RSA PKCS#1 v2.2.
*
* The salt length is same as digest length.
*
* If the signature buffer is too small to hold the contents of signature, false
* is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
* hash_size must match the digest size of hash_nid. nid may be SHA256, SHA384, SHA512, SHA3_256,
* SHA3_384, or SHA3_512.
* If sig_size is large enough but signature is NULL, then return false.
*
* @param[in] rsa_context Pointer to RSA context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be signed.
* @param[in] hash_size Size of the message hash in bytes.
* @param[out] signature Pointer to buffer to receive RSA-SSA PSS signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in RSA-SSA PSS.
* @retval false signature generation failed.
* @retval false sig_size is too small.
**/
extern bool libspdm_rsa_pss_sign(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size);
/**
* Verifies the RSA-SSA signature with EMSA-PSS encoding scheme defined in
* RSA PKCS#1 v2.2.
*
* The salt length is same as digest length.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
* If signature is NULL, then return false.
* hash_size must match the digest size of hash_nid. nid may be SHA256, SHA384, SHA512, SHA3_256,
* SHA3_384, or SHA3_512.
*
* @param[in] rsa_context Pointer to RSA context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be checked.
* @param[in] hash_size Size of the message hash in bytes.
* @param[in] signature Pointer to RSA-SSA PSS signature to be verified.
* @param[in] sig_size Size of signature in bytes.
*
* @retval true Valid signature encoded in RSA-SSA PSS.
* @retval false Invalid signature or invalid RSA context.
**/
extern bool libspdm_rsa_pss_verify(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size);
#if LIBSPDM_FIPS_MODE
/**
* Carries out the RSA-SSA signature generation with EMSA-PSS encoding scheme for FIPS test.
*
* This function carries out the RSA-SSA signature generation with EMSA-PSS encoding scheme defined in
* RSA PKCS#1 v2.2 for FIPS test.
*
* The salt length is zero.
*
* If the signature buffer is too small to hold the contents of signature, false
* is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
* The hash_size must match the hash_nid. The nid could be SHA256, SHA384, SHA512, SHA3_256, SHA3_384, SHA3_512.
* If sig_size is large enough but signature is NULL, then return false.
*
* @param[in] rsa_context Pointer to RSA context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be signed.
* @param[in] hash_size size of the message hash in bytes.
* @param[out] signature Pointer to buffer to receive RSA-SSA PSS signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in RSA-SSA PSS.
* @retval false signature generation failed.
* @retval false sig_size is too small.
*
**/
extern bool libspdm_rsa_pss_sign_fips(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size);
/**
* Verifies the RSA-SSA signature with EMSA-PSS encoding scheme defined in
* RSA PKCS#1 v2.2 for FIPS test.
*
* The salt length is zero.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
* If signature is NULL, then return false.
* The hash_size must match the hash_nid. The nid could be SHA256, SHA384, SHA512, SHA3_256, SHA3_384, SHA3_512.
*
* @param[in] rsa_context Pointer to RSA context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be checked.
* @param[in] hash_size size of the message hash in bytes.
* @param[in] signature Pointer to RSA-SSA PSS signature to be verified.
* @param[in] sig_size size of signature in bytes.
*
* @retval true Valid signature encoded in RSA-SSA PSS.
* @retval false Invalid signature or invalid RSA context.
*
**/
extern bool libspdm_rsa_pss_verify_fips(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size);
#endif /*LIBSPDM_FIPS_MODE*/
#endif /* LIBSPDM_RSA_PSS_SUPPORT */
#endif /* CRYPTLIB_RSA_H */

View File

@@ -0,0 +1,217 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_SM2_H
#define CRYPTLIB_SM2_H
/*=====================================================================================
* Shang-Mi2 Primitives
*=====================================================================================*/
#if LIBSPDM_SM2_DSA_SUPPORT
/**
* Allocates and Initializes one Shang-Mi2 context for subsequent use.
*
* @param nid cipher NID
*
* @return Pointer to the Shang-Mi2 context that has been initialized.
* If the allocation fails, libspdm_sm2_dsa_new_by_nid() returns NULL.
**/
extern void *libspdm_sm2_dsa_new_by_nid(size_t nid);
/**
* Generates Shang-Mi2 context from DER-encoded public key data.
*
* The public key is ASN.1 DER-encoded as RFC7250 describes,
* namely, the SubjectPublicKeyInfo structure of a X.509 certificate.
*
* @param[in] der_data Pointer to the DER-encoded public key data.
* @param[in] der_size Size of the DER-encoded public key data in bytes.
* @param[out] sm2_context Pointer to newly generated SM2 context which contains the
* SM2 public key component.
* Use the libspdm_sm2_dsa_free() function to free the resource.
*
* If der_data is NULL, then return false.
* If sm2_context is NULL, then return false.
*
* @retval true SM2 context was generated successfully.
* @retval false Invalid DER public key data.
*
**/
extern bool libspdm_sm2_get_public_key_from_der(const uint8_t *der_data,
size_t der_size,
void **sm2_context);
/**
* Release the specified sm2 context.
*
* @param[in] sm2_context Pointer to the sm2 context to be released.
**/
extern void libspdm_sm2_dsa_free(void *sm2_context);
/**
* Carries out the SM2 signature, based upon GB/T 32918.2-2016: SM2 - Part2.
*
* This function carries out the SM2 signature.
* If the signature buffer is too small to hold the contents of signature, false
* is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If sm2_context is NULL, then return false.
* If message is NULL, then return false.
* hash_nid must be SM3_256.
* If sig_size is large enough but signature is NULL, then return false.
*
* The id_a_size must be smaller than 2^16-1.
* The sig_size is 64. first 32-byte is R, second 32-byte is S.
*
* @param[in] sm2_context Pointer to sm2 context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] id_a The ID-A of the signing context.
* @param[in] id_a_size Size of ID-A signing context.
* @param[in] message Pointer to octet message to be signed (before hash).
* @param[in] size Size of the message in bytes.
* @param[out] signature Pointer to buffer to receive SM2 signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in SM2.
* @retval false signature generation failed.
* @retval false sig_size is too small.
**/
extern bool libspdm_sm2_dsa_sign(const void *sm2_context, size_t hash_nid,
const uint8_t *id_a, size_t id_a_size,
const uint8_t *message, size_t size,
uint8_t *signature, size_t *sig_size);
/**
* Verifies the SM2 signature, based upon GB/T 32918.2-2016: SM2 - Part2.
*
* If sm2_context is NULL, then return false.
* If message is NULL, then return false.
* If signature is NULL, then return false.
* hash_nid must be SM3_256.
*
* The id_a_size must be smaller than 2^16-1.
* The sig_size is 64. first 32-byte is R, second 32-byte is S.
*
* @param[in] sm2_context Pointer to SM2 context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] id_a The ID-A of the signing context.
* @param[in] id_a_size Size of ID-A signing context.
* @param[in] message Pointer to octet message to be checked (before hash).
* @param[in] size Size of the message in bytes.
* @param[in] signature Pointer to SM2 signature to be verified.
* @param[in] sig_size Size of signature in bytes.
*
* @retval true Valid signature encoded in SM2.
* @retval false Invalid signature or invalid sm2 context.
*
**/
extern bool libspdm_sm2_dsa_verify(const void *sm2_context, size_t hash_nid,
const uint8_t *id_a, size_t id_a_size,
const uint8_t *message, size_t size,
const uint8_t *signature, size_t sig_size);
#endif /* LIBSPDM_SM2_DSA_SUPPORT */
#if LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT
/**
* Allocates and Initializes one Shang-Mi2 context for subsequent use.
*
* @param nid cipher NID
*
* @return Pointer to the Shang-Mi2 context that has been initialized.
* If the allocation fails, libspdm_sm2_key_exchange_new_by_nid() returns NULL.
**/
extern void *libspdm_sm2_key_exchange_new_by_nid(size_t nid);
/**
* Release the specified sm2 context.
*
* @param[in] sm2_context Pointer to the sm2 context to be released.
*
**/
extern void libspdm_sm2_key_exchange_free(void *sm2_context);
/**
* Initialize the specified sm2 context.
*
* @param[in] sm2_context Pointer to the sm2 context to be initialized.
* @param[in] hash_nid hash NID, only SM3 is valid.
* @param[in] id_a The ID-A of the key exchange context.
* @param[in] id_a_size Size of ID-A key exchange context.
* @param[in] id_b The ID-B of the key exchange context.
* @param[in] id_b_size Size of ID-B key exchange context.
* @param[in] is_initiator If the caller is initiator.
*
* @retval true sm2 context is initialized.
* @retval false sm2 context is not initialized.
**/
extern bool libspdm_sm2_key_exchange_init(const void *sm2_context, size_t hash_nid,
const uint8_t *id_a, size_t id_a_size,
const uint8_t *id_b, size_t id_b_size,
bool is_initiator);
/**
* Generates sm2 key and returns sm2 public key (X, Y), based upon GB/T 32918.3-2016: SM2 - Part3.
*
* This function generates random secret, and computes the public key (X, Y), which is
* returned via parameter public, public_size.
* X is the first half of public with size being public_size / 2,
* Y is the second half of public with size being public_size / 2.
* sm2 context is updated accordingly.
* If the public buffer is too small to hold the public X, Y, false is returned and
* public_size is set to the required buffer size to obtain the public X, Y.
*
* The public_size is 64. first 32-byte is X, second 32-byte is Y.
*
* If sm2_context is NULL, then return false.
* If public_size is NULL, then return false.
* If public_size is large enough but public is NULL, then return false.
*
* @param[in, out] sm2_context Pointer to the sm2 context.
* @param[out] public_data Pointer to the buffer to receive generated public X,Y.
* @param[in, out] public_size On input, the size of public buffer in bytes.
* On output, the size of data returned in public buffer in bytes.
*
* @retval true sm2 public X,Y generation succeeded.
* @retval false sm2 public X,Y generation failed.
* @retval false public_size is not large enough.
**/
extern bool libspdm_sm2_key_exchange_generate_key(void *sm2_context, uint8_t *public_data,
size_t *public_size);
/**
* Computes exchanged common key, based upon GB/T 32918.3-2016: SM2 - Part3.
*
* Given peer's public key (X, Y), this function computes the exchanged common key,
* based on its own context including value of curve parameter and random secret.
* X is the first half of peer_public with size being peer_public_size / 2,
* Y is the second half of peer_public with size being peer_public_size / 2.
*
* If sm2_context is NULL, then return false.
* If peer_public is NULL, then return false.
* If peer_public_size is 0, then return false.
* If key is NULL, then return false.
*
* The id_a_size and id_b_size must be smaller than 2^16-1.
* The peer_public_size is 64. first 32-byte is X, second 32-byte is Y.
* The key_size must be smaller than 2^32-1, limited by KDF function.
*
* @param[in, out] sm2_context Pointer to the sm2 context.
* @param[in] peer_public Pointer to the peer's public X,Y.
* @param[in] peer_public_size Size of peer's public X,Y in bytes.
* @param[out] key Pointer to the buffer to receive generated key.
* @param[in] key_size On input, the size of key buffer in bytes.
*
* @retval true sm2 exchanged key generation succeeded.
* @retval false sm2 exchanged key generation failed.
**/
extern bool libspdm_sm2_key_exchange_compute_key(void *sm2_context,
const uint8_t *peer_public,
size_t peer_public_size, uint8_t *key,
size_t *key_size);
#endif /* LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT */
#endif /* CRYPTLIB_SM2_H */

View File

@@ -0,0 +1,91 @@
/**
* Copyright Notice:
* Copyright 2021-2024 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef LIBSPDM_LIB_CONFIG_H
#define LIBSPDM_LIB_CONFIG_H
#ifndef LIBSPDM_CONFIG
#include "library/spdm_lib_config.h"
#else
#include LIBSPDM_CONFIG
#endif
/* LIBSPDM_DEBUG_ENABLE, when defined by the Integrator, acts as a master
 * switch: it overrides the three fine-grained debug controls below with its
 * own value (enabling or disabling assertions, prints, and debug blocks
 * together). */
#if defined(LIBSPDM_DEBUG_ENABLE)
#undef LIBSPDM_DEBUG_ASSERT_ENABLE
#undef LIBSPDM_DEBUG_PRINT_ENABLE
#undef LIBSPDM_DEBUG_BLOCK_ENABLE
#define LIBSPDM_DEBUG_ASSERT_ENABLE (LIBSPDM_DEBUG_ENABLE)
#define LIBSPDM_DEBUG_PRINT_ENABLE (LIBSPDM_DEBUG_ENABLE)
#define LIBSPDM_DEBUG_BLOCK_ENABLE (LIBSPDM_DEBUG_ENABLE)
#endif /* defined(LIBSPDM_DEBUG_ENABLE) */
/* When FIPS mode is enabled, force-disable the algorithms that are not
 * FIPS-approved: the ShangMi suite (SM2 DSA, SM2 key exchange, SM4-GCM,
 * SM3) and ChaCha20-Poly1305. Each macro is #undef'd first so a value set
 * in the Integrator's config cannot re-enable it. */
#if LIBSPDM_FIPS_MODE
#undef LIBSPDM_SM2_DSA_P256_SUPPORT
#define LIBSPDM_SM2_DSA_P256_SUPPORT 0
#undef LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT
#define LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT 0
#undef LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT
#define LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT 0
#undef LIBSPDM_AEAD_SM4_128_GCM_SUPPORT
#define LIBSPDM_AEAD_SM4_128_GCM_SUPPORT 0
#undef LIBSPDM_SM3_256_SUPPORT
#define LIBSPDM_SM3_256_SUPPORT 0
#endif /*LIBSPDM_FIPS_MODE*/
/* define crypto algorithm without parameter */
#define LIBSPDM_RSA_SSA_SUPPORT ((LIBSPDM_RSA_SSA_2048_SUPPORT) || \
(LIBSPDM_RSA_SSA_3072_SUPPORT) || \
(LIBSPDM_RSA_SSA_4096_SUPPORT))
#define LIBSPDM_RSA_PSS_SUPPORT ((LIBSPDM_RSA_PSS_2048_SUPPORT) || \
(LIBSPDM_RSA_PSS_3072_SUPPORT) || \
(LIBSPDM_RSA_PSS_4096_SUPPORT))
#define LIBSPDM_ECDSA_SUPPORT ((LIBSPDM_ECDSA_P256_SUPPORT) || \
(LIBSPDM_ECDSA_P384_SUPPORT) || \
(LIBSPDM_ECDSA_P521_SUPPORT))
#define LIBSPDM_SM2_DSA_SUPPORT (LIBSPDM_SM2_DSA_P256_SUPPORT)
#define LIBSPDM_EDDSA_SUPPORT ((LIBSPDM_EDDSA_ED25519_SUPPORT) || \
(LIBSPDM_EDDSA_ED448_SUPPORT))
#define LIBSPDM_FFDHE_SUPPORT ((LIBSPDM_FFDHE_2048_SUPPORT) || \
(LIBSPDM_FFDHE_3072_SUPPORT) || \
(LIBSPDM_FFDHE_4096_SUPPORT))
#define LIBSPDM_ECDHE_SUPPORT ((LIBSPDM_ECDHE_P256_SUPPORT) || \
(LIBSPDM_ECDHE_P384_SUPPORT) || \
(LIBSPDM_ECDHE_P521_SUPPORT))
#define LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT (LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT)
#define LIBSPDM_AEAD_GCM_SUPPORT ((LIBSPDM_AEAD_AES_128_GCM_SUPPORT) || \
(LIBSPDM_AEAD_AES_256_GCM_SUPPORT))
#define LIBSPDM_AEAD_SM4_SUPPORT (LIBSPDM_AEAD_SM4_128_GCM_SUPPORT)
#define LIBSPDM_SHA2_SUPPORT ((LIBSPDM_SHA256_SUPPORT) || \
(LIBSPDM_SHA384_SUPPORT) || \
(LIBSPDM_SHA512_SUPPORT))
#define LIBSPDM_SHA3_SUPPORT ((LIBSPDM_SHA3_256_SUPPORT) || \
(LIBSPDM_SHA3_384_SUPPORT) || \
(LIBSPDM_SHA3_512_SUPPORT))
#define LIBSPDM_SM3_SUPPORT (LIBSPDM_SM3_256_SUPPORT)
#if LIBSPDM_CHECK_MACRO
#include "internal/libspdm_macro_check.h"
#endif /* LIBSPDM_CHECK_MACRO */
#endif /* LIBSPDM_LIB_CONFIG_H */

View File

@@ -0,0 +1,170 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __INTERNAL_CRYPT_LIB_H__
#define __INTERNAL_CRYPT_LIB_H__
/*
* This code uses Linux Kernel Crypto API extensively. Web page written by
* Stephan Mueller and Marek Vasut is a good starting reference on how linux
* kernel provides crypto api.
*/
#include "conftest.h"
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/random.h>
#include <linux/string.h>
// Check if ECDH/ECDSA are there, on some platforms they might not be...
#ifndef AUTOCONF_INCLUDED
#if defined(NV_GENERATED_AUTOCONF_H_PRESENT)
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
#endif
#endif
#if \
(defined(CONFIG_CRYPTO_AEAD) || defined(CONFIG_CRYPTO_AEAD_MODULE)) && \
(defined(CONFIG_CRYPTO_AKCIPHER) || defined(CONFIG_CRYPTO_AKCIPHER_MODULE)) && \
(defined(CONFIG_CRYPTO_SKCIPHER) || defined(CONFIG_CRYPTO_SKCIPHER_MODULE)) && \
(defined(CONFIG_CRYPTO_HASH) || defined(CONFIG_CRYPTO_HASH_MODULE)) && \
(defined(CONFIG_CRYPTO_HMAC) || defined(CONFIG_CRYPTO_HMAC_MODULE)) && \
(defined(CONFIG_CRYPTO_ECDH) || defined(CONFIG_CRYPTO_ECDH_MODULE)) && \
(defined(CONFIG_CRYPTO_ECDSA) || defined(CONFIG_CRYPTO_ECDSA_MODULE)) && \
(defined(CONFIG_CRYPTO_RSA) || defined(CONFIG_CRYPTO_RSA_MODULE)) && \
(defined(CONFIG_X509_CERTIFICATE_PARSER) || defined(CONFIG_X509_CERTIFICATE_PARSER_MODULE))
#define NV_CONFIG_CRYPTO_PRESENT 1
#endif
/*
* It is possible that we don't have access to all the functions we need. This
* could be because we are running a non-GPL kernel, because the kernel is too
* old, or simply because the user disabled them. If we can use LKCA, include
* its headers; otherwise define stubs that return errors.
*/
#if defined(NV_CRYPTO_PRESENT) && defined (NV_CONFIG_CRYPTO_PRESENT) && \
(defined(NV_CRYPTO_AKCIPHER_VERIFY_PRESENT) || \
(defined(NV_CRYPTO_SIG_H_PRESENT) && defined(NV_ECC_DIGITS_FROM_BYTES_PRESENT)))
#define USE_LKCA 1
#endif
#ifdef USE_LKCA
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sm3.h>
// HASH_MAX_DIGESTSIZE is available since 4.20.
// This value is accurate as of 6.1
#ifndef HASH_MAX_DIGESTSIZE
#define HASH_MAX_DIGESTSIZE 64
#endif
#else
// LKCA is unavailable: stub out the shash entry points so callers still
// compile but fail cleanly at runtime with -ENOMEM.
// (Fix: the original defined crypto_shash_update twice, back to back — a
// copy-paste duplicate; the redundant identical definition is removed.)
struct shash_desc;
struct crypto_shash;
#define crypto_shash_setkey(...) -ENOMEM
#define crypto_shash_init(...) -ENOMEM
#define crypto_shash_update(...) -ENOMEM
#define crypto_shash_final(...) -ENOMEM
#endif
/* CHAR_BIT normally comes from <limits.h>, which is not usable in this
 * kernel build, so define it here. */
#define CHAR_BIT 8U
/* NOTE(review): SIZE_MAX is redefined to 8, not to the maximum value of
 * size_t as <stdint.h> defines it. Presumably intentional for this build of
 * the libspdm headers included below -- TODO confirm against libspdm's uses
 * of SIZE_MAX before changing. */
#undef SIZE_MAX
#define SIZE_MAX 8
#include "library/cryptlib.h"
/* libspdm assertions are compiled out entirely in this build. */
#define LIBSPDM_ASSERT(...)
/* Opaque AEAD context backed by the Linux Kernel Crypto API. */
struct lkca_aead_ctx;
/* Allocate an AEAD context for the named algorithm; release it with
 * lkca_aead_free(). Return convention is int -- presumably 0 on success and
 * a negative errno on failure, matching the LKCA style; confirm in the
 * implementation. */
int lkca_aead_alloc(struct lkca_aead_ctx **ctx, char const *alg);
void lkca_aead_free(struct lkca_aead_ctx *ctx);
/* AEAD encrypt/decrypt on a pre-allocated context; 'enc' selects the
 * direction. data_out_size is in/out (buffer size in, bytes written out). */
int lkca_aead_ex(struct lkca_aead_ctx *ctx,
const uint8_t *key, size_t key_size,
uint8_t *iv, size_t iv_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size,
bool enc);
/* One-shot AEAD entry point in libspdm's calling convention, taking the
 * algorithm by name plus additional authenticated data (a_data). */
int libspdm_aead(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size,
bool enc, char const *alg);
/* Hash and HMAC helpers (shash-based). lkca_hash_new() allocates a digest
 * context for the named algorithm; free it with lkca_hash_free(). The
 * *_all() variants compute the digest of a single buffer in one call. */
void *lkca_hash_new(const char* alg_name);
void lkca_hash_free(struct shash_desc *ctx);
bool lkca_hash_duplicate(struct shash_desc *dst, struct shash_desc const *src);
bool lkca_hash_all(const char* alg_name, const void *data,
size_t data_size, uint8_t *hash_value);
bool lkca_hmac_duplicate(struct shash_desc *dst, struct shash_desc const *src);
bool lkca_hmac_set_key(struct shash_desc *ctx, const uint8_t *key, size_t key_size);
bool lkca_hmac_all(const char* alg_name, const uint8_t *key, size_t key_size,
const uint8_t *data, size_t data_size, uint8_t *hash_value);
/* HKDF helpers: extract-and-expand in one step, or expand from an existing
 * pseudorandom key (prk). */
bool lkca_hkdf_extract_and_expand(const char *alg_name,
const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
bool lkca_hkdf_expand(const char *alg_name,
const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
/* Elliptic-curve key management, ECDH key agreement, and ECDSA signature
 * verification. Contexts are opaque (void *). */
bool lkca_ecdsa_set_priv_key(void *context, uint8_t *key, size_t key_size);
bool lkca_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
size_t public_key_size);
bool lkca_ec_get_pub_key(void *ec_context, uint8_t *public_key,
size_t *public_key_size);
bool lkca_ec_generate_key(void *ec_context, uint8_t *public_data,
size_t *public_size);
bool lkca_ec_compute_key(void *ec_context, const uint8_t *peer_public,
size_t peer_public_size, uint8_t *key,
size_t *key_size);
bool lkca_ecdsa_verify(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size);
/* RSA signature helpers over a precomputed message hash (hash_nid selects
 * the digest). sig_size is in/out for the signing functions. */
bool lkca_rsa_verify(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size);
bool lkca_rsa_pkcs1_sign(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size);
bool lkca_rsa_pss_sign(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size);
#endif

View File

@@ -0,0 +1,109 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
/** @file
* Defines base cryptographic library APIs.
* The Base Cryptographic Library provides implementations of basic cryptography
* primitives (hash Serials, HMAC, AES, RSA, Diffie-Hellman, Elliptic Curve, etc) for security
* functionality enabling.
**/
#ifndef CRYPTLIB_H
#define CRYPTLIB_H
#include "internal/libspdm_lib_config.h"
#define LIBSPDM_CRYPTO_NID_NULL 0x0000
/* Hash */
#define LIBSPDM_CRYPTO_NID_SHA256 0x0001
#define LIBSPDM_CRYPTO_NID_SHA384 0x0002
#define LIBSPDM_CRYPTO_NID_SHA512 0x0003
#define LIBSPDM_CRYPTO_NID_SHA3_256 0x0004
#define LIBSPDM_CRYPTO_NID_SHA3_384 0x0005
#define LIBSPDM_CRYPTO_NID_SHA3_512 0x0006
#define LIBSPDM_CRYPTO_NID_SM3_256 0x0007
/* Signing */
#define LIBSPDM_CRYPTO_NID_RSASSA2048 0x0101
#define LIBSPDM_CRYPTO_NID_RSASSA3072 0x0102
#define LIBSPDM_CRYPTO_NID_RSASSA4096 0x0103
#define LIBSPDM_CRYPTO_NID_RSAPSS2048 0x0104
#define LIBSPDM_CRYPTO_NID_RSAPSS3072 0x0105
#define LIBSPDM_CRYPTO_NID_RSAPSS4096 0x0106
#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P256 0x0107
#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P384 0x0108
#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P521 0x0109
#define LIBSPDM_CRYPTO_NID_SM2_DSA_P256 0x010A
#define LIBSPDM_CRYPTO_NID_EDDSA_ED25519 0x010B
#define LIBSPDM_CRYPTO_NID_EDDSA_ED448 0x010C
/* Key Exchange */
#define LIBSPDM_CRYPTO_NID_FFDHE2048 0x0201
#define LIBSPDM_CRYPTO_NID_FFDHE3072 0x0202
#define LIBSPDM_CRYPTO_NID_FFDHE4096 0x0203
#define LIBSPDM_CRYPTO_NID_SECP256R1 0x0204
#define LIBSPDM_CRYPTO_NID_SECP384R1 0x0205
#define LIBSPDM_CRYPTO_NID_SECP521R1 0x0206
#define LIBSPDM_CRYPTO_NID_SM2_KEY_EXCHANGE_P256 0x0207
#define LIBSPDM_CRYPTO_NID_CURVE_X25519 0x0208
#define LIBSPDM_CRYPTO_NID_CURVE_X448 0x0209
/* AEAD */
#define LIBSPDM_CRYPTO_NID_AES_128_GCM 0x0301
#define LIBSPDM_CRYPTO_NID_AES_256_GCM 0x0302
#define LIBSPDM_CRYPTO_NID_CHACHA20_POLY1305 0x0303
#define LIBSPDM_CRYPTO_NID_SM4_128_GCM 0x0304
/* X.509 v3 key usage extension flags. */
#define LIBSPDM_CRYPTO_X509_KU_DIGITAL_SIGNATURE 0x80
#define LIBSPDM_CRYPTO_X509_KU_NON_REPUDIATION 0x40
#define LIBSPDM_CRYPTO_X509_KU_KEY_ENCIPHERMENT 0x20
#define LIBSPDM_CRYPTO_X509_KU_DATA_ENCIPHERMENT 0x10
#define LIBSPDM_CRYPTO_X509_KU_KEY_AGREEMENT 0x08
#define LIBSPDM_CRYPTO_X509_KU_KEY_CERT_SIGN 0x04
#define LIBSPDM_CRYPTO_X509_KU_CRL_SIGN 0x02
#define LIBSPDM_CRYPTO_X509_KU_ENCIPHER_ONLY 0x01
#define LIBSPDM_CRYPTO_X509_KU_DECIPHER_ONLY 0x8000
/* These constants comply with the DER encoded ASN.1 type tags. */
#define LIBSPDM_CRYPTO_ASN1_BOOLEAN 0x01
#define LIBSPDM_CRYPTO_ASN1_INTEGER 0x02
#define LIBSPDM_CRYPTO_ASN1_BIT_STRING 0x03
#define LIBSPDM_CRYPTO_ASN1_OCTET_STRING 0x04
#define LIBSPDM_CRYPTO_ASN1_NULL 0x05
#define LIBSPDM_CRYPTO_ASN1_OID 0x06
#define LIBSPDM_CRYPTO_ASN1_UTF8_STRING 0x0C
#define LIBSPDM_CRYPTO_ASN1_SEQUENCE 0x10
#define LIBSPDM_CRYPTO_ASN1_SET 0x11
#define LIBSPDM_CRYPTO_ASN1_PRINTABLE_STRING 0x13
#define LIBSPDM_CRYPTO_ASN1_T61_STRING 0x14
#define LIBSPDM_CRYPTO_ASN1_IA5_STRING 0x16
#define LIBSPDM_CRYPTO_ASN1_UTC_TIME 0x17
#define LIBSPDM_CRYPTO_ASN1_GENERALIZED_TIME 0x18
#define LIBSPDM_CRYPTO_ASN1_UNIVERSAL_STRING 0x1C
#define LIBSPDM_CRYPTO_ASN1_BMP_STRING 0x1E
#define LIBSPDM_CRYPTO_ASN1_PRIMITIVE 0x00
#define LIBSPDM_CRYPTO_ASN1_CONSTRUCTED 0x20
#define LIBSPDM_CRYPTO_ASN1_CONTEXT_SPECIFIC 0x80
#define LIBSPDM_CRYPTO_ASN1_TAG_CLASS_MASK 0xC0
#define LIBSPDM_CRYPTO_ASN1_TAG_PC_MASK 0x20
#define LIBSPDM_CRYPTO_ASN1_TAG_VALUE_MASK 0x1F
#include "hal/library/cryptlib/cryptlib_hash.h"
#include "hal/library/cryptlib/cryptlib_mac.h"
#include "hal/library/cryptlib/cryptlib_aead.h"
#include "hal/library/cryptlib/cryptlib_cert.h"
#include "hal/library/cryptlib/cryptlib_hkdf.h"
#include "hal/library/cryptlib/cryptlib_rsa.h"
#include "hal/library/cryptlib/cryptlib_ec.h"
#include "hal/library/cryptlib/cryptlib_dh.h"
#include "hal/library/cryptlib/cryptlib_ecd.h"
#include "hal/library/cryptlib/cryptlib_sm2.h"
#include "hal/library/cryptlib/cryptlib_rng.h"
#endif /* CRYPTLIB_H */

View File

@@ -0,0 +1,445 @@
/**
* Copyright Notice:
* Copyright 2021-2024 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef SPDM_LIB_CONFIG_H
#define SPDM_LIB_CONFIG_H
/* Code space optimization for optional messages.
*
* An Integrator of libspdm may not need all of the optional SPDM messages. The
* LIBSPDM_ENABLE_CAPABILITY_***_CAP compile time switches allow the Integrator to enable or disable
* capabilities and messages.
*/
/* SPDM 1.0 capabilities and messages. */
#ifndef LIBSPDM_ENABLE_CAPABILITY_CERT_CAP
#define LIBSPDM_ENABLE_CAPABILITY_CERT_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP
#define LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP
#define LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_VENDOR_DEFINED_MESSAGES
#define LIBSPDM_ENABLE_VENDOR_DEFINED_MESSAGES 1
#endif
/* SPDM 1.1 capabilities. */
#ifndef LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP
#define LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_PSK_CAP
#define LIBSPDM_ENABLE_CAPABILITY_PSK_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP
#define LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP
#define LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
#define LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP 1
#endif
/* SPDM 1.2 capabilities. */
#ifndef LIBSPDM_ENABLE_CAPABILITY_CSR_CAP
#define LIBSPDM_ENABLE_CAPABILITY_CSR_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_SET_CERT_CAP
#define LIBSPDM_ENABLE_CAPABILITY_SET_CERT_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP
#define LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP 1
#endif
/* SPDM 1.3 capabilities. */
#ifndef LIBSPDM_ENABLE_CAPABILITY_MEL_CAP
#define LIBSPDM_ENABLE_CAPABILITY_MEL_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_EVENT_CAP
#define LIBSPDM_ENABLE_CAPABILITY_EVENT_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_GET_KEY_PAIR_INFO_CAP
#define LIBSPDM_ENABLE_CAPABILITY_GET_KEY_PAIR_INFO_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_SET_KEY_PAIR_INFO_CAP
#define LIBSPDM_ENABLE_CAPABILITY_SET_KEY_PAIR_INFO_CAP 1
#endif
/* Includes SPDM 1.3 features for CSR messages. If enabled then LIBSPDM_ENABLE_CAPABILITY_CSR_CAP
* must also be enabled.
*/
#ifndef LIBSPDM_ENABLE_CAPABILITY_CSR_CAP_EX
#define LIBSPDM_ENABLE_CAPABILITY_CSR_CAP_EX 1
#endif
/* If 1 then endpoint supports sending GET_CERTIFICATE and GET_DIGESTS requests.
* If enabled and endpoint is a Responder then LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
* must also be enabled.
*/
#ifndef LIBSPDM_SEND_GET_CERTIFICATE_SUPPORT
#define LIBSPDM_SEND_GET_CERTIFICATE_SUPPORT 1
#endif
/* If 1 then endpoint supports sending CHALLENGE request.
* If enabled and endpoint is a Responder then LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
* must also be enabled.
*/
#ifndef LIBSPDM_SEND_CHALLENGE_SUPPORT
#define LIBSPDM_SEND_CHALLENGE_SUPPORT 1
#endif
/* If 1 then endpoint supports sending the GET_SUPPORTED_EVENT_TYPES, SUBSCRIBE_EVENT_TYPES, and
* encapsulated EVENT_ACK messages. In addition, LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP must also be
* 1.
*/
#ifndef LIBSPDM_EVENT_RECIPIENT_SUPPORT
#define LIBSPDM_EVENT_RECIPIENT_SUPPORT 1
#endif
/* When LIBSPDM_RESPOND_IF_READY_SUPPORT is 0 then
* - For a Requester, if the Responder sends a ResponseNotReady ERROR response then the error
* is immediately returned to the Integrator. The Requester cannot send a RESPOND_IF_READY
* request.
* - For a Responder, it cannot send a RESPOND_IF_READY ERROR response and does not support
* RESPOND_IF_READY.
* When LIBSPDM_RESPOND_IF_READY_SUPPORT is 1 then
* - For a Requester, if the Responder sends a ResponseNotReady ERROR response then libspdm
* waits an amount of time, as specified by the RDTExponent parameter, before sending
* RESPOND_IF_READY.
* - For a Responder, if its response state is NOT_READY then it will send a ResponseNotReady
* ERROR response to the Requester, and will accept a subsequent RESPOND_IF_READY request.
*/
#ifndef LIBSPDM_RESPOND_IF_READY_SUPPORT
#define LIBSPDM_RESPOND_IF_READY_SUPPORT 1
#endif
/* Enables FIPS 140-3 mode. */
#ifndef LIBSPDM_FIPS_MODE
#define LIBSPDM_FIPS_MODE 0
#endif
/* Enables assertions and debug printing. When `LIBSPDM_DEBUG_ENABLE` is defined it overrides or
* sets the values of `LIBSPDM_DEBUG_PRINT_ENABLE`, `LIBSPDM_DEBUG_ASSERT_ENABLE`, and
* `LIBSPDM_DEBUG_BLOCK_ENABLE` to the value of `LIBSPDM_DEBUG_ENABLE`.
*
* Note that if this file is used with CMake and `DTARGET=Release` is defined, then all debugging
* is disabled.
*/
#ifndef LIBSPDM_DEBUG_ENABLE
#define LIBSPDM_DEBUG_ENABLE 1
#endif
/* The SPDM specification allows a Responder to return up to 255 version entries in the `VERSION`
* response to the Requester, including duplicate entries. For a Requester this value specifies the
* maximum number of entries that libspdm will tolerate in a `VERSION` response before returning an
* error. A similar macro, `SPDM_MAX_VERSION_COUNT`, exists for the Responder. However this macro
* is not meant to be configured by the Integrator.
*/
#ifndef LIBSPDM_MAX_VERSION_COUNT
#define LIBSPDM_MAX_VERSION_COUNT 5
#endif
#if LIBSPDM_ENABLE_CAPABILITY_PSK_CAP
/* This value specifies the maximum size, in bytes, of the `PSK_EXCHANGE.RequesterContext` and,
* if supported by the Responder, `PSK_EXCHANGE_RSP.ResponderContext` fields. The fields are
* typically random or monotonically increasing numbers.
*/
#ifndef LIBSPDM_PSK_CONTEXT_LENGTH
#define LIBSPDM_PSK_CONTEXT_LENGTH LIBSPDM_MAX_HASH_SIZE
#endif
/* This value specifies the maximum size, in bytes, of the `PSK_EXCHANGE.PSKHint` field. */
#ifndef LIBSPDM_PSK_MAX_HINT_LENGTH
#define LIBSPDM_PSK_MAX_HINT_LENGTH 16
#endif
#endif /* LIBSPDM_ENABLE_CAPABILITY_PSK_CAP */
/* libspdm allows an Integrator to specify multiple root certificates as trust anchors when
* verifying certificate chains from an endpoint. This value specifies the maximum number of root
* certificates that libspdm can support.
*/
#ifndef LIBSPDM_MAX_ROOT_CERT_SUPPORT
#define LIBSPDM_MAX_ROOT_CERT_SUPPORT 10
#endif
/* If the Responder supports it a Requester is allowed to establish multiple secure sessions with
* the Responder. This value specifies the maximum number of sessions libspdm can support.
*/
#ifndef LIBSPDM_MAX_SESSION_COUNT
#define LIBSPDM_MAX_SESSION_COUNT 4
#endif
/* This value specifies the maximum size, in bytes, of a certificate chain that can be stored in a
* libspdm context.
*/
#ifndef LIBSPDM_MAX_CERT_CHAIN_SIZE
#define LIBSPDM_MAX_CERT_CHAIN_SIZE 0x1000
#endif
#ifndef LIBSPDM_MAX_MEASUREMENT_RECORD_SIZE
#define LIBSPDM_MAX_MEASUREMENT_RECORD_SIZE 0x1000
#endif
/* Partial certificates can be retrieved from a Requester or Responder and through multiple messages
* the complete certificate chain can be constructed. This value specifies the maximum size,
* in bytes, of a partial certificate that can be sent or received.
*/
#ifndef LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN
#define LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN 1024
#endif
/* Partial measurement extension log (MEL) can be retrieved from a Responder and through multiple
* messages the complete MEL can be constructed. This value specifies the maximum size, in bytes, of
* a partial MEL that can be sent or received.
*/
#ifndef LIBSPDM_MAX_MEL_BLOCK_LEN
#define LIBSPDM_MAX_MEL_BLOCK_LEN 1024
#endif
/* To ensure integrity in communication between the Requester and the Responder libspdm calculates
* cryptographic digests and signatures over multiple requests and responses. This value specifies
* whether libspdm will use a running calculation over the transcript, where requests and responses
* are discarded as they are cryptographically consumed, or whether libspdm will buffer the entire
* transcript before calculating the digest or signature.
*
* When LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT is 0 then a running calculation is used and less
* memory is needed.
* When LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT is 1 then the entire transcript is buffered and more
* memory is needed.
*/
#ifndef LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT
#define LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT 0
#endif
/* Cryptography Configuration
* In each category, at least one should be selected.
* NOTE: Not all combinations can be supported. E.g. don't mix NIST algorithms with SMx.*/
#ifndef LIBSPDM_RSA_SSA_2048_SUPPORT
#define LIBSPDM_RSA_SSA_2048_SUPPORT 1
#endif
#ifndef LIBSPDM_RSA_SSA_3072_SUPPORT
#define LIBSPDM_RSA_SSA_3072_SUPPORT 1
#endif
#ifndef LIBSPDM_RSA_SSA_4096_SUPPORT
#define LIBSPDM_RSA_SSA_4096_SUPPORT 1
#endif
#ifndef LIBSPDM_RSA_PSS_2048_SUPPORT
#define LIBSPDM_RSA_PSS_2048_SUPPORT 1
#endif
#ifndef LIBSPDM_RSA_PSS_3072_SUPPORT
#define LIBSPDM_RSA_PSS_3072_SUPPORT 1
#endif
#ifndef LIBSPDM_RSA_PSS_4096_SUPPORT
#define LIBSPDM_RSA_PSS_4096_SUPPORT 1
#endif
#ifndef LIBSPDM_ECDSA_P256_SUPPORT
#define LIBSPDM_ECDSA_P256_SUPPORT 1
#endif
#ifndef LIBSPDM_ECDSA_P384_SUPPORT
#define LIBSPDM_ECDSA_P384_SUPPORT 1
#endif
#ifndef LIBSPDM_ECDSA_P521_SUPPORT
#define LIBSPDM_ECDSA_P521_SUPPORT 1
#endif
#ifndef LIBSPDM_SM2_DSA_P256_SUPPORT
#define LIBSPDM_SM2_DSA_P256_SUPPORT 1
#endif
#ifndef LIBSPDM_EDDSA_ED25519_SUPPORT
#define LIBSPDM_EDDSA_ED25519_SUPPORT 1
#endif
#ifndef LIBSPDM_EDDSA_ED448_SUPPORT
#define LIBSPDM_EDDSA_ED448_SUPPORT 1
#endif
#ifndef LIBSPDM_FFDHE_2048_SUPPORT
#define LIBSPDM_FFDHE_2048_SUPPORT 1
#endif
#ifndef LIBSPDM_FFDHE_3072_SUPPORT
#define LIBSPDM_FFDHE_3072_SUPPORT 1
#endif
#ifndef LIBSPDM_FFDHE_4096_SUPPORT
#define LIBSPDM_FFDHE_4096_SUPPORT 1
#endif
#ifndef LIBSPDM_ECDHE_P256_SUPPORT
#define LIBSPDM_ECDHE_P256_SUPPORT 1
#endif
#ifndef LIBSPDM_ECDHE_P384_SUPPORT
#define LIBSPDM_ECDHE_P384_SUPPORT 1
#endif
#ifndef LIBSPDM_ECDHE_P521_SUPPORT
#define LIBSPDM_ECDHE_P521_SUPPORT 1
#endif
#ifndef LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT
#define LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT 1
#endif
#ifndef LIBSPDM_AEAD_AES_128_GCM_SUPPORT
#define LIBSPDM_AEAD_AES_128_GCM_SUPPORT 1
#endif
#ifndef LIBSPDM_AEAD_AES_256_GCM_SUPPORT
#define LIBSPDM_AEAD_AES_256_GCM_SUPPORT 1
#endif
#ifndef LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT
#define LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT 1
#endif
#ifndef LIBSPDM_AEAD_SM4_128_GCM_SUPPORT
#define LIBSPDM_AEAD_SM4_128_GCM_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA256_SUPPORT
#define LIBSPDM_SHA256_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA384_SUPPORT
#define LIBSPDM_SHA384_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA512_SUPPORT
#define LIBSPDM_SHA512_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA3_256_SUPPORT
#define LIBSPDM_SHA3_256_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA3_384_SUPPORT
#define LIBSPDM_SHA3_384_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA3_512_SUPPORT
#define LIBSPDM_SHA3_512_SUPPORT 1
#endif
#ifndef LIBSPDM_SM3_256_SUPPORT
#define LIBSPDM_SM3_256_SUPPORT 1
#endif
/* If 1 then endpoint supports parsing X.509 certificate chains. */
#ifndef LIBSPDM_CERT_PARSE_SUPPORT
#define LIBSPDM_CERT_PARSE_SUPPORT 1
#endif
/*
* MinDataTransferSize = 42
*
* H = HashLen = HmacLen = [32, 64]
* S = SigLen = [64, 512]
* D = ExchangeDataLen = [64, 512]
* R = RequesterContextLen >= 32
* R = ResponderContextLen >= 0
* O = OpaqueDataLen <= 1024
*
* Max Chunk No = 1, if (message size <= 42)
* Max Chunk No = [(message size + 4) / 30] roundup, if (message size > 42)
*
* +==========================+==========================================+=========+
* | Command | Size |MaxChunk |
* +==========================+==========================================+=========+
* | GET_VERSION | 4 | 1 |
* | VERSION {1.0, 1.1, 1.2} | 6 + 2 * 3 = 12 | 1 |
* +--------------------------+------------------------------------------+---------+
* | GET_CAPABILITIES 1.2 | 20 | 1 |
* | CAPABILITIES 1.2 | 20 | 1 |
* +--------------------------+------------------------------------------+---------+
* | ERROR | 4 | 1 |
* | ERROR(ResponseTooLarge) | 4 + 4 = 8 | 1 |
* | ERROR(LargeResponse) | 4 + 1 = 5 | 1 |
* | ERROR(ResponseNotReady) | 4 + 4 = 8 | 1 |
* +--------------------------+------------------------------------------+---------+
* | CHUNK_SEND header | 12 + L0 (0 or 4) | 1 |
* | CHUNK_RESPONSE header | 12 + L0 (0 or 4) | 1 |
* +==========================+==========================================+=========+
* | NEGOTIATE_ALGORITHMS 1.2 | 32 + 4 * 4 = 48 | 2 |
* | ALGORITHMS 1.2 | 36 + 4 * 4 = 52 | 2 |
* +--------------------------+------------------------------------------+---------+
* | GET_DIGESTS 1.2 | 4 | 1 |
* | DIGESTS 1.2 | 4 + H * SlotNum = [36, 516] | [1, 18] |
* +--------------------------+------------------------------------------+---------+
* | GET_CERTIFICATE 1.2 | 8 | 1 |
* | CERTIFICATE 1.2 | 8 + PortionLen | [1, ] |
* +--------------------------+------------------------------------------+---------+
* | CHALLENGE 1.2 | 40 | 1 |
* | CHALLENGE_AUTH 1.2 | 38 + H * 2 + S [+ O] = [166, 678] | [6, 23] |
* +--------------------------+------------------------------------------+---------+
* | GET_MEASUREMENTS 1.2 | 5 + Nonce (0 or 32) | 1 |
* | MEASUREMENTS 1.2 | 42 + MeasRecLen (+ S) [+ O] = [106, 554] | [4, 19] |
* +--------------------------+------------------------------------------+---------+
* | KEY_EXCHANGE 1.2 | 42 + D [+ O] = [106, 554] | [4, 19] |
* | KEY_EXCHANGE_RSP 1.2 | 42 + D + H + S (+ H) [+ O] = [234, 1194] | [8, 40] |
* +--------------------------+------------------------------------------+---------+
* | FINISH 1.2 | 4 (+ S) + H = [100, 580] | [4, 20] |
* | FINISH_RSP 1.2 | 4 (+ H) = [36, 69] | [1, 3] |
* +--------------------------+------------------------------------------+---------+
* | PSK_EXCHANGE 1.2 | 12 [+ PSKHint] + R [+ O] = 44 | 2 |
* | PSK_EXCHANGE_RSP 1.2 | 12 + R + H (+ H) [+ O] = [108, 172] | [4, 6] |
* +--------------------------+------------------------------------------+---------+
* | PSK_FINISH 1.2 | 4 + H = [36, 68] | [1, 3] |
* | PSK_FINISH_RSP 1.2 | 4 | 1 |
* +--------------------------+------------------------------------------+---------+
* | GET_CSR 1.2 | 8 + RequesterInfoLen [+ O] | [1, ] |
* | CSR 1.2 | 8 + CSRLength | [1, ] |
* +--------------------------+------------------------------------------+---------+
* | SET_CERTIFICATE 1.2 | 4 + CertChainLen | [1, ] |
* | SET_CERTIFICATE_RSP 1.2 | 4 | 1 |
* +==========================+==========================================+=========+
*/
/* Enable message logging.
* See https://github.com/DMTF/libspdm/blob/main/doc/user_guide.md#message-logging
* for more information.
*/
#ifndef LIBSPDM_ENABLE_MSG_LOG
#define LIBSPDM_ENABLE_MSG_LOG 1
#endif
/* Enable macro checking during compilation. */
#ifndef LIBSPDM_CHECK_MACRO
#define LIBSPDM_CHECK_MACRO 0
#endif
/* Enable compilation of libspdm_check_context function. After a libspdm context has been
* configured libspdm_check_context can be called to check that its configuration is correct.
*/
#ifndef LIBSPDM_CHECK_SPDM_CONTEXT
#define LIBSPDM_CHECK_SPDM_CONTEXT 1
#endif
/* Enable passing the SPDM context to HAL functions.
* This macro will be removed when libspdm 4.0 is released.
*/
#ifndef LIBSPDM_HAL_PASS_SPDM_CONTEXT
#define LIBSPDM_HAL_PASS_SPDM_CONTEXT 0
#endif
/* Enable additional checks for certificates.
* This macro will be removed when libspdm 4.0 is released.
*/
#ifndef LIBSPDM_ADDITIONAL_CHECK_CERT
#define LIBSPDM_ADDITIONAL_CHECK_CERT 0
#endif
#endif /* SPDM_LIB_CONFIG_H */

View File

@@ -0,0 +1,477 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
#include "nvspdm_cryptlib_extensions.h"
#ifdef USE_LKCA
// Maximum payload that can be staged through a bounce buffer (2 MiB).
#define BUFFER_SIZE (2 * 1024 * 1024)
#define AUTH_TAG_SIZE 16
// Preallocated state for AEAD operations: the transform, a reusable request,
// and bounce buffers used when callers pass memory that cannot be placed into
// a scatterlist directly (see libspdm_aead_prealloced()).
struct lkca_aead_ctx
{
struct crypto_aead *aead; // AEAD transform handle
struct aead_request *req; // request object reused across operations
char *a_data_buffer; // BUFFER_SIZE bounce buffer for associated data
char *in_buffer; // BUFFER_SIZE bounce buffer for input text
char *out_buffer; // BUFFER_SIZE bounce buffer for output text
char tag[AUTH_TAG_SIZE]; // staging area for the authentication tag
};
#endif
/*
 * Allocate and fully initialize an AEAD context for algorithm @alg.
 *
 * Preallocates the transform, the request and three BUFFER_SIZE bounce
 * buffers so the data path (libspdm_aead_prealloced) never has to allocate.
 *
 * On success stores the new context in *context and returns 0; on failure
 * returns a negative errno with nothing leaked.
 */
static int libspdm_aead_prealloc(void **context, char const *alg)
{
#ifndef USE_LKCA
    return -ENODEV;
#else
    struct lkca_aead_ctx *ctx;
    int rc;

    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    if (ctx == NULL) {
        return -ENOMEM;
    }

    ctx->aead = crypto_alloc_aead(alg, CRYPTO_ALG_TYPE_AEAD, 0);
    if (IS_ERR(ctx->aead)) {
        pr_notice("could not allocate AEAD algorithm\n");
        rc = -ENODEV;
        goto err_free_ctx;
    }

    ctx->req = aead_request_alloc(ctx->aead, GFP_KERNEL);
    if (ctx->req == NULL) {
        pr_info("could not allocate skcipher request\n");
        rc = -ENOMEM;
        goto err_free_aead;
    }

    ctx->a_data_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
    if (ctx->a_data_buffer == NULL) {
        rc = -ENOMEM;
        goto err_free_req;
    }

    ctx->in_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
    if (ctx->in_buffer == NULL) {
        rc = -ENOMEM;
        goto err_free_a_data;
    }

    ctx->out_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
    if (ctx->out_buffer == NULL) {
        rc = -ENOMEM;
        goto err_free_in;
    }

    *context = ctx;
    return 0;

    /* Unwind in reverse order of acquisition. */
err_free_in:
    kfree(ctx->in_buffer);
err_free_a_data:
    kfree(ctx->a_data_buffer);
err_free_req:
    aead_request_free(ctx->req);
err_free_aead:
    crypto_free_aead(ctx->aead);
err_free_ctx:
    kfree(ctx);
    return rc;
#endif
}
/*
 * Release a context created by libspdm_aead_prealloc().
 * A NULL context is tolerated (no-op), mirroring kfree() semantics.
 */
void libspdm_aead_free(void *context)
{
#ifdef USE_LKCA
    struct lkca_aead_ctx *ctx = context;

    if (ctx == NULL) {
        return;
    }

    /* Free the request before the transform it was allocated from. */
    aead_request_free(ctx->req);
    crypto_free_aead(ctx->aead);
    kfree(ctx->a_data_buffer);
    kfree(ctx->in_buffer);
    kfree(ctx->out_buffer);
    kfree(ctx);
#endif
}
#define SG_AEAD_AAD 0
#define SG_AEAD_TEXT 1
#define SG_AEAD_SIG 2
// Number of fields in AEAD scatterlist
#define SG_AEAD_LEN 3
#ifdef USE_LKCA
// This function doesn't do any allocs, it uses temp buffers instead.
// Core AEAD worker: programs the key/IV, wires up the caller-built
// scatterlists and runs one synchronous encrypt or decrypt on the given
// transform/request pair. Returns 0 on success or a negative errno.
static int lkca_aead_internal(struct crypto_aead *aead,
struct aead_request *req,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
struct scatterlist sg_in[],
struct scatterlist sg_out[],
size_t a_data_size,
size_t data_in_size,
size_t *data_out_size,
size_t tag_size,
bool enc)
{
DECLARE_CRYPTO_WAIT(wait);
int rc = 0;
if (crypto_aead_setkey(aead, key, key_size)) {
pr_info("key could not be set\n");
return -EINVAL;
}
// The transform's IV size is fixed per algorithm; reject mismatches.
if (crypto_aead_ivsize(aead) != iv_size) {
pr_info("iv could not be set\n");
return -EINVAL;
}
aead_request_set_ad(req, a_data_size);
// Run synchronously: crypto_wait_req() blocks on 'wait' until the
// (possibly asynchronous) operation completes.
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait);
if (enc) {
aead_request_set_crypt(req, sg_in, sg_out, data_in_size, (u8 *) iv);
rc = crypto_wait_req(crypto_aead_encrypt(req), &wait);
} else {
// For decryption, cryptlen covers ciphertext plus the auth tag.
aead_request_set_crypt(req, sg_in, sg_out, data_in_size + tag_size, (u8 *) iv);
rc = crypto_wait_req(crypto_aead_decrypt(req), &wait);
}
if (rc != 0) {
if (enc) {
pr_info("aead.c: Encryption failed with error %i\n", rc);
} else {
pr_info("aead.c: Decryption failed with error %i\n", rc);
if (rc == -EBADMSG) {
pr_info("aead.c: Authentication tag mismatch!\n");
}
}
}
// NOTE(review): *data_out_size is set even when rc != 0; callers must
// check the return code before trusting the output length.
*data_out_size = data_in_size;
return rc;
}
#endif
// Encrypt or decrypt using a preallocated context. Buffers that fail
// virt_addr_valid() (e.g. vmalloc memory) cannot be placed in a scatterlist,
// so they are bounced through the context's preallocated shadow buffers.
// Returns 0 on success or a negative errno.
static int libspdm_aead_prealloced(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size,
bool enc)
{
#ifndef USE_LKCA
return -ENODEV;
#else
int rc = 0;
struct scatterlist sg_in[SG_AEAD_LEN];
struct scatterlist sg_out[SG_AEAD_LEN];
struct lkca_aead_ctx *ctx = context;
sg_init_table(sg_in, SG_AEAD_LEN);
sg_init_table(sg_out, SG_AEAD_LEN);
// Slot 0: associated data (staged through the bounce buffer if needed).
if (!virt_addr_valid(a_data)) {
if (a_data_size > BUFFER_SIZE) {
return -ENOMEM;
}
sg_set_buf(&sg_in[SG_AEAD_AAD], ctx->a_data_buffer, a_data_size);
sg_set_buf(&sg_out[SG_AEAD_AAD], ctx->a_data_buffer, a_data_size);
memcpy(ctx->a_data_buffer, a_data, a_data_size);
} else {
sg_set_buf(&sg_in[SG_AEAD_AAD], a_data, a_data_size);
sg_set_buf(&sg_out[SG_AEAD_AAD], a_data, a_data_size);
}
// Slot 1 (input side): plaintext or ciphertext to be processed.
if (!virt_addr_valid(data_in)) {
if (data_in_size > BUFFER_SIZE) {
return -ENOMEM;
}
sg_set_buf(&sg_in[SG_AEAD_TEXT], ctx->in_buffer, data_in_size);
memcpy(ctx->in_buffer, data_in, data_in_size);
} else {
sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in, data_in_size);
}
// Slot 1 (output side): result buffer, same length as the input text.
if (!virt_addr_valid(data_out)) {
if (data_in_size > BUFFER_SIZE) {
return -ENOMEM;
}
sg_set_buf(&sg_out[SG_AEAD_TEXT], ctx->out_buffer, data_in_size);
} else {
sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out, data_in_size);
}
// Tag is small enough that memcpy is cheaper than checking if page is virtual
// Slot 2: auth tag — input on decrypt, output on encrypt.
if(tag_size > AUTH_TAG_SIZE) {
return -ENOMEM;
}
sg_set_buf(&sg_in[SG_AEAD_SIG], ctx->tag, tag_size);
sg_set_buf(&sg_out[SG_AEAD_SIG], ctx->tag, tag_size);
if(!enc)
memcpy(ctx->tag, tag, tag_size);
rc = lkca_aead_internal(ctx->aead, ctx->req, key, key_size, iv, iv_size,
sg_in, sg_out, a_data_size, data_in_size,
data_out_size, tag_size, enc);
// Copy staged results back to the caller's buffers.
if (enc) {
memcpy(tag, ctx->tag, tag_size);
}
if (!virt_addr_valid(data_out)) {
memcpy(data_out, ctx->out_buffer, data_in_size);
}
return rc;
#endif
}
/*
 * One-shot AEAD operation: allocates a transform and request, performs a
 * single synchronous encrypt/decrypt, then frees everything.
 *
 * Buffers that fail virt_addr_valid() (e.g. vmalloc memory) cannot be placed
 * in a scatterlist, so they are staged through temporary kmalloc'd "shadow"
 * buffers and copied back afterwards.
 *
 * Returns 0 on success or a negative errno.
 */
int libspdm_aead(const uint8_t *key, size_t key_size,
                 const uint8_t *iv, size_t iv_size,
                 const uint8_t *a_data, size_t a_data_size,
                 const uint8_t *data_in, size_t data_in_size,
                 const uint8_t *tag, size_t tag_size,
                 uint8_t *data_out, size_t *data_out_size,
                 bool enc, char const *alg)
{
#ifndef USE_LKCA
    return -ENODEV;
#else
    struct crypto_aead *aead = NULL;
    struct aead_request *req = NULL;
    struct scatterlist sg_in[SG_AEAD_LEN];
    struct scatterlist sg_out[SG_AEAD_LEN];
    uint8_t *a_data_shadow = NULL;
    uint8_t *data_in_shadow = NULL;
    uint8_t *data_out_shadow = NULL;
    uint8_t *tag_shadow = NULL;
    int rc = 0;

    aead = crypto_alloc_aead(alg, CRYPTO_ALG_TYPE_AEAD, 0);
    if (IS_ERR(aead)) {
        pr_notice("could not allocate AEAD algorithm\n");
        return -ENODEV;
    }

    req = aead_request_alloc(aead, GFP_KERNEL);
    if (req == NULL) {
        pr_info("could not allocate skcipher request\n");
        rc = -ENOMEM;
        goto out;
    }

    sg_init_table(sg_in, SG_AEAD_LEN);
    sg_init_table(sg_out, SG_AEAD_LEN);

    /* Associated data slot. */
    if (!virt_addr_valid(a_data)) {
        a_data_shadow = kmalloc(a_data_size, GFP_KERNEL);
        if (a_data_shadow == NULL) {
            rc = -ENOMEM;
            goto out;
        }
        sg_set_buf(&sg_in[SG_AEAD_AAD], a_data_shadow, a_data_size);
        sg_set_buf(&sg_out[SG_AEAD_AAD], a_data_shadow, a_data_size);
        memcpy(a_data_shadow, a_data, a_data_size);
    } else {
        sg_set_buf(&sg_in[SG_AEAD_AAD], a_data, a_data_size);
        sg_set_buf(&sg_out[SG_AEAD_AAD], a_data, a_data_size);
    }

    /* Text input slot. */
    if (!virt_addr_valid(data_in)) {
        data_in_shadow = kmalloc(data_in_size, GFP_KERNEL);
        if (data_in_shadow == NULL) {
            rc = -ENOMEM;
            goto out;
        }
        sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in_shadow, data_in_size);
        memcpy(data_in_shadow, data_in, data_in_size);
    } else {
        sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in, data_in_size);
    }

    /* Text output slot (same length as the input text). */
    if (!virt_addr_valid(data_out)) {
        data_out_shadow = kmalloc(data_in_size, GFP_KERNEL);
        if (data_out_shadow == NULL) {
            rc = -ENOMEM;
            goto out;
        }
        sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out_shadow, data_in_size);
    } else {
        sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out, data_in_size);
    }

    /* Tag slot: input on decrypt, output on encrypt. */
    if (!virt_addr_valid(tag)) {
        tag_shadow = kmalloc(tag_size, GFP_KERNEL);
        if (tag_shadow == NULL) {
            rc = -ENOMEM;
            goto out;
        }
        sg_set_buf(&sg_in[SG_AEAD_SIG], tag_shadow, tag_size);
        sg_set_buf(&sg_out[SG_AEAD_SIG], tag_shadow, tag_size);
        if (!enc)
            memcpy(tag_shadow, tag, tag_size);
    } else {
        sg_set_buf(&sg_in[SG_AEAD_SIG], tag, tag_size);
        sg_set_buf(&sg_out[SG_AEAD_SIG], tag, tag_size);
    }

    rc = lkca_aead_internal(aead, req, key, key_size, iv, iv_size,
                            sg_in, sg_out, a_data_size, data_in_size,
                            data_out_size, tag_size, enc);

    /* Copy staged results back to the caller's buffers. */
    if (enc && (tag_shadow != NULL))
        memcpy((uint8_t *) tag, tag_shadow, tag_size);
    if (data_out_shadow != NULL)
        memcpy(data_out, data_out_shadow, data_in_size);

out:
    /* kfree(NULL) is a no-op, so the shadow buffers can be freed
     * unconditionally. (The previous code keyed two of these frees off
     * data_out/tag instead of the shadow pointers, leaking the shadows
     * whenever data_out or tag was NULL.) */
    kfree(a_data_shadow);
    kfree(data_in_shadow);
    kfree(data_out_shadow);
    kfree(tag_shadow);
    /* Free the request before the transform it was allocated from. */
    if (req != NULL)
        aead_request_free(req);
    crypto_free_aead(aead);
    return rc;
#endif
}
// libspdm-style wrapper: allocate an AES-GCM context, reporting success as a
// boolean instead of an errno.
bool libspdm_aead_gcm_prealloc(void **context)
{
    int status = libspdm_aead_prealloc(context, "gcm(aes)");

    return status == 0;
}
/*
 * AES-GCM authenticated encryption using a preallocated context.
 *
 * Validates the parameter ranges mandated by libspdm (96-bit IV, AES key of
 * 128/192/256 bits, 12..16 byte tag), then runs the operation. On success
 * returns true; *data_out_size (when provided) is set to data_in_size.
 */
bool libspdm_aead_aes_gcm_encrypt_prealloc(void *context,
                                           const uint8_t *key, size_t key_size,
                                           const uint8_t *iv, size_t iv_size,
                                           const uint8_t *a_data, size_t a_data_size,
                                           const uint8_t *data_in, size_t data_in_size,
                                           uint8_t *tag_out, size_t tag_size,
                                           uint8_t *data_out, size_t *data_out_size)
{
    int ret;

    if (data_in_size > INT_MAX) {
        return false;
    }
    if (a_data_size > INT_MAX) {
        return false;
    }
    /* GCM requires a 96-bit IV here. */
    if (iv_size != 12) {
        return false;
    }
    switch (key_size) {
    case 16:
    case 24:
    case 32:
        break;
    default:
        return false;
    }
    if ((tag_size < 12) || (tag_size > 16)) {
        return false;
    }
    if (data_out_size != NULL) {
        if ((*data_out_size > INT_MAX) ||
            (*data_out_size < data_in_size)) {
            return false;
        }
    }

    ret = libspdm_aead_prealloced(context, key, key_size, iv, iv_size,
                                  a_data, a_data_size, data_in, data_in_size,
                                  tag_out, tag_size, data_out, data_out_size, true);

    /* Fix: the previous code dereferenced data_out_size unconditionally,
     * contradicting the NULL check above. */
    if (data_out_size != NULL) {
        *data_out_size = data_in_size;
    }
    return ret == 0;
}
/*
 * AES-GCM authenticated decryption using a preallocated context.
 *
 * Validates the parameter ranges mandated by libspdm (96-bit IV, AES key of
 * 128/192/256 bits, 12..16 byte tag), then runs the operation; a tag
 * mismatch makes the underlying call fail. On success returns true;
 * *data_out_size (when provided) is set to data_in_size.
 */
bool libspdm_aead_aes_gcm_decrypt_prealloc(void *context,
                                           const uint8_t *key, size_t key_size,
                                           const uint8_t *iv, size_t iv_size,
                                           const uint8_t *a_data, size_t a_data_size,
                                           const uint8_t *data_in, size_t data_in_size,
                                           const uint8_t *tag, size_t tag_size,
                                           uint8_t *data_out, size_t *data_out_size)
{
    int ret;

    if (data_in_size > INT_MAX) {
        return false;
    }
    if (a_data_size > INT_MAX) {
        return false;
    }
    /* GCM requires a 96-bit IV here. */
    if (iv_size != 12) {
        return false;
    }
    switch (key_size) {
    case 16:
    case 24:
    case 32:
        break;
    default:
        return false;
    }
    if ((tag_size < 12) || (tag_size > 16)) {
        return false;
    }
    if (data_out_size != NULL) {
        if ((*data_out_size > INT_MAX) ||
            (*data_out_size < data_in_size)) {
            return false;
        }
    }

    ret = libspdm_aead_prealloced(context, key, key_size, iv, iv_size,
                                  a_data, a_data_size, data_in, data_in_size,
                                  (uint8_t *) tag, tag_size, data_out, data_out_size, false);

    /* Fix: the previous code dereferenced data_out_size unconditionally,
     * contradicting the NULL check above. */
    if (data_out_size != NULL) {
        *data_out_size = data_in_size;
    }
    return ret == 0;
}

View File

@@ -0,0 +1,117 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
/*
 * One-shot AES-GCM authenticated encryption (no preallocated context).
 *
 * Validates the parameter ranges mandated by libspdm (96-bit IV, AES key of
 * 128/192/256 bits, 12..16 byte tag), then performs the operation via
 * libspdm_aead(). On success returns true; *data_out_size (when provided)
 * is set to data_in_size.
 */
bool libspdm_aead_aes_gcm_encrypt(const uint8_t *key, size_t key_size,
                                  const uint8_t *iv, size_t iv_size,
                                  const uint8_t *a_data, size_t a_data_size,
                                  const uint8_t *data_in, size_t data_in_size,
                                  uint8_t *tag_out, size_t tag_size,
                                  uint8_t *data_out, size_t *data_out_size)
{
    int ret;

    if (data_in_size > INT_MAX) {
        return false;
    }
    if (a_data_size > INT_MAX) {
        return false;
    }
    /* GCM requires a 96-bit IV here. */
    if (iv_size != 12) {
        return false;
    }
    switch (key_size) {
    case 16:
    case 24:
    case 32:
        break;
    default:
        return false;
    }
    if ((tag_size < 12) || (tag_size > 16)) {
        return false;
    }
    if (data_out_size != NULL) {
        if ((*data_out_size > INT_MAX) ||
            (*data_out_size < data_in_size)) {
            return false;
        }
    }

    ret = libspdm_aead(key, key_size, iv, iv_size, a_data, a_data_size,
                       data_in, data_in_size, tag_out, tag_size,
                       data_out, data_out_size, true, "gcm(aes)");

    /* Fix: the previous code dereferenced data_out_size unconditionally,
     * contradicting the NULL check above. */
    if (data_out_size != NULL) {
        *data_out_size = data_in_size;
    }
    return ret == 0;
}
/*
 * One-shot AES-GCM authenticated decryption (no preallocated context).
 *
 * Validates the parameter ranges mandated by libspdm (96-bit IV, AES key of
 * 128/192/256 bits, 12..16 byte tag), then performs the operation via
 * libspdm_aead(); a tag mismatch makes the underlying call fail. On success
 * returns true; *data_out_size (when provided) is set to data_in_size.
 */
bool libspdm_aead_aes_gcm_decrypt(const uint8_t *key, size_t key_size,
                                  const uint8_t *iv, size_t iv_size,
                                  const uint8_t *a_data, size_t a_data_size,
                                  const uint8_t *data_in, size_t data_in_size,
                                  const uint8_t *tag, size_t tag_size,
                                  uint8_t *data_out, size_t *data_out_size)
{
    int ret;

    if (data_in_size > INT_MAX) {
        return false;
    }
    if (a_data_size > INT_MAX) {
        return false;
    }
    /* GCM requires a 96-bit IV here. */
    if (iv_size != 12) {
        return false;
    }
    switch (key_size) {
    case 16:
    case 24:
    case 32:
        break;
    default:
        return false;
    }
    if ((tag_size < 12) || (tag_size > 16)) {
        return false;
    }
    if (data_out_size != NULL) {
        if ((*data_out_size > INT_MAX) ||
            (*data_out_size < data_in_size)) {
            return false;
        }
    }

    ret = libspdm_aead(key, key_size, iv, iv_size, a_data, a_data_size,
                       data_in, data_in_size, tag, tag_size,
                       data_out, data_out_size, false, "gcm(aes)");

    /* Fix: the previous code dereferenced data_out_size unconditionally,
     * contradicting the NULL check above. */
    if (data_out_size != NULL) {
        *data_out_size = data_in_size;
    }
    return ret == 0;
}

View File

@@ -0,0 +1,172 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
/*
 * ECDSA signing is not implemented for this backend; the stub always
 * reports failure so callers fall back or abort cleanly.
 */
static bool lkca_ecdsa_sign(void *ec_context,
                            const uint8_t *message_hash, size_t hash_size,
                            uint8_t *signature, size_t *sig_size)
{
    (void)ec_context;
    (void)message_hash;
    (void)hash_size;
    (void)signature;
    (void)sig_size;
    return false;
}
/*
 * Install a public key into an EC context.
 * Rejects NULL context/key pointers, then delegates to the LKCA backend.
 */
bool libspdm_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
                            size_t public_key_size)
{
    bool args_ok = (ec_context != NULL) && (public_key != NULL);

    return args_ok &&
           lkca_ec_set_pub_key(ec_context, public_key, public_key_size);
}
/*
 * Retrieve the public key from an EC context.
 * A NULL output buffer is only legal for a zero-length size query;
 * otherwise delegates to the LKCA backend.
 */
bool libspdm_ec_get_pub_key(void *ec_context, uint8_t *public_key,
                            size_t *public_key_size)
{
    if (ec_context == NULL || public_key_size == NULL)
        return false;

    if (public_key == NULL && *public_key_size != 0)
        return false;

    return lkca_ec_get_pub_key(ec_context, public_key, public_key_size);
}
/*
 * Key validation is not implemented for this backend (TBD upstream);
 * every context — including NULL — is accepted.
 */
bool libspdm_ec_check_key(const void *ec_context)
{
    (void)ec_context;
    return true;
}
/*
 * Generate a fresh EC key pair inside the context, writing the public part
 * to public_data. A NULL public_data buffer is only legal together with a
 * zero *public_size; otherwise delegates to the LKCA backend.
 */
bool libspdm_ec_generate_key(void *ec_context, uint8_t *public_data,
                             size_t *public_size)
{
    if (ec_context == NULL || public_size == NULL)
        return false;

    if (public_data == NULL && *public_size != 0)
        return false;

    return lkca_ec_generate_key(ec_context, public_data, public_size);
}
/*
 * Compute the ECDH shared secret from the context's private key and the
 * peer's public key. Rejects NULL pointers and oversized peer keys, then
 * delegates to the LKCA backend.
 */
bool libspdm_ec_compute_key(void *ec_context, const uint8_t *peer_public,
                            size_t peer_public_size, uint8_t *key,
                            size_t *key_size)
{
    bool ptrs_ok = (ec_context != NULL) && (peer_public != NULL) &&
                   (key != NULL) && (key_size != NULL);

    if (!ptrs_ok || peer_public_size > INT_MAX)
        return false;

    return lkca_ec_compute_key(ec_context, peer_public, peer_public_size,
                               key, key_size);
}
/*
 * ECDSA-sign a message digest. The digest length must match the hash
 * algorithm identified by hash_nid (SHA-256/384/512); anything else is
 * rejected before delegating to the LKCA backend.
 */
bool libspdm_ecdsa_sign(void *ec_context, size_t hash_nid,
                        const uint8_t *message_hash, size_t hash_size,
                        uint8_t *signature, size_t *sig_size)
{
    size_t expected_size;

    if (ec_context == NULL || message_hash == NULL || signature == NULL)
        return false;

    switch (hash_nid) {
    case LIBSPDM_CRYPTO_NID_SHA256:
        expected_size = LIBSPDM_SHA256_DIGEST_SIZE;
        break;
    case LIBSPDM_CRYPTO_NID_SHA384:
        expected_size = LIBSPDM_SHA384_DIGEST_SIZE;
        break;
    case LIBSPDM_CRYPTO_NID_SHA512:
        expected_size = LIBSPDM_SHA512_DIGEST_SIZE;
        break;
    default:
        return false;
    }

    if (hash_size != expected_size)
        return false;

    return lkca_ecdsa_sign(ec_context, message_hash, hash_size, signature, sig_size);
}
/*
 * Verify an ECDSA signature over a message digest. Rejects NULL pointers,
 * empty/oversized signatures and digest lengths that do not match the hash
 * algorithm identified by hash_nid, then delegates to the LKCA backend.
 */
bool libspdm_ecdsa_verify(void *ec_context, size_t hash_nid,
                          const uint8_t *message_hash, size_t hash_size,
                          const uint8_t *signature, size_t sig_size)
{
    size_t expected_size;

    if (ec_context == NULL || message_hash == NULL || signature == NULL)
        return false;

    if (sig_size == 0 || sig_size > INT_MAX)
        return false;

    switch (hash_nid) {
    case LIBSPDM_CRYPTO_NID_SHA256:
        expected_size = LIBSPDM_SHA256_DIGEST_SIZE;
        break;
    case LIBSPDM_CRYPTO_NID_SHA384:
        expected_size = LIBSPDM_SHA384_DIGEST_SIZE;
        break;
    case LIBSPDM_CRYPTO_NID_SHA512:
        expected_size = LIBSPDM_SHA512_DIGEST_SIZE;
        break;
    default:
        return false;
    }

    if (hash_size != expected_size)
        return false;

    return lkca_ecdsa_verify(ec_context, hash_nid, message_hash, hash_size,
                             signature, sig_size);
}

View File

@@ -0,0 +1,410 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
#ifdef USE_LKCA
#include <linux/module.h>
MODULE_SOFTDEP("pre: ecdh_generic,ecdsa_generic");
#include <crypto/akcipher.h>
#include <crypto/ecdh.h>
#include <crypto/internal/ecc.h>
#ifndef NV_CRYPTO_AKCIPHER_VERIFY_PRESENT
#include <crypto/sig.h>
struct signature
{
u64 r[ECC_MAX_DIGITS];
u64 s[ECC_MAX_DIGITS];
};
#endif // NV_CRYPTO_AKCIPHER_VERIFY_PRESENT
#define ECDSA_PUBKEY_HEADER_XY_PRESENT (0x4)
// EC key context shared by the ECDH and ECDSA code paths.
struct ecc_ctx {
unsigned int curve_id; // ECC_CURVE_NIST_P256 or ECC_CURVE_NIST_P384
u64 priv_key[ECC_MAX_DIGITS]; // In big endian
struct {
// ecdsa pubkey has header indicating length of pubkey
u8 padding[7];
u8 pub_key_prefix; // byte immediately before pub_key; set to 0x4 for verify
u64 pub_key[2 * ECC_MAX_DIGITS]; // raw X || Y coordinates
};
bool pub_key_set; // pub_key holds a validated/derived point
bool priv_key_set; // priv_key holds a usable scalar
char const *name; // kernel crypto algorithm name, e.g. "ecdsa-nist-p256"
int size; // public key size in bytes (64 for P-256, 96 for P-384)
};
#endif // USE_LKCA
/*
 * Allocate an EC context for the requested curve (P-256 or P-384 only).
 * Returns the new context, or NULL on unsupported curve / allocation failure.
 */
void *libspdm_ec_new_by_nid(size_t nid)
{
#ifndef USE_LKCA
    return NULL;
#else
    struct ecc_ctx *ctx;

    if ((nid != LIBSPDM_CRYPTO_NID_SECP256R1) && (nid != LIBSPDM_CRYPTO_NID_SECP384R1)){
        return NULL;
    }

    /* Zero-initialize so the not-yet-set key buffers never contain stale
     * heap data that could later be copied out of the context. */
    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    if (!ctx) {
        return NULL;
    }

    if (nid == LIBSPDM_CRYPTO_NID_SECP256R1) {
        ctx->curve_id = ECC_CURVE_NIST_P256;
        ctx->size = 64;
        ctx->name = "ecdsa-nist-p256";
    } else {
        ctx->curve_id = ECC_CURVE_NIST_P384;
        ctx->size = 96;
        ctx->name = "ecdsa-nist-p384";
    }

    ctx->pub_key_set = false;
    ctx->priv_key_set = false;
    return ctx;
#endif // USE_LKCA
}
/* Release a context allocated by libspdm_ec_new_by_nid().
 * kfree(NULL) is a no-op, so a NULL context is safe.
 * NOTE(review): the private key inside the context is not zeroized before
 * the memory is returned to the allocator — kfree_sensitive() may be
 * preferable; confirm the minimum supported kernel version first. */
void libspdm_ec_free(void *ec_context)
{
#ifdef USE_LKCA
kfree(ec_context);
#endif
}
// Install a private key (key_size must equal the coordinate size,
// ctx->size / 2) and derive the matching public key from it.
bool lkca_ecdsa_set_priv_key(void *context, uint8_t *key, size_t key_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = context;
unsigned int ndigits = ctx->size / 16; // 8 bytes per u64 digit; size covers X+Y
if (key_size != (ctx->size / 2)) {
return false;
}
// NOTE(review): key bytes are copied as-is into the u64 digit array with no
// endianness conversion — confirm callers supply the layout that
// ecc_make_pub_key() expects.
memcpy(ctx->priv_key, key, key_size);
// XXX: if this fails, do we want to retry generating new key?
if(ecc_make_pub_key(ctx->curve_id, ndigits, ctx->priv_key, ctx->pub_key)) {
return false;
}
ctx->pub_key_set = true;
ctx->priv_key_set = true;
return true;
#endif // USE_LKCA
}
// Validate and install a public key supplied as raw X || Y bytes
// (public_key_size must equal ctx->size).
bool lkca_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
size_t public_key_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = ec_context;
struct ecc_point pub_key;
unsigned int ndigits;
if (public_key_size != ctx->size) {
return false;
}
// We can reuse pub_key for now
// Build a digit-swapped copy in ctx->pub_key so the curve membership
// check below can operate on it.
ndigits = ctx->size / 16;
pub_key = ECC_POINT_INIT(ctx->pub_key, ctx->pub_key + ndigits, ndigits);
ecc_swap_digits(public_key, ctx->pub_key, ndigits);
ecc_swap_digits(((u64 *)public_key) + ndigits, ctx->pub_key + ndigits, ndigits);
if(ecc_is_pubkey_valid_full(ecc_get_curve(ctx->curve_id), &pub_key)) {
return false;
}
// Once validated, store the key in its original byte layout.
memcpy(ctx->pub_key, public_key, public_key_size);
ctx->pub_key_set = true;
return true;
#endif // USE_LKCA
}
/*
 * Export the raw public key (X || Y, ctx->size bytes). On a too-small
 * buffer, reports the required size in *public_key_size and returns false.
 */
bool lkca_ec_get_pub_key(void *ec_context, uint8_t *public_key,
                         size_t *public_key_size)
{
#ifndef USE_LKCA
    return false;
#else
    struct ecc_ctx *ctx = ec_context;

    /* Refuse to export before a key has been generated or set; otherwise
     * we would copy uninitialized context memory to the caller. */
    if (!ctx->pub_key_set) {
        return false;
    }
    if (*public_key_size < ctx->size) {
        *public_key_size = ctx->size;
        return false;
    }
    *public_key_size = ctx->size;
    memcpy(public_key, ctx->pub_key, ctx->size);
    return true;
#endif // USE_LKCA
}
// Generate a fresh key pair and copy the raw public key (X || Y, ctx->size
// bytes) into public_data, reporting its length via *public_size.
bool lkca_ec_generate_key(void *ec_context, uint8_t *public_data,
size_t *public_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = ec_context;
unsigned int ndigits = ctx->size / 16;
if(ecc_gen_privkey(ctx->curve_id, ndigits, ctx->priv_key)) {
return false;
}
// XXX: if this fails, do we want to retry generating new key?
if(ecc_make_pub_key(ctx->curve_id, ndigits, ctx->priv_key, ctx->pub_key)) {
return false;
}
// NOTE(review): *public_size is treated as output only; public_data is
// assumed to hold at least ctx->size bytes — confirm against callers.
memcpy(public_data, ctx->pub_key, ctx->size);
*public_size = ctx->size;
ctx->priv_key_set = true;
ctx->pub_key_set = true;
return true;
#endif // USE_LKCA
}
/*
 * Derive the ECDH shared secret from the context's private key and a peer
 * public key given as raw X || Y bytes. The secret is one coordinate long
 * (ctx->size / 2); *key_size is updated to that length on success.
 */
bool lkca_ec_compute_key(void *ec_context, const uint8_t *peer_public,
                         size_t peer_public_size, uint8_t *key,
                         size_t *key_size)
{
#ifndef USE_LKCA
    return false;
#else
    struct ecc_ctx *ctx = ec_context;
    size_t secret_len = ctx->size / 2;

    if (peer_public_size != ctx->size || !ctx->priv_key_set)
        return false;

    if (*key_size < secret_len)
        return false;

    if (crypto_ecdh_shared_secret(ctx->curve_id, ctx->size / 16,
                                  (const u64 *) ctx->priv_key,
                                  (const u64 *) peer_public,
                                  (u64 *) key))
        return false;

    *key_size = secret_len;
    return true;
#endif // USE_LKCA
}
#ifndef NV_CRYPTO_AKCIPHER_VERIFY_PRESENT
// Verify a raw (r || s) ECDSA signature via the kernel crypto_sig API.
// Used on kernels where the legacy crypto_akcipher verify op is absent.
// Returns true only when the signature verifies against ctx->pub_key.
static bool lkca_ecdsa_verify_crypto_sig(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size)
{
#ifndef USE_LKCA
return false;
#else // USE_LKCA
struct ecc_ctx *ctx = ec_context;
u8 *pub_key;
int err;
DECLARE_CRYPTO_WAIT(wait);
struct crypto_sig * tfm = NULL;
struct signature sig;
// Signature must be exactly X||Y sized and a public key must be loaded.
if (sig_size != ctx->size || !ctx->pub_key_set)
{
return false;
}
tfm = crypto_alloc_sig(ctx->name, CRYPTO_ALG_TYPE_SIG, 0);
if (IS_ERR(tfm)) {
pr_info("crypto_alloc_sig failed in lkca_ecdsa_verify\n");
return false;
}
// modify header of pubkey to indicate size
// NOTE(review): assumes ctx->pub_key_prefix is laid out immediately
// before ctx->pub_key, so prefix + key form one contiguous buffer of
// ctx->size + 1 bytes — confirm against struct ecc_ctx's declaration.
pub_key = (u8 *) &(ctx->pub_key_prefix);
*pub_key = ECDSA_PUBKEY_HEADER_XY_PRESENT;
err = crypto_sig_set_pubkey(tfm, pub_key, ctx->size + 1);
if (err != 0)
{
pr_info("crypto_sig_set_pubkey failed in lkca_ecdsa_verify: %d", -err);
goto failTfm;
}
//
// Compared to the way we receive the signature, we need to:
// - swap order of all digits
// - swap endianness for each digit
//
memset(&sig, 0, sizeof(sig));
// r and s each occupy ctx->size/2 big-endian bytes of the signature.
ecc_digits_from_bytes(signature, ctx->size/2, sig.r, ECC_MAX_DIGITS);
ecc_digits_from_bytes(signature + ctx->size/2, ctx->size/2, sig.s, ECC_MAX_DIGITS);
err = crypto_sig_verify(tfm, (void *)&sig, sizeof(sig), message_hash, hash_size);
if (err != 0)
{
pr_info("crypto_sig_verify failed in lkca_ecdsa_verify %d\n", -err);
}
failTfm:
crypto_free_sig(tfm);
// Success only when the kernel reported verification success (0).
return err == 0;
#endif // USE_LKCA
}
#else // NV_CRYPTO_AKCIPHER_VERIFY_PRESENT
/*
 * Verify a raw (r || s) ECDSA signature via the legacy crypto_akcipher
 * verify operation: the signature is re-encoded as a BER SEQUENCE of two
 * INTEGERs, the hash is appended, and both are handed to the kernel.
 * Returns true only when the signature verifies against ctx->pub_key.
 */
static bool lkca_ecdsa_verify_akcipher(void *ec_context, size_t hash_nid,
                                       const uint8_t *message_hash, size_t hash_size,
                                       const uint8_t *signature, size_t sig_size)
{
#ifndef USE_LKCA
    return false;
#else // USE_LKCA
    struct ecc_ctx *ctx = ec_context;
    u8 *pub_key;
    int err;
    DECLARE_CRYPTO_WAIT(wait);
    // Roundabout way: worst-case BER size — SEQUENCE header plus two
    // INTEGERs, each with tag/length and a possible leading zero byte.
    u64 ber_max_len = 3 + 2 * (4 + (ECC_MAX_BYTES));
    u64 ber_len = 0;
    u8 *ber = NULL;
    struct akcipher_request *req = NULL;
    struct crypto_akcipher *tfm = NULL;
    struct scatterlist sg;
    if (sig_size != ctx->size) {
        return false;
    }
    if (ctx->pub_key_set == false) {
        return false;
    }
    tfm = crypto_alloc_akcipher(ctx->name, CRYPTO_ALG_TYPE_AKCIPHER, 0);
    if (IS_ERR(tfm)) {
        pr_info("crypto_alloc_akcipher failed in lkca_ecdsa_verify\n");
        return false;
    }
    // modify header of pubkey to indicate size
    pub_key = (u8 *) &(ctx->pub_key_prefix);
    *pub_key = ECDSA_PUBKEY_HEADER_XY_PRESENT;
    if ((err = crypto_akcipher_set_pub_key(tfm, pub_key, ctx->size + 1)) != 0) {
        pr_info("crypto_akcipher_set_pub_key failed in lkca_ecdsa_verify: %d\n", -err);
        goto failTfm;
    }
    req = akcipher_request_alloc(tfm, GFP_KERNEL);
    if (req == NULL) {
        // BUGFIX: akcipher_request_alloc() returns NULL on failure, not an
        // ERR_PTR, so the previous IS_ERR(req) check never fired. err must
        // also be set here, otherwise this path returned true (signature
        // accepted) on allocation failure.
        pr_info("akcipher_request_alloc failed in lkca_ecdsa_verify\n");
        err = -ENOMEM;
        goto failTfm;
    }
    // We concatenate signature and hash and ship it to kernel
    ber = kmalloc(ber_max_len + hash_size, GFP_KERNEL);
    if (ber == NULL) {
        // BUGFIX: err was left 0 here, so an OOM previously reported the
        // signature as valid.
        err = -ENOMEM;
        goto failReq;
    }
    // XXX: NOTE THIS WILL WORK ONLY FOR 256 AND 384 bits. For larger keys
    // length field will be longer than 1 byte and I haven't taken care of that!
    // Signature: SEQUENCE { INTEGER r, INTEGER s }. A leading zero byte is
    // inserted whenever the top bit is set, to keep the INTEGER positive.
    ber[ber_len++] = 0x30;
    ber[ber_len++] = 2 * (2 + ctx->size / 2);
    ber[ber_len++] = 0x02;
    if (signature[0] > 127) {
        ber[ber_len++] = ctx->size / 2 + 1;
        ber[1]++;                       // grow the SEQUENCE length too
        ber[ber_len++] = 0;
    } else {
        ber[ber_len++] = ctx->size / 2;
    }
    memcpy(ber + ber_len, signature, sig_size / 2);
    ber_len += sig_size / 2;
    ber[ber_len++] = 0x02;
    if (signature[sig_size / 2] > 127) {
        ber[ber_len++] = ctx->size / 2 + 1;
        ber[1]++;
        ber[ber_len++] = 0;
    } else {
        ber[ber_len++] = ctx->size / 2;
    }
    memcpy(ber + ber_len, signature + sig_size / 2, sig_size / 2);
    ber_len += sig_size / 2;
    // Just append hash, for scatterlists it can't be on stack anyway
    memcpy(ber + ber_len, message_hash, hash_size);
    sg_init_one(&sg, ber, ber_len + hash_size);
    akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                  CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait);
    akcipher_request_set_crypt(req, &sg, NULL, ber_len, hash_size);
    err = crypto_wait_req(crypto_akcipher_verify(req), &wait);
    if (err != 0) {
        pr_info("crypto_akcipher_verify failed in lkca_ecdsa_verify %d\n", -err);
    }
    kfree(ber);
failReq:
    akcipher_request_free(req);
failTfm:
    crypto_free_akcipher(tfm);
    return err == 0;
#endif // USE_LKCA
}
#endif // NV_CRYPTO_AKCIPHER_VERIFY_PRESENT
// Verify a raw (r || s) ECDSA signature over message_hash. Dispatches at
// compile time to whichever kernel interface this kernel provides: the
// newer crypto_sig API, or the legacy akcipher verify operation.
bool lkca_ecdsa_verify(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size)
{
#ifndef NV_CRYPTO_AKCIPHER_VERIFY_PRESENT
return lkca_ecdsa_verify_crypto_sig(ec_context, hash_nid, message_hash, hash_size,
signature, sig_size);
#else // NV_CRYPTO_AKCIPHER_VERIFY_PRESENT
return lkca_ecdsa_verify_akcipher(ec_context, hash_nid, message_hash, hash_size,
signature, sig_size);
#endif // NV_CRYPTO_AKCIPHER_VERIFY_PRESENT
}

View File

@@ -0,0 +1,158 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
// RFC 5869 has some very non-intuitive points, reading it is advised
/*
 * HKDF-Expand (RFC 5869, section 2.3): fill `out` with T(1)..T(N) where
 * T(n) = HMAC(PRK, T(n-1) | info | n). `alg` must be an HMAC shash; prk_size
 * must equal its digest size so each iteration emits one full block.
 */
static bool lkca_hkdf_expand_only(struct crypto_shash *alg,
                                  const uint8_t *prk, size_t prk_size,
                                  const uint8_t *info, size_t info_size,
                                  uint8_t *out, size_t out_size)
{
#ifndef USE_LKCA
    return false;
#else
    int ret;
    size_t i;            // was int: signed/unsigned mismatch vs out_size
    uint8_t ctr;
    uint8_t tmp[HASH_MAX_DIGESTSIZE];
    SHASH_DESC_ON_STACK(desc, alg);
    desc->tfm = alg;

    /* Key the HMAC with the PRK once; reused for every block. */
    ret = crypto_shash_setkey(desc->tfm, prk, prk_size);
    if (ret != 0) {
        pr_info("key size mismatch %zu\n", prk_size);   // %zu: prk_size is size_t
        return false;
    }

    for (i = 0, ctr = 1; i < out_size; i += prk_size, ctr++) {
        ret = crypto_shash_init(desc);
        if (ret) {
            return false;
        }
        /* T(0) is empty; later rounds feed T(n-1) from the previous
         * output block. */
        if (i != 0) {
            ret = crypto_shash_update(desc, out + i - prk_size, prk_size);
            if (ret) {
                return false;
            }
        }
        if (info_size > 0) {
            ret = crypto_shash_update(desc, info, info_size);
            if (ret) {
                return false;
            }
        }
        /* Single-byte block counter, starting at 1 per the RFC. */
        ret = crypto_shash_update(desc, &ctr, 1);
        if (ret)
            return false;
        if ((out_size - i) < prk_size) {
            /* Final partial block: finalize into scratch, copy only the
             * bytes still needed, then scrub the key material. */
            ret = crypto_shash_final(desc, tmp);
            if (ret) {
                return false;
            }
            memcpy(out + i, tmp, out_size - i);
            memzero_explicit(tmp, sizeof(tmp));
        } else {
            ret = crypto_shash_final(desc, out + i);
            if (ret) {
                return false;
            }
        }
    }
    return true;
#endif
}
/*
 * One-shot HKDF (RFC 5869): Extract PRK = HMAC(salt, key), then Expand it
 * over `info` into `out`. alg_name selects the HMAC variant, e.g.
 * "hmac(sha384)".
 */
bool lkca_hkdf_extract_and_expand(const char *alg_name,
                                  const uint8_t *key, size_t key_size,
                                  const uint8_t *salt, size_t salt_size,
                                  const uint8_t *info, size_t info_size,
                                  uint8_t *out, size_t out_size)
{
#ifndef USE_LKCA
    return false;
#else
    bool ok = false;
    struct crypto_shash *alg;
    uint8_t prk[HASH_MAX_DIGESTSIZE];

    /* Reject NULL buffers, oversized inputs, and outputs beyond the
     * RFC 5869 limit of 255 hash blocks. */
    if (key == NULL || salt == NULL || info == NULL || out == NULL ||
        key_size > sizeof(prk) || salt_size > INT_MAX || info_size > INT_MAX ||
        out_size > (sizeof(prk) * 255)) {
        return false;
    }

    alg = crypto_alloc_shash(alg_name, 0, 0);
    if (IS_ERR(alg)) {
        return false;
    }

    /* Extract step: PRK = HMAC(salt, key), then Expand with that PRK. */
    if (crypto_shash_setkey(alg, salt, salt_size) == 0 &&
        crypto_shash_tfm_digest(alg, key, key_size, prk) == 0) {
        ok = lkca_hkdf_expand_only(alg, prk, crypto_shash_digestsize(alg),
                                   info, info_size, out, out_size);
    }

    crypto_free_shash(alg);
    return ok;
#endif
}
/*
 * HKDF Expand-only entry point: the caller supplies an already-extracted
 * PRK. alg_name selects the HMAC variant.
 */
bool lkca_hkdf_expand(const char *alg_name,
                      const uint8_t *prk, size_t prk_size,
                      const uint8_t *info, size_t info_size,
                      uint8_t *out, size_t out_size)
{
#ifndef USE_LKCA
    return false;
#else
    struct crypto_shash *alg;
    bool ok;

    /* PRK may be at most 512 bits (largest supported digest); output is
     * capped at 255 blocks per RFC 5869. */
    if (prk == NULL || info == NULL || out == NULL || prk_size > (512 / 8) ||
        info_size > INT_MAX || (out_size > (prk_size * 255))) {
        return false;
    }

    alg = crypto_alloc_shash(alg_name, 0, 0);
    if (IS_ERR(alg)) {
        return false;
    }

    ok = lkca_hkdf_expand_only(alg, prk, prk_size, info, info_size, out, out_size);
    crypto_free_shash(alg);
    return ok;
#endif
}

View File

@@ -0,0 +1,111 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
/* One-shot HKDF (extract + expand) over HMAC-SHA256. */
bool libspdm_hkdf_sha256_extract_and_expand(const uint8_t *key, size_t key_size,
                                            const uint8_t *salt, size_t salt_size,
                                            const uint8_t *info, size_t info_size,
                                            uint8_t *out, size_t out_size)
{
    return lkca_hkdf_extract_and_expand("hmac(sha256)", key, key_size,
                                        salt, salt_size, info, info_size,
                                        out, out_size);
}

/* HKDF-Extract over SHA-256: PRK = HMAC(salt, key); PRK is one digest. */
bool libspdm_hkdf_sha256_extract(const uint8_t *key, size_t key_size,
                                 const uint8_t *salt, size_t salt_size,
                                 uint8_t *prk_out, size_t prk_out_size)
{
    if (prk_out_size != (256 / 8)) {
        return false;
    }
    return libspdm_hmac_sha256_all(key, key_size, salt, salt_size, prk_out);
}

/* HKDF-Expand over HMAC-SHA256 using a caller-supplied PRK. */
bool libspdm_hkdf_sha256_expand(const uint8_t *prk, size_t prk_size,
                                const uint8_t *info, size_t info_size,
                                uint8_t *out, size_t out_size)
{
    return lkca_hkdf_expand("hmac(sha256)", prk, prk_size, info, info_size,
                            out, out_size);
}
/* One-shot HKDF (extract + expand) over HMAC-SHA384. */
bool libspdm_hkdf_sha384_extract_and_expand(const uint8_t *key, size_t key_size,
                                            const uint8_t *salt, size_t salt_size,
                                            const uint8_t *info, size_t info_size,
                                            uint8_t *out, size_t out_size)
{
    return lkca_hkdf_extract_and_expand("hmac(sha384)", key, key_size,
                                        salt, salt_size, info, info_size,
                                        out, out_size);
}

/* HKDF-Extract over SHA-384: PRK = HMAC(salt, key); PRK is one digest. */
bool libspdm_hkdf_sha384_extract(const uint8_t *key, size_t key_size,
                                 const uint8_t *salt, size_t salt_size,
                                 uint8_t *prk_out, size_t prk_out_size)
{
    if (prk_out_size != (384 / 8)) {
        return false;
    }
    return libspdm_hmac_sha384_all(key, key_size, salt, salt_size, prk_out);
}

/* HKDF-Expand over HMAC-SHA384 using a caller-supplied PRK. */
bool libspdm_hkdf_sha384_expand(const uint8_t *prk, size_t prk_size,
                                const uint8_t *info, size_t info_size,
                                uint8_t *out, size_t out_size)
{
    return lkca_hkdf_expand("hmac(sha384)", prk, prk_size, info, info_size,
                            out, out_size);
}
/* One-shot HKDF (extract + expand) over HMAC-SHA512. */
bool libspdm_hkdf_sha512_extract_and_expand(const uint8_t *key, size_t key_size,
                                            const uint8_t *salt, size_t salt_size,
                                            const uint8_t *info, size_t info_size,
                                            uint8_t *out, size_t out_size)
{
    return lkca_hkdf_extract_and_expand("hmac(sha512)", key, key_size,
                                        salt, salt_size, info, info_size,
                                        out, out_size);
}

/* HKDF-Extract over SHA-512: PRK = HMAC(salt, key); PRK is one digest. */
bool libspdm_hkdf_sha512_extract(const uint8_t *key, size_t key_size,
                                 const uint8_t *salt, size_t salt_size,
                                 uint8_t *prk_out, size_t prk_out_size)
{
    if (prk_out_size != (512 / 8)) {
        return false;
    }
    return libspdm_hmac_sha512_all(key, key_size, salt, salt_size, prk_out);
}

/* HKDF-Expand over HMAC-SHA512 using a caller-supplied PRK. */
bool libspdm_hkdf_sha512_expand(const uint8_t *prk, size_t prk_size,
                                const uint8_t *info, size_t info_size,
                                uint8_t *out, size_t out_size)
{
    return lkca_hkdf_expand("hmac(sha512)", prk, prk_size, info, info_size,
                            out, out_size);
}

View File

@@ -0,0 +1,282 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
/* Allocate a fresh HMAC-SHA256 context; NULL on failure. */
void *libspdm_hmac_sha256_new(void)
{
    return lkca_hash_new("hmac(sha256)");
}

/* Release a context obtained from libspdm_hmac_sha256_new(). */
void libspdm_hmac_sha256_free(void *hmac_sha256_ctx)
{
    lkca_hash_free(hmac_sha256_ctx);
}

/* Install the MAC key; must be called before update/final. */
bool libspdm_hmac_sha256_set_key(void *hmac_sha256_ctx, const uint8_t *key,
                                 size_t key_size)
{
    if (hmac_sha256_ctx == NULL) {
        return false;
    }
    return lkca_hmac_set_key(hmac_sha256_ctx, key, key_size);
}

/* Clone the running state of one context into another. */
bool libspdm_hmac_sha256_duplicate(const void *hmac_sha256_ctx,
                                   void *new_hmac_sha256_ctx)
{
    if (hmac_sha256_ctx == NULL || new_hmac_sha256_ctx == NULL) {
        return false;
    }
    return lkca_hmac_duplicate(new_hmac_sha256_ctx, hmac_sha256_ctx);
}

/* Absorb data into the MAC; NULL data is valid only when data_size == 0. */
bool libspdm_hmac_sha256_update(void *hmac_sha256_ctx, const void *data,
                                size_t data_size)
{
    if (hmac_sha256_ctx == NULL ||
        (data == NULL && data_size != 0) ||
        data_size > INT_MAX) {
        return false;
    }
    return crypto_shash_update(hmac_sha256_ctx, data, data_size) == 0;
}

/* Finish the MAC and write the 32-byte digest to hmac_value. */
bool libspdm_hmac_sha256_final(void *hmac_sha256_ctx, uint8_t *hmac_value)
{
    if (hmac_sha256_ctx == NULL || hmac_value == NULL) {
        return false;
    }
    return crypto_shash_final(hmac_sha256_ctx, hmac_value) == 0;
}

/* One-shot HMAC-SHA256 over a single buffer. */
bool libspdm_hmac_sha256_all(const void *data, size_t data_size,
                             const uint8_t *key, size_t key_size,
                             uint8_t *hmac_value)
{
    if (hmac_value == NULL ||
        (data == NULL && data_size != 0) ||
        data_size > INT_MAX) {
        return false;
    }
    return lkca_hmac_all("hmac(sha256)", key, key_size, data, data_size, hmac_value);
}
/* Allocate a fresh HMAC-SHA384 context; NULL on failure. */
void *libspdm_hmac_sha384_new(void)
{
    return lkca_hash_new("hmac(sha384)");
}

/* Release a context obtained from libspdm_hmac_sha384_new(). */
void libspdm_hmac_sha384_free(void *hmac_sha384_ctx)
{
    lkca_hash_free(hmac_sha384_ctx);
}

/* Install the MAC key; must be called before update/final. */
bool libspdm_hmac_sha384_set_key(void *hmac_sha384_ctx, const uint8_t *key,
                                 size_t key_size)
{
    if (hmac_sha384_ctx == NULL) {
        return false;
    }
    return lkca_hmac_set_key(hmac_sha384_ctx, key, key_size);
}

/* Clone the running state of one context into another. */
bool libspdm_hmac_sha384_duplicate(const void *hmac_sha384_ctx,
                                   void *new_hmac_sha384_ctx)
{
    if (hmac_sha384_ctx == NULL || new_hmac_sha384_ctx == NULL) {
        return false;
    }
    return lkca_hmac_duplicate(new_hmac_sha384_ctx, hmac_sha384_ctx);
}

/* Absorb data into the MAC; NULL data is valid only when data_size == 0. */
bool libspdm_hmac_sha384_update(void *hmac_sha384_ctx, const void *data,
                                size_t data_size)
{
    if (hmac_sha384_ctx == NULL ||
        (data == NULL && data_size != 0) ||
        data_size > INT_MAX) {
        return false;
    }
    return crypto_shash_update(hmac_sha384_ctx, data, data_size) == 0;
}

/* Finish the MAC and write the 48-byte digest to hmac_value. */
bool libspdm_hmac_sha384_final(void *hmac_sha384_ctx, uint8_t *hmac_value)
{
    if (hmac_sha384_ctx == NULL || hmac_value == NULL) {
        return false;
    }
    return crypto_shash_final(hmac_sha384_ctx, hmac_value) == 0;
}

/* One-shot HMAC-SHA384 over a single buffer. */
bool libspdm_hmac_sha384_all(const void *data, size_t data_size,
                             const uint8_t *key, size_t key_size,
                             uint8_t *hmac_value)
{
    if (hmac_value == NULL ||
        (data == NULL && data_size != 0) ||
        data_size > INT_MAX) {
        return false;
    }
    return lkca_hmac_all("hmac(sha384)", key, key_size, data, data_size, hmac_value);
}
/* Allocate a fresh HMAC-SHA512 context; NULL on failure. */
void *libspdm_hmac_sha512_new(void)
{
    return lkca_hash_new("hmac(sha512)");
}

/* Release a context obtained from libspdm_hmac_sha512_new(). */
void libspdm_hmac_sha512_free(void *hmac_sha512_ctx)
{
    lkca_hash_free(hmac_sha512_ctx);
}

/* Install the MAC key; must be called before update/final. */
bool libspdm_hmac_sha512_set_key(void *hmac_sha512_ctx, const uint8_t *key,
                                 size_t key_size)
{
    if (hmac_sha512_ctx == NULL)
        return false;
    return lkca_hmac_set_key(hmac_sha512_ctx, key, key_size);
}

/* Clone the running state of one context into another. */
bool libspdm_hmac_sha512_duplicate(const void *hmac_sha512_ctx,
                                   void *new_hmac_sha512_ctx)
{
    // BUGFIX: the original tested new_hmac_sha512_ctx twice and never
    // NULL-checked the source context (copy/paste slip vs the sha256/384
    // variants), so a NULL source could be passed on to the copy.
    if (hmac_sha512_ctx == NULL || new_hmac_sha512_ctx == NULL) {
        return false;
    }
    return lkca_hmac_duplicate(new_hmac_sha512_ctx, hmac_sha512_ctx);
}

/* Absorb data into the MAC; NULL data is valid only when data_size == 0. */
bool libspdm_hmac_sha512_update(void *hmac_sha512_ctx, const void *data,
                                size_t data_size)
{
    int32_t ret;
    if (hmac_sha512_ctx == NULL) {
        return false;
    }
    if (data == NULL && data_size != 0) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }
    ret = crypto_shash_update(hmac_sha512_ctx, data, data_size);
    if (ret != 0) {
        return false;
    }
    return true;
}

/* Finish the MAC and write the 64-byte digest to hmac_value. */
bool libspdm_hmac_sha512_final(void *hmac_sha512_ctx, uint8_t *hmac_value)
{
    int32_t ret;
    if (hmac_sha512_ctx == NULL || hmac_value == NULL) {
        return false;
    }
    ret = crypto_shash_final(hmac_sha512_ctx, hmac_value);
    if (ret != 0) {
        return false;
    }
    return true;
}

/* One-shot HMAC-SHA512 over a single buffer. */
bool libspdm_hmac_sha512_all(const void *data, size_t data_size,
                             const uint8_t *key, size_t key_size,
                             uint8_t *hmac_value)
{
    if (hmac_value == NULL) {
        return false;
    }
    if (data == NULL && data_size != 0) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }
    return lkca_hmac_all("hmac(sha512)", key, key_size, data, data_size, hmac_value);
}

View File

@@ -0,0 +1,42 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "os-interface.h"
#include "internal_crypt_lib.h"
#include "library/cryptlib.h"
// Build-time sanity probe: reports whether this binary was compiled with
// the LKCA-backed wrappers (USE_LKCA) or with the no-op stubs. Returns
// true only for the LKCA build.
bool libspdm_check_crypto_backend(void)
{
#ifdef USE_LKCA
// Compiled-in wrappers do not guarantee the kernel crypto modules are
// actually available at runtime — calls can still fail later.
nv_printf(NV_DBG_INFO, "libspdm_check_crypto_backend: LKCA wrappers found.\n");
nv_printf(NV_DBG_INFO, "libspdm_check_crypto_backend: LKCA calls may still fail if modules have not been loaded!\n");
return true;
#else
nv_printf(NV_DBG_ERRORS, "libspdm_check_crypto_backend: Error - libspdm expects LKCA but found stubs!\n");
return false;
#endif
}

View File

@@ -0,0 +1,37 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
// This is non-gpl symbol and not part of LKCA so no need to stub it out
// Fill `output` with `size` bytes from the kernel CSPRNG; always succeeds.
bool libspdm_random_bytes(uint8_t *output, size_t size)
{
get_random_bytes(output, size);
return true;
}
// This is specifically allowed by spdm
// The kernel RNG manages its own entropy pool, so external seeding is a
// deliberate no-op that still reports success.
bool libspdm_random_seed(const uint8_t *seed, size_t seed_size)
{
return true;
}

View File

@@ -0,0 +1,613 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
#include "library/cryptlib.h"
#ifdef USE_LKCA
#include <linux/module.h>
#include <linux/mpi.h>
#include <linux/random.h>
#include <crypto/akcipher.h>
#include <crypto/internal/rsa.h>
/* ------------------------ Macros & Defines ------------------------------- */
// Bit index (0-7) of the most significant bit within the top byte of a
// keySize-bit key.
#define GET_MOST_SIGNIFICANT_BIT(keySize) (keySize > 0 ? ((keySize - 1) & 7) : 0)
// NOTE(review): expansion is unparenthesized and carries a trailing ';' —
// only safe when used as the entire right-hand side of a statement; confirm
// all call sites before tightening.
#define GET_ENC_MESSAGE_SIZE_BYTE(keySize) (keySize + 7) >> 3;
#define PKCS1_MGF1_COUNTER_SIZE_BYTE (4)
#define RSA_PSS_PADDING_ZEROS_SIZE_BYTE (8)
// EMSA-PSS trailer byte per RFC 2437 / RFC 8017.
#define RSA_PSS_TRAILER_FIELD (0xbc)
#define SHIFT_RIGHT_AND_GET_BYTE(val, x) ((val >> x) & 0xFF)
#ifndef BITS_TO_BYTES
// NOTE(review): argument `b` is not parenthesized; BITS_TO_BYTES(a + b)
// would mis-bind — confirm call sites only pass simple expressions.
#define BITS_TO_BYTES(b) (b >> 3)
#endif
// Padding1 of EMSA-PSS: eight zero octets hashed ahead of mHash.
static const unsigned char zeroes[RSA_PSS_PADDING_ZEROS_SIZE_BYTE] = { 0 };
// Private RSA state: raw key components (kernel struct rsa_key, each
// individually kmalloc'd and owned by this context) plus flags recording
// which halves of the key pair have been installed.
struct rsa_ctx
{
struct rsa_key key;
bool pub_key_set;
bool priv_key_set;
int size; // NOTE(review): not referenced in the visible code — confirm use
};
#endif // #ifdef USE_LKCA
/*!
 * Creating and initializing a RSA context.
 *
 * @return : Pointer to a zero-initialized RSA context, or NULL on
 *           allocation failure. Release with libspdm_rsa_free().
 */
void *libspdm_rsa_new
(
    void
)
{
#ifndef USE_LKCA
    return NULL;
#else
    // kzalloc() zero-fills the context: every rsa_key pointer starts NULL
    // (safe for libspdm_rsa_free()) and both *_key_set flags are false,
    // making the previous kmalloc + memset + explicit flag stores redundant.
    return kzalloc(sizeof(struct rsa_ctx), GFP_KERNEL);
#endif
}
/*!
 * To free a RSA context.
 *
 * Frees every key component the context owns, then the context itself.
 * Safe to call with rsa_context == NULL.
 *
 * @param rsa_context : A RSA context pointer
 *
 */
void libspdm_rsa_free
(
    void *rsa_context
)
{
#ifdef USE_LKCA
    struct rsa_ctx *ctx = rsa_context;
    if (ctx != NULL)
    {
        // kfree(NULL) is a no-op, so each component can be freed
        // unconditionally — the previous `if (ptr) kfree(ptr)` guards
        // were redundant. Unset components are NULL in the zeroed context.
        kfree(ctx->key.n);
        kfree(ctx->key.e);
        kfree(ctx->key.d);
        kfree(ctx->key.q);
        kfree(ctx->key.p);
        kfree(ctx->key.dq);
        kfree(ctx->key.dp);
        kfree(ctx->key.qinv);
        kfree(ctx);
    }
#endif
}
// Helper for libspdm_rsa_set_key(): for key tag A, free any previously
// stored component `a`, take ownership of `shadow_num`, and record
// `bn_size` in the matching size field. Relies on `ctx`, `shadow_num`
// and `bn_size` being in scope at the expansion site.
#define rsa_set_key_case(a, a_sz, A) \
case A: \
{ \
if (ctx->key.a) { \
kfree(ctx->key.a); \
} \
ctx->key.a = shadow_num; \
ctx->key.a_sz = bn_size; \
break; \
}
/*!
 * To set a key component into a RSA context.
 *
 * The context stores a private copy of big_number; it owns the copy and
 * frees it when the component is replaced or in libspdm_rsa_free().
 * Passing big_number == NULL clears the component (NULL pointer, size 0).
 *
 * @param rsa_context : A RSA context pointer
 * @param key_tag     : Indicates which RSA key component to set
 * @param big_number  : Buffer holding the raw component value
 * @param bn_size     : The size of big_number in bytes
 *
 * @Return : True if OK; otherwise return False
 */
bool libspdm_rsa_set_key
(
void *rsa_context,
const libspdm_rsa_key_tag_t key_tag,
const uint8_t *big_number,
size_t bn_size
)
{
#ifndef USE_LKCA
return false;
#else
struct rsa_ctx *ctx = rsa_context;
uint8_t *shadow_num;
if (ctx == NULL)
{
return false;
}
// Quick sanity check if tag is valid
switch (key_tag)
{
case LIBSPDM_RSA_KEY_N:
case LIBSPDM_RSA_KEY_E:
case LIBSPDM_RSA_KEY_D:
case LIBSPDM_RSA_KEY_Q:
case LIBSPDM_RSA_KEY_P:
case LIBSPDM_RSA_KEY_DP:
case LIBSPDM_RSA_KEY_DQ:
case LIBSPDM_RSA_KEY_Q_INV:
break;
default:
return false;
break;
}
// Keep a private copy so the caller's buffer lifetime does not matter.
if (big_number != NULL)
{
shadow_num = kmalloc(bn_size, GFP_KERNEL);
if (shadow_num == NULL)
{
return false;
}
memcpy(shadow_num, big_number, bn_size);
}
else
{
// NULL big_number clears the component.
shadow_num = NULL;
bn_size = 0;
}
// Each case frees the old component and takes ownership of shadow_num
// (see rsa_set_key_case above).
switch (key_tag)
{
rsa_set_key_case(n, n_sz, LIBSPDM_RSA_KEY_N)
rsa_set_key_case(e, e_sz, LIBSPDM_RSA_KEY_E)
rsa_set_key_case(d, d_sz, LIBSPDM_RSA_KEY_D)
rsa_set_key_case(q, q_sz, LIBSPDM_RSA_KEY_Q)
rsa_set_key_case(p, p_sz, LIBSPDM_RSA_KEY_P)
rsa_set_key_case(dq, dq_sz, LIBSPDM_RSA_KEY_DQ)
rsa_set_key_case(dp, dp_sz, LIBSPDM_RSA_KEY_DP)
rsa_set_key_case(qinv, qinv_sz, LIBSPDM_RSA_KEY_Q_INV)
default:
// We can't get here ever
break;
}
return true;
#endif
}
/*!
 * Perform PKCS1 MGF1 operation.
 *
 * MGF1 (RFC 2437): mask = Hash(seed | C(0)) | Hash(seed | C(1)) | ...
 * truncated to maskedDB_length, where C(i) is a 4-byte big-endian counter.
 *
 * @param mask : A mask pointer to store return data
 * @param maskedDB_length : Indicate mask data block length
 * @param seed : A seed pointer holding the MGF seed bytes
 * @param seed_length : The seed length
 * @param hash_nid : The hash NID (only SHA-384 is supported)
 *
 * @Return : True if OK; otherwise return False
 */
static bool NV_PKCS1_MGF1
(
uint8_t *mask,
size_t maskedDB_length,
const uint8_t *seed,
size_t seed_length,
size_t hash_nid
)
{
#ifndef USE_LKCA
return false;
#else
size_t mdLength;
size_t counter;
size_t outLength;
// 4-byte big-endian counter appended to the seed each round
// (see PKCS1_MGF1_COUNTER_SIZE_BYTE, which matches this size).
uint8_t counterBuf[4];
void *sha384_ctx = NULL;
uint8_t hash_value[LIBSPDM_SHA384_DIGEST_SIZE];
bool status = false;
if (mask == NULL || seed == NULL)
{
return false;
}
// Only support SHA384 for MGF1 now.
if (hash_nid == LIBSPDM_CRYPTO_NID_SHA384)
{
mdLength = LIBSPDM_SHA384_DIGEST_SIZE;
}
else
{
return false;
}
sha384_ctx = libspdm_sha384_new();
if (sha384_ctx == NULL)
{
pr_err("%s : libspdm_sha384_new() failed \n", __FUNCTION__);
return false;
}
// One digest per iteration until maskedDB_length bytes are produced.
for (counter = 0, outLength = 0; outLength < maskedDB_length; counter++)
{
// Serialize the round counter big-endian, per the MGF1 spec.
counterBuf[0] = (uint8_t)SHIFT_RIGHT_AND_GET_BYTE(counter, 24);
counterBuf[1] = (uint8_t)SHIFT_RIGHT_AND_GET_BYTE(counter, 16);
counterBuf[2] = (uint8_t)SHIFT_RIGHT_AND_GET_BYTE(counter, 8);
counterBuf[3] = (uint8_t)SHIFT_RIGHT_AND_GET_BYTE(counter, 0);
status = libspdm_sha384_init(sha384_ctx);
if (!status)
{
pr_err("%s: libspdm_sha384_init() failed !! \n", __FUNCTION__);
goto _error_exit;
}
status = libspdm_sha384_update(sha384_ctx, seed, seed_length);
if (!status)
{
pr_err("%s: libspdm_sha384_update() failed(seed) !! \n", __FUNCTION__);
goto _error_exit;
}
status = libspdm_sha384_update(sha384_ctx, counterBuf, 4);
if (!status)
{
pr_err("%s: libspdm_sha384_update() failed(counterBuf) !! \n", __FUNCTION__);
goto _error_exit;
}
if (outLength + mdLength <= maskedDB_length)
{
// Full digest still fits: finalize directly into the output.
status = libspdm_sha384_final(sha384_ctx, mask + outLength);
if (!status)
{
pr_err("%s: libspdm_sha384_final() failed (<= maskedDB_length) !! \n", __FUNCTION__);
goto _error_exit;
}
outLength += mdLength;
}
else
{
// Final partial block: finalize into scratch and copy the tail.
status = libspdm_sha384_final(sha384_ctx, hash_value);
if (!status)
{
pr_err("%s: libspdm_sha384_final() failed(> maskedDB_length) !! \n", __FUNCTION__);
goto _error_exit;
}
memcpy(mask + outLength, hash_value, maskedDB_length - outLength);
outLength = maskedDB_length;
}
}
status = true;
_error_exit:
libspdm_sha384_free(sha384_ctx);
return status;
#endif
}
/*
0xbc : Trailer Field
+-----------+
| M |
+-----------+
|
V
Hash
|
V
+--------+----------+----------+
M' = |Padding1| mHash | salt |
+--------+----------+----------+
|--------------|---------------|
|
+--------+----------+ V
DB = |Padding2| salt | Hash
+--------+----------+ |
| |
V |
xor <--- MGF <---|
| |
| |
V V
+-------------------+----------+----+
EM = | maskedDB | H |0xbc|
+-------------------+----------+----+
salt : The random number, we hardcode its size as hash size here.
M' : The concatenation of padding1 + message hash + salt
MGF : Mask generation function.
A mask generation function takes an octet string of variable length
and a desired output length as input, and outputs an octet string of
the desired length
MGF1 is a Mask Generation Function based on a hash function.
Padding1 : 8 zeros
Padding2 : 0x01
The detail spec is at https://datatracker.ietf.org/doc/html/rfc2437
*/
/*!
* Set keys and call PKCS1_MGF1 to generate signature.
*
* @param rsa_context : A RSA context pointer
* @param hash_nid : The hash NID
* @param message_hash : The pointer to message hash
* @param signature : The pointer is used to store generated signature
* @param sig_size : For input, a pointer store signature buffer size.
* For output, a pointer store generate signature size.
* @param salt_Length : The salt length for RSA-PSS algorithm
*
* @Return : True if OK; otherwise return False
*/
/* Parameter contract is documented in the header comment above.
 * Fixes in this revision:
 *  - 'status' is reset after the MGF1 step so the mpi_read_raw_data /
 *    mpi_powm / mpi_read_buffer failure paths no longer return a stale true;
 *  - mpi_p is initialized to NULL (the previous mpi_alloc(0) result was
 *    overwritten by mpi_read_raw_data and leaked);
 *  - mpi_c allocation failure is detected before use;
 *  - the SHA-384 context is only freed when it was actually allocated.
 */
static bool nvRsaPaddingAddPkcs1PssMgf1
(
    void *rsa_context,
    size_t hash_nid,
    const uint8_t *message_hash,
    size_t hash_size,
    uint8_t *signature,
    size_t *sig_size,
    int salt_length
)
{
#ifndef USE_LKCA
    return false;
#else
    bool status = false;
    struct rsa_ctx *ctx = rsa_context;
    void *sha384_ctx = NULL;
    uint32_t keySize;
    uint32_t msBits;
    size_t emLength;
    uint8_t saltBuf[64];
    size_t maskedDB_length;
    size_t i;
    uint8_t *tmp_H;
    uint8_t *tmp_P;
    int rc;
    unsigned int ret_data_size;
    MPI mpi_n = NULL;
    MPI mpi_d = NULL;
    MPI mpi_p = NULL;
    MPI mpi_c = mpi_alloc(0);

    if (mpi_c == NULL)
    {
        pr_err("%s : mpi_c create failed !! \n", __FUNCTION__);
        goto _error_exit;
    }

    // read modulus to BN struct
    mpi_n = mpi_read_raw_data(ctx->key.n, ctx->key.n_sz);
    if (mpi_n == NULL)
    {
        pr_err("%s : mpi_n create failed !! \n", __FUNCTION__);
        goto _error_exit;
    }

    // read private exponent to BN struct
    mpi_d = mpi_read_raw_data(ctx->key.d, ctx->key.d_sz);
    if (mpi_d == NULL)
    {
        pr_err("%s : mpi_d create failed !! \n", __FUNCTION__);
        goto _error_exit;
    }

    keySize = mpi_n->nbits;
    msBits = GET_MOST_SIGNIFICANT_BIT(keySize);
    emLength = BITS_TO_BYTES(keySize);

    // When the modulus bit length is a multiple of 8, the leading octet of
    // the encoded message is forced to zero and the EM shrinks by one octet.
    if (msBits == 0)
    {
        *signature++ = 0;
        emLength--;
    }

    if (emLength < hash_size + 2)
    {
        pr_err("%s : emLength < hash_size + 2 !! \n", __FUNCTION__);
        goto _error_exit;
    }

    // Now, we only support salt_length == LIBSPDM_SHA384_DIGEST_SIZE
    if (salt_length != LIBSPDM_SHA384_DIGEST_SIZE ||
        hash_nid != LIBSPDM_CRYPTO_NID_SHA384)
    {
        pr_err("%s : Invalid salt_length (%x) \n", __FUNCTION__, salt_length);
        goto _error_exit;
    }

    get_random_bytes(saltBuf, salt_length);

    maskedDB_length = emLength - hash_size - 1;
    tmp_H = signature + maskedDB_length;

    // H = Hash(Padding1 || mHash || salt), written directly into the EM.
    sha384_ctx = libspdm_sha384_new();
    if (sha384_ctx == NULL)
    {
        pr_err("%s : libspdm_sha384_new() failed !! \n", __FUNCTION__);
        goto _error_exit;
    }

    status = libspdm_sha384_init(sha384_ctx);
    if (!status)
    {
        pr_err("%s : libspdm_sha384_init() failed !! \n", __FUNCTION__);
        goto _error_exit;
    }

    status = libspdm_sha384_update(sha384_ctx, zeroes, sizeof(zeroes));
    if (!status)
    {
        pr_err("%s : libspdm_sha384_update() with zeros failed !!\n", __FUNCTION__);
        goto _error_exit;
    }

    status = libspdm_sha384_update(sha384_ctx, message_hash, hash_size);
    if (!status)
    {
        pr_err("%s: libspdm_sha384_update() with message_hash failed !!\n", __FUNCTION__);
        goto _error_exit;
    }

    if (salt_length)
    {
        status = libspdm_sha384_update(sha384_ctx, saltBuf, salt_length);
        if (!status)
        {
            pr_err("%s : libspdm_sha384_update() with saltBuf failed !!\n", __FUNCTION__);
            goto _error_exit;
        }
    }

    status = libspdm_sha384_final(sha384_ctx, tmp_H);
    if (!status)
    {
        pr_err("%s : libspdm_sha384_final() with tmp_H failed !!\n", __FUNCTION__);
        goto _error_exit;
    }

    /* Generate dbMask in place then perform XOR on it */
    status = NV_PKCS1_MGF1(signature, maskedDB_length, tmp_H, hash_size, hash_nid);
    if (!status)
    {
        pr_err("%s : NV_PKCS1_MGF1() failed \n", __FUNCTION__);
        goto _error_exit;
    }

    // Reset 'status': the remaining failure paths jump to _error_exit and
    // must return false (previously they leaked the 'true' set above).
    status = false;

    // DB = PS || 0x01 || salt: XOR the fixed parts into the dbMask in place.
    tmp_P = signature;
    tmp_P += emLength - salt_length - hash_size - 2;
    *tmp_P++ ^= 0x1;
    if (salt_length > 0)
    {
        for (i = 0; i < salt_length; i++)
        {
            *tmp_P++ ^= saltBuf[i];
        }
    }

    // Clear the leftmost bits so the EM fits in emBits.
    if (msBits)
    {
        signature[0] &= 0xFF >> (8 - msBits);
    }

    /* H is already in place so just set final 0xbc */
    signature[emLength - 1] = RSA_PSS_TRAILER_FIELD;

    // read signature to BN struct
    mpi_p = mpi_read_raw_data(signature, emLength);
    if (mpi_p == NULL)
    {
        pr_err("%s : mpi_p() create failed !!\n", __FUNCTION__);
        goto _error_exit;
    }

    // Starting RSA encryption with private key over signature.
    rc = mpi_powm(mpi_c, mpi_p, mpi_d, mpi_n);
    if (rc != 0)
    {
        pr_err("%s : mpi_powm() failed \n", __FUNCTION__);
        goto _error_exit;
    }

    rc = mpi_read_buffer(mpi_c, signature, *sig_size, &ret_data_size, NULL);
    if (rc != 0)
    {
        pr_err("%s : mpi_read_buffer() failed \n", __FUNCTION__);
        goto _error_exit;
    }

    if (ret_data_size > *sig_size)
    {
        goto _error_exit;
    }

    *sig_size = ret_data_size;
    status = true;

_error_exit:
    mpi_free(mpi_n);
    mpi_free(mpi_d);
    mpi_free(mpi_c);
    mpi_free(mpi_p);
    if (sha384_ctx != NULL)
    {
        libspdm_sha384_free(sha384_ctx);
    }
    return status;
#endif
}
/*!
* Perform the RSA-PSS signature signing process with the LKCA library.
*
* @param rsa_context : An RSA context pointer
* @param hash_nid : The hash NID
* @param message_hash : The pointer to the message hash
* @param signature : The pointer used to store the generated signature
* @param sig_size : On input, a pointer to the signature buffer size.
* On output, a pointer to the generated signature size.
*
* @Return : True if OK; otherwise return False
*/
bool lkca_rsa_pss_sign
(
    void *rsa_context,
    size_t hash_nid,
    const uint8_t *message_hash,
    size_t hash_size,
    uint8_t *signature,
    size_t *sig_size
)
{
#ifdef USE_LKCA
    // Salt length is fixed: this build only supports SHA-384 based RSA-PSS.
    return nvRsaPaddingAddPkcs1PssMgf1(rsa_context, hash_nid, message_hash,
                                       hash_size, signature, sig_size,
                                       LIBSPDM_SHA384_DIGEST_SIZE);
#else
    // Without LKCA there is nothing to do; report success as before.
    return true;
#endif
}

View File

@@ -0,0 +1,85 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
/** @file
* RSA Asymmetric Cipher Wrapper Implementation.
*
* This file implements following APIs which provide more capabilities for RSA:
* 1) rsa_pss_sign
*
* RFC 8017 - PKCS #1: RSA Cryptography Specifications version 2.2
**/
#include "internal_crypt_lib.h"
#include "library/cryptlib.h"
/**
* Carries out the RSA-PSS signature generation with EMSA-PSS encoding scheme.
*
* This function carries out the RSA-PSS signature generation with EMSA-PSS encoding scheme defined in
* RSA PKCS#1 v2.2.
*
* The salt length is same as digest length.
*
* If the signature buffer is too small to hold the contents of signature, false
* is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
hash_size must match the digest size of hash_nid. nid could be SHA256, SHA384, SHA512, SHA3_256, SHA3_384, SHA3_512.
* If sig_size is large enough but signature is NULL, then return false.
*
* @param[in] rsa_context Pointer to RSA context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be signed.
* @param[in] hash_size size of the message hash in bytes.
* @param[out] signature Pointer to buffer to receive RSA-SSA PSS signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in RSA-SSA PSS.
* @retval false signature generation failed.
* @retval false sig_size is too small.
*
**/
bool libspdm_rsa_pss_sign(void *rsa_context, size_t hash_nid,
                          const uint8_t *message_hash, size_t hash_size,
                          uint8_t *signature, size_t *sig_size)
{
    /* Defer entirely to the LKCA-backed implementation. */
    bool ok;

    ok = lkca_rsa_pss_sign(rsa_context, hash_nid, message_hash,
                           hash_size, signature, sig_size);
    return ok;
}
//
// In RM we only need the signing process, so the verification function is
// stubbed out. Verification is needed in GSP code only.
//
bool libspdm_rsa_pss_verify(void *rsa_context, size_t hash_nid,
                            const uint8_t *message_hash, size_t hash_size,
                            const uint8_t *signature, size_t sig_size)
{
    /* Signing-only consumer: verification is never performed here. */
    (void)rsa_context;
    (void)hash_nid;
    (void)message_hash;
    (void)hash_size;
    (void)signature;
    (void)sig_size;
    return false;
}

View File

@@ -0,0 +1,264 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
/* Allocate a SHA-256 hashing context backed by the kernel crypto API. */
void *libspdm_sha256_new(void)
{
    return lkca_hash_new("sha256");
}

/* Release a context obtained from libspdm_sha256_new(). */
void libspdm_sha256_free(void *sha256_ctx)
{
    lkca_hash_free(sha256_ctx);
}

/* (Re)initialize the digest state; true on success. */
bool libspdm_sha256_init(void *sha256_context)
{
    return (crypto_shash_init(sha256_context) == 0);
}

/* Copy the running digest state from sha256_context into new_sha256_context. */
bool libspdm_sha256_duplicate(const void *sha256_context,
                              void *new_sha256_context)
{
    if ((sha256_context == NULL) || (new_sha256_context == NULL)) {
        return false;
    }
    return lkca_hash_duplicate(new_sha256_context, sha256_context);
}

/* Absorb data_size bytes of data into the running digest. */
bool libspdm_sha256_update(void *sha256_context, const void *data,
                           size_t data_size)
{
    if (sha256_context == NULL) {
        return false;
    }
    /* NULL data is acceptable only for an empty update. */
    if ((data == NULL) && (data_size != 0)) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }
    return (crypto_shash_update(sha256_context, data, data_size) == 0);
}

/* Finalize the digest and write it to hash_value. */
bool libspdm_sha256_final(void *sha256_context, uint8_t *hash_value)
{
    if ((sha256_context == NULL) || (hash_value == NULL)) {
        return false;
    }
    return (crypto_shash_final(sha256_context, hash_value) == 0);
}

/* One-shot SHA-256 of data into hash_value. */
bool libspdm_sha256_hash_all(const void *data, size_t data_size,
                             uint8_t *hash_value)
{
    if (hash_value == NULL) {
        return false;
    }
    if ((data == NULL) && (data_size != 0)) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }
    return lkca_hash_all("sha256", data, data_size, hash_value);
}
/* Allocate a SHA-384 hashing context backed by the kernel crypto API. */
void *libspdm_sha384_new(void)
{
    return lkca_hash_new("sha384");
}

/* Release a context obtained from libspdm_sha384_new(). */
void libspdm_sha384_free(void *sha384_ctx)
{
    lkca_hash_free(sha384_ctx);
}

/* (Re)initialize the digest state; true on success. */
bool libspdm_sha384_init(void *sha384_context)
{
    return (crypto_shash_init(sha384_context) == 0);
}

/* Copy the running digest state from sha384_context into new_sha384_context. */
bool libspdm_sha384_duplicate(const void *sha384_context,
                              void *new_sha384_context)
{
    if ((sha384_context == NULL) || (new_sha384_context == NULL)) {
        return false;
    }
    return lkca_hash_duplicate(new_sha384_context, sha384_context);
}

/* Absorb data_size bytes of data into the running digest. */
bool libspdm_sha384_update(void *sha384_context, const void *data,
                           size_t data_size)
{
    if (sha384_context == NULL) {
        return false;
    }
    /* NULL data is acceptable only for an empty update. */
    if ((data == NULL) && (data_size != 0)) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }
    return (crypto_shash_update(sha384_context, data, data_size) == 0);
}

/* Finalize the digest and write it to hash_value. */
bool libspdm_sha384_final(void *sha384_context, uint8_t *hash_value)
{
    if ((sha384_context == NULL) || (hash_value == NULL)) {
        return false;
    }
    return (crypto_shash_final(sha384_context, hash_value) == 0);
}

/* One-shot SHA-384 of data into hash_value. */
bool libspdm_sha384_hash_all(const void *data, size_t data_size,
                             uint8_t *hash_value)
{
    if (hash_value == NULL) {
        return false;
    }
    if ((data == NULL) && (data_size != 0)) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }
    return lkca_hash_all("sha384", data, data_size, hash_value);
}
/* Allocate a SHA-512 hashing context backed by the kernel crypto API. */
void *libspdm_sha512_new(void)
{
    return lkca_hash_new("sha512");
}

/* Release a context obtained from libspdm_sha512_new(). */
void libspdm_sha512_free(void *sha512_ctx)
{
    lkca_hash_free(sha512_ctx);
}

/* (Re)initialize the digest state; true on success. */
bool libspdm_sha512_init(void *sha512_context)
{
    return (crypto_shash_init(sha512_context) == 0);
}

/* Copy the running digest state from sha512_context into new_sha512_context. */
bool libspdm_sha512_duplicate(const void *sha512_context,
                              void *new_sha512_context)
{
    if ((sha512_context == NULL) || (new_sha512_context == NULL)) {
        return false;
    }
    return lkca_hash_duplicate(new_sha512_context, sha512_context);
}

/* Absorb data_size bytes of data into the running digest. */
bool libspdm_sha512_update(void *sha512_context, const void *data,
                           size_t data_size)
{
    if (sha512_context == NULL) {
        return false;
    }
    /* NULL data is acceptable only for an empty update. */
    if ((data == NULL) && (data_size != 0)) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }
    return (crypto_shash_update(sha512_context, data, data_size) == 0);
}

/* Finalize the digest and write it to hash_value. */
bool libspdm_sha512_final(void *sha512_context, uint8_t *hash_value)
{
    if ((sha512_context == NULL) || (hash_value == NULL)) {
        return false;
    }
    return (crypto_shash_final(sha512_context, hash_value) == 0);
}

/* One-shot SHA-512 of data into hash_value. */
bool libspdm_sha512_hash_all(const void *data, size_t data_size,
                             uint8_t *hash_value)
{
    if (hash_value == NULL) {
        return false;
    }
    if ((data == NULL) && (data_size != 0)) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }
    return lkca_hash_all("sha512", data, data_size, hash_value);
}

View File

@@ -0,0 +1,181 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
#ifdef USE_LKCA
#ifndef NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT
#include <crypto/internal/hash.h>
#endif
#endif
/*
 * Allocate a shash transform for alg_name plus an operation descriptor
 * in one heap block. Returns the descriptor, or NULL on failure.
 * XXX: the crypto_shash could in principle be shared with only the
 * descriptor allocated per context.
 */
void *lkca_hash_new(const char* alg_name)
{
#ifndef USE_LKCA
    return NULL;
#else
    struct crypto_shash *tfm;
    struct shash_desc *desc;

    tfm = crypto_alloc_shash(alg_name, 0, 0);
    if (IS_ERR(tfm)) {
        printk(KERN_INFO "Failed to alloc %s\n", alg_name);
        return NULL;
    }

    /* Descriptor size depends on the chosen algorithm. */
    desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
    if (desc == NULL) {
        printk(KERN_INFO "Kernel out of mem\n");
        crypto_free_shash(tfm);
        return NULL;
    }

    desc->tfm = tfm;
    return desc;
#endif
}
/*
 * Release a descriptor allocated by lkca_hash_new().
 * Accepts NULL as a no-op: callers' error paths (e.g. libspdm_sha*_free on
 * a context that failed to allocate) can reach here with NULL, which would
 * previously dereference ctx->tfm and oops.
 */
void lkca_hash_free(struct shash_desc *ctx)
{
#ifndef USE_LKCA
#else
    if (ctx == NULL) {
        return;
    }
    crypto_free_shash(ctx->tfm);
    kfree(ctx);
#endif
}
/*
 * Copy the running digest state from src into dst using the kernel's
 * export/import interface with an on-stack scratch buffer.
 * Both descriptors are assumed to use the same algorithm (not checked;
 * callers duplicate like-for-like contexts). Returns true on success.
 */
bool lkca_hash_duplicate(struct shash_desc *dst, struct shash_desc const *src)
{
#ifndef USE_LKCA
    return false;
#else
    SHASH_DESC_ON_STACK(tmp, src);
    /* Cast away const: crypto_shash_export takes a non-const descriptor,
     * but exporting is logically a read of the hash state. */
    if (crypto_shash_export((struct shash_desc *) src, tmp)) {
        return false;
    }
    if (crypto_shash_import(dst, tmp)) {
        return false;
    }
    return true;
#endif
}
/*
 * Copy HMAC state from src into dst, including the keyed ipad/opad blocks
 * that crypto_shash_export() does not carry (LKCA workaround).
 */
bool lkca_hmac_duplicate(struct shash_desc *dst, struct shash_desc const *src)
{
#ifndef USE_LKCA
    return false;
#else
    // in LKCA hmac export doesn't export ipad/opad, so we need to WAR it
    struct crypto_shash *src_tfm = src->tfm;
    struct crypto_shash *dst_tfm = dst->tfm;
    int ss = crypto_shash_statesize(dst_tfm);
#ifdef NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT
    char *src_ipad = crypto_tfm_ctx_aligned(&src_tfm->base);
    char *dst_ipad = crypto_tfm_ctx_aligned(&dst_tfm->base);
#else
    int ctx_size = crypto_shash_alg(dst_tfm)->base.cra_ctxsize;
    char *src_ipad = crypto_shash_ctx(src_tfm);
    char *dst_ipad = crypto_shash_ctx(dst_tfm);
    /*
     * Actual struct definition is hidden, so I assume data we need is at
     * the end. In 6.0 the struct has a pointer to crypto_shash followed by:
     * 'u8 ipad[statesize];', then 'u8 opad[statesize];'
     * NOTE(review): this layout assumption must be revalidated against
     * every new kernel release.
     */
    src_ipad += ctx_size - 2 * ss;
    dst_ipad += ctx_size - 2 * ss;
#endif
    // Copy ipad, then opad (located statesize bytes further), then clear
    // NEED_KEY so the duplicated transform is usable without re-keying.
    memcpy(dst_ipad, src_ipad, crypto_shash_blocksize(src->tfm));
    memcpy(dst_ipad + ss, src_ipad + ss, crypto_shash_blocksize(src->tfm));
    crypto_shash_clear_flags(dst->tfm, CRYPTO_TFM_NEED_KEY);
    // Remaining digest state goes through the generic export/import path.
    return lkca_hash_duplicate(dst, src);
#endif
}
/* One-shot digest of data with algorithm alg_name; true on success. */
bool lkca_hash_all(const char* alg_name, const void *data,
                   size_t data_size, uint8_t *hash_value)
{
#ifndef USE_LKCA
    return false;
#else
    struct crypto_shash *tfm;
    int rc;

    tfm = crypto_alloc_shash(alg_name, 0, 0);
    if (IS_ERR(tfm)) {
        return false;
    }

    rc = crypto_shash_tfm_digest(tfm, data, data_size, hash_value);
    crypto_free_shash(tfm);

    return rc == 0;
#endif
}
/* Key the HMAC transform and reset the descriptor state; true on success. */
bool lkca_hmac_set_key(struct shash_desc *desc, const uint8_t *key, size_t key_size)
{
#ifndef USE_LKCA
    return false;
#else
    if (crypto_shash_setkey(desc->tfm, key, key_size) != 0) {
        return false;
    }
    return crypto_shash_init(desc) == 0;
#endif
}
/* One-shot keyed HMAC of data with algorithm alg_name; true on success. */
bool lkca_hmac_all(const char* alg_name, const uint8_t *key, size_t key_size,
                   const uint8_t *data, size_t data_size, uint8_t *hash_value)
{
#ifndef USE_LKCA
    return false;
#else
    struct crypto_shash *tfm;
    int rc;

    tfm = crypto_alloc_shash(alg_name, 0, 0);
    if (IS_ERR(tfm)) {
        return false;
    }

    /* Key the transform first; only digest if the key was accepted. */
    rc = crypto_shash_setkey(tfm, key, key_size);
    if (rc == 0) {
        rc = crypto_shash_tfm_digest(tfm, data, data_size, hash_value);
    }

    crypto_free_shash(tfm);
    return rc == 0;
#endif
}

View File

@@ -0,0 +1,682 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* libspdm_x509_verify_cert_chain, libspdm_x509_get_cert_from_cert_chain, check
* and prototypes taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
#ifdef USE_LKCA
#include <crypto/public_key.h>
#include <keys/asymmetric-type.h>
#endif
/*
 * The X.509 construction/teardown helpers below are not used by this
 * consumer; each is intentionally unimplemented and asserts if ever reached.
 */
bool libspdm_x509_construct_certificate(const uint8_t *cert, size_t cert_size,
                                        uint8_t **single_x509_cert)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_construct_certificate_stack(uint8_t **x509_stack, ...)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_free(void *x509_cert)
{
    LIBSPDM_ASSERT(false);
}

void libspdm_x509_stack_free(void *x509_stack)
{
    LIBSPDM_ASSERT(false);
}
#ifdef USE_LKCA
/*
 * Base64-encode srclen bytes of src into dst (no line breaks, '=' padding).
 * On entry *p_dstlen is the capacity of dst; on success it is set to the
 * encoded length. On insufficient space, sets *p_dstlen to 0 and returns
 * false.
 */
bool libspdm_encode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
{
    static const uint8_t base64_tab[] =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    uint8_t *out = dst;
    size_t pos = 0;
    size_t grp;

    /* Encode every full 3-byte group as 4 output characters. */
    while (pos + 3 <= srclen)
    {
        if ((size_t)(out - dst) + 4 > *p_dstlen)
        {
            goto too_small;
        }
        grp = ((size_t)src[pos] << 16) | ((size_t)src[pos + 1] << 8) | src[pos + 2];
        *out++ = base64_tab[(grp >> 18) & 63];
        *out++ = base64_tab[(grp >> 12) & 63];
        *out++ = base64_tab[(grp >> 6) & 63];
        *out++ = base64_tab[grp & 63];
        pos += 3;
    }

    if (pos + 1 == srclen)
    {
        /* One trailing byte: two data characters plus "==". */
        if ((size_t)(out - dst) + 4 > *p_dstlen)
        {
            goto too_small;
        }
        grp = (size_t)src[pos] << 4;
        *out++ = base64_tab[(grp >> 6) & 63];
        *out++ = base64_tab[grp & 63];
        *out++ = '=';
        *out++ = '=';
    }
    else if (pos + 2 == srclen)
    {
        /* Two trailing bytes: three data characters plus "=". */
        if ((size_t)(out - dst) + 4 > *p_dstlen)
        {
            goto too_small;
        }
        grp = (((size_t)src[pos] << 8) | src[pos + 1]) << 2;
        *out++ = base64_tab[(grp >> 12) & 63];
        *out++ = base64_tab[(grp >> 6) & 63];
        *out++ = base64_tab[grp & 63];
        *out++ = '=';
    }

    *p_dstlen = (size_t)(out - dst);
    return true;

too_small:
    *p_dstlen = 0;
    return false;
}
typedef enum {
    BASE64_CONV_VALID,
    BASE64_CONV_PAD,
    BASE64_CONV_INVALID
} BASE64_CONV;

/*
 * Map one base64 character to its 6-bit value. Accepts both the standard
 * ('+', '/') and URL-safe ('-', '_') alphabets. '=' reports PAD with value
 * 0; any other character reports INVALID and leaves *value untouched.
 */
static BASE64_CONV libspdm_decode_base64_chr(uint8_t b64_chr, uint8_t *value)
{
    if (b64_chr >= 'A' && b64_chr <= 'Z')
    {
        *value = (uint8_t)(b64_chr - 'A');
        return BASE64_CONV_VALID;
    }
    if (b64_chr >= 'a' && b64_chr <= 'z')
    {
        *value = (uint8_t)(b64_chr - 'a' + 26);
        return BASE64_CONV_VALID;
    }
    if (b64_chr >= '0' && b64_chr <= '9')
    {
        *value = (uint8_t)(b64_chr - '0' + 52);
        return BASE64_CONV_VALID;
    }
    if (b64_chr == '+' || b64_chr == '-')
    {
        *value = 62;
        return BASE64_CONV_VALID;
    }
    if (b64_chr == '/' || b64_chr == '_')
    {
        *value = 63;
        return BASE64_CONV_VALID;
    }
    if (b64_chr == '=')
    {
        *value = 0;
        return BASE64_CONV_PAD;
    }
    return BASE64_CONV_INVALID;
}
/*
 * Decode a base64 buffer containing no line breaks. srclen must be a
 * multiple of 4. On success writes the decoded bytes to dst and stores the
 * decoded length in *p_dstlen; on any failure stores 0 and returns false.
 */
static bool libspdm_decode_base64_stripped(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
{
    const uint8_t *in;
    uint8_t *out;
    uint8_t idx;
    uint8_t out_bytes;
    uint32_t group;
    uint8_t sextet;

    if (src == NULL || dst == NULL || srclen % 4 != 0)
    {
        return false;
    }

    for (in = src, out = dst; in < src + srclen; in += 4)
    {
        /* Fold four characters into a 24-bit group; each '=' pad drops one
         * output byte. */
        out_bytes = 3;
        group = 0;
        for (idx = 0; idx < 4; idx++)
        {
            switch (libspdm_decode_base64_chr(in[idx], &sextet))
            {
            case BASE64_CONV_PAD:
                out_bytes--;
                /* fallthrough: a pad contributes six zero bits */
            case BASE64_CONV_VALID:
                group <<= 6;
                group |= sextet;
                break;
            default:
                /* attempting to decode an invalid character */
                goto Exit;
            }
        }

        if (out - dst + out_bytes > *p_dstlen)
        {
            /* buffer too small */
            goto Exit;
        }

        switch (out_bytes)
        {
        case 3:
            *out++ = (group >> 16) & 0xff;
            *out++ = (group >> 8) & 0xff;
            *out++ = group & 0xff;
            break;
        case 2:
            *out++ = (group >> 16) & 0xff;
            *out++ = (group >> 8) & 0xff;
            break;
        case 1:
            *out++ = (group >> 16) & 0xff;
            break;
        default:
            /* more than two pads in a quad: invalid encoding */
            goto Exit;
        }
    }

    *p_dstlen = out - dst;
    return true;

Exit:
    *p_dstlen = 0;
    return false;
}
/*
 * Decode a PEM-style base64 buffer in which every 64 encoded characters are
 * followed by a '\n' (65-byte rounds; the final, possibly shorter round is
 * also '\n'-terminated).
 *
 * On entry *p_dstlen is the capacity of dst; on success it receives the
 * number of decoded bytes. Returns false (with *p_dstlen == 0) on invalid
 * input or insufficient buffer space.
 */
bool libspdm_decode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
{
    size_t s_progress;
    size_t d_progress;
    size_t decode_size;
    size_t decoded_size;
    // for each round we decode 64 bytes and skip the linebreaks
    for (s_progress = d_progress = 0; s_progress < srclen; s_progress += 65)
    {
        if (s_progress + 65 < srclen)
        {
            decode_size = 64;
        }
        else
        {
            // -1 to avoid decoding the '\n' byte in the end
            decode_size = srclen - s_progress - 1;
        }
        // Estimate the decoded size of this chunk. The '=' pad probes must
        // index the current chunk (src + s_progress) and use decode_size:
        // the previous code dropped the s_progress offset and used
        // decoded_size for the second probe, overestimating the space
        // needed and reading the wrong bytes. Guards also prevent an
        // out-of-bounds read when decode_size < 2.
        decoded_size = (decode_size / 4) * 3;
        if (decode_size >= 1 && src[s_progress + decode_size - 1] == '=')
        {
            decoded_size--;
        }
        if (decode_size >= 2 && src[s_progress + decode_size - 2] == '=')
        {
            decoded_size--;
        }
        // break early if the buffer is too small
        if (*p_dstlen - d_progress < decoded_size)
        {
            break;
        }
        if (!libspdm_decode_base64_stripped(src + s_progress, dst + d_progress, decode_size, &decoded_size))
        {
            return false;
        }
        d_progress += decoded_size;
    }
    // The loop breaks early only when the destination ran out of space.
    if (s_progress < srclen)
    {
        *p_dstlen = 0;
        return false;
    }
    *p_dstlen = d_progress;
    return true;
}
#else // USE_LKCA
bool libspdm_encode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
{
return false;
}
bool libspdm_decode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
{
return false;
}
#endif // USE_LKCA
/*
 * Parse one DER element header at ptr (bounded by end). On success stores
 * the TOTAL element size (tag + length octets + content) in *length and
 * returns true. Only definite short form and 1- or 2-byte long-form
 * lengths are accepted, and the element must fit within [ptr, end).
 */
static bool lkca_asn1_get_tag(uint8_t const *ptr, uint8_t const *end,
                              size_t *length, uint32_t tag)
{
    uint64_t avail = (uint64_t)(end - ptr);

    /* Need at least tag + length octet; cap chains at 1 GB. */
    if (avail < 2 || avail > (1024ULL * 1024 * 1024)) {
        return false;
    }

    /* We only deal with universal and application tags. */
    if (ptr[0] != tag) {
        return false;
    }

    if (ptr[1] < 0x80) {
        /* short form: length fits in the second octet */
        *length = (size_t)ptr[1] + 2;
    } else if (ptr[1] == 0x81) {
        /* long form, one length octet */
        if (avail < 3) {
            return false;
        }
        *length = (size_t)ptr[2] + 3;
    } else if (ptr[1] == 0x82) {
        /* long form, two length octets */
        if (avail < 4) {
            return false;
        }
        *length = ((size_t)ptr[2] << 8) + ptr[3] + 4;
    } else {
        /* In theory it could be bigger than 64KB. */
        return false;
    }

    /* The whole element must lie inside the buffer. */
    return *length <= avail;
}
/*
 * libspdm wrapper over lkca_asn1_get_tag(). Note *ptr is NOT advanced;
 * only the total element size (header + content) is returned in *length.
 */
bool libspdm_asn1_get_tag(uint8_t **ptr, const uint8_t *end, size_t *length,
                          uint32_t tag)
{
    return lkca_asn1_get_tag(*ptr, end, length, tag);
}
/* Not used by this consumer: intentionally unimplemented, asserts if called. */
bool libspdm_x509_get_subject_name(const uint8_t *cert, size_t cert_size,
                                   uint8_t *cert_subject,
                                   size_t *subject_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

/* Not used by this consumer: intentionally unimplemented, asserts if called. */
bool libspdm_x509_get_common_name(const uint8_t *cert, size_t cert_size,
                                  char *common_name,
                                  size_t *common_name_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

/* Not used by this consumer: intentionally unimplemented, asserts if called. */
bool
libspdm_x509_get_organization_name(const uint8_t *cert, size_t cert_size,
                                   char *name_buffer,
                                   size_t *name_buffer_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT)
/* RSA public keys are not extracted from certificates here; asserts if called. */
bool libspdm_rsa_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
                                          void **rsa_context)
{
    LIBSPDM_ASSERT(false);
    return false;
}
#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */
/*
 * Extract the EC public key from a DER certificate and wrap it in a newly
 * allocated libspdm EC context (P-256 or P-384, selected by key size).
 * On success stores the context in *ec_context and returns true.
 */
bool libspdm_ec_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
                                         void **ec_context)
{
#ifdef USE_LKCA
    bool ret = false;
    uint32_t key_size = 0;
    struct key_preparsed_payload lkca_cert;
    struct public_key *pub;
    // NOTE(review): cert is only dereferenced after the NULL check below,
    // but assigning it first reads awkwardly; consider checking earlier.
    lkca_cert.data = cert;
    lkca_cert.datalen = cert_size;
    if (cert == NULL) {
        return false;
    }
    if(key_type_asymmetric.preparse(&lkca_cert)) {
        return false;
    }
    pub = lkca_cert.payload.data[asym_crypto];
    // -1 is since lkca prepends '4' to public keys...
    key_size = pub->keylen - 1;
    // Two raw coordinates: 64 bytes -> P-256, 96 bytes -> P-384.
    if (key_size == (2 * 256 / 8)) {
        *ec_context = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP256R1);
    } else if (key_size == (2 * 384 / 8)) {
        *ec_context = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP384R1);
    } else {
        goto err;
    }
    if (*ec_context == NULL) {
        goto err;
    }
    // Again skip '4' in key to be in line with spdm protocol. We will add it
    // back in ecda_verify
    if (!lkca_ec_set_pub_key(*ec_context, (char *) pub->key + 1, key_size)) {
        libspdm_ec_free(*ec_context);
        goto err;
    }
    ret = true;
err:
    key_type_asymmetric.free_preparse(&lkca_cert);
    return ret;
#else
    return false;
#endif
}
/* Edwards-curve (ecd) contexts are not produced here; asserts if called. */
bool libspdm_ecd_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
                                          void **ecd_context)
{
    LIBSPDM_ASSERT(false);
    return false;
}

/* SM2 contexts are not produced here; asserts if called. */
bool libspdm_sm2_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
                                          void **sm2_context)
{
    LIBSPDM_ASSERT(false);
    return false;
}
/*
 * Verify that 'cert' is signed by 'ca_cert' (both raw DER) using the kernel
 * asymmetric-key subsystem.
 *
 * Returns 0 on successful verification, non-zero on any failure.
 */
static int lkca_x509_verify_cert(const uint8_t *cert, size_t cert_size,
                                 const uint8_t *ca_cert, size_t ca_cert_size)
{
#ifdef USE_LKCA
    int ret;
    struct key_preparsed_payload lkca_cert;
    struct key_preparsed_payload lkca_ca_cert;

    lkca_cert.data = cert;
    lkca_cert.datalen = cert_size;
    lkca_ca_cert.data = ca_cert;
    lkca_ca_cert.datalen = ca_cert_size;

    ret = key_type_asymmetric.preparse(&lkca_cert);
    if (ret) {
        return ret;
    }

    ret = key_type_asymmetric.preparse(&lkca_ca_cert);
    if (ret) {
        key_type_asymmetric.free_preparse(&lkca_cert);
        return ret;
    }

    // Check the child's signature blob against the CA's public key.
    ret = public_key_verify_signature(lkca_ca_cert.payload.data[asym_crypto],
                                      lkca_cert.payload.data[asym_auth]);

    key_type_asymmetric.free_preparse(&lkca_cert);
    key_type_asymmetric.free_preparse(&lkca_ca_cert);
    return ret;
#else
    // BUG FIX: this previously returned 'false' (0), which callers of this
    // 0-on-success function interpreted as a PASSED verification. Without
    // LKCA, verification is impossible and must fail.
    return -1;
#endif
}
/*
 * True when 'cert' verifies against 'ca_cert' (both raw DER buffers);
 * thin bool wrapper over lkca_x509_verify_cert()'s 0-on-success result.
 */
bool libspdm_x509_verify_cert(const uint8_t *cert, size_t cert_size,
                              const uint8_t *ca_cert, size_t ca_cert_size)
{
    return lkca_x509_verify_cert(cert, cert_size, ca_cert, ca_cert_size) == 0;
}
/*
 * Verify a chain of concatenated DER certificates: the first certificate is
 * checked against root_cert, and each subsequent certificate against its
 * predecessor. Returns true only if at least one certificate was present
 * and every link verified.
 */
bool libspdm_x509_verify_cert_chain(const uint8_t *root_cert, size_t root_cert_length,
                                    const uint8_t *cert_chain, size_t cert_chain_length)
{
    const uint8_t *issuer = root_cert;
    size_t issuer_len = root_cert_length;
    const uint8_t *child = cert_chain;
    const uint8_t *chain_end = cert_chain + cert_chain_length;
    size_t child_len;
    bool verified = false;

    /* Walk each DER SEQUENCE in the chain, verifying it against the
     * certificate before it. The walk stops at the first byte that does
     * not parse as a SEQUENCE (normally the end of the buffer). */
    while (lkca_asn1_get_tag(
               child, chain_end, &child_len,
               LIBSPDM_CRYPTO_ASN1_CONSTRUCTED | LIBSPDM_CRYPTO_ASN1_SEQUENCE)) {
        if (lkca_x509_verify_cert(child, child_len, issuer, issuer_len) != 0) {
            return false;
        }
        verified = true;
        issuer = child;
        issuer_len = child_len;
        child += child_len;
    }

    return verified;
}
/*
 * Locate one certificate inside a buffer of concatenated DER certificates.
 * cert_index is 0-based; -1 selects the last certificate. On success *cert
 * points INTO the caller's buffer (no copy) and *cert_length is the DER
 * element size. Returns false for a malformed chain or out-of-range index.
 */
bool libspdm_x509_get_cert_from_cert_chain(const uint8_t *cert_chain,
                                           size_t cert_chain_length,
                                           const int32_t cert_index, const uint8_t **cert,
                                           size_t *cert_length)
{
    size_t asn1_len;
    int32_t current_index;
    size_t current_cert_len;
    const uint8_t *current_cert;
    current_cert_len = 0;
    /* Check input parameters.*/
    if ((cert_chain == NULL) || (cert == NULL) || (cert_index < -1) ||
        (cert_length == NULL)) {
        return false;
    }
    current_cert = cert_chain;
    current_index = -1;
    /* Traverse the certificate chain*/
    while (true) {
        /* Get asn1 tag len*/
        if (!lkca_asn1_get_tag(
                current_cert, cert_chain + cert_chain_length, &asn1_len,
                LIBSPDM_CRYPTO_ASN1_CONSTRUCTED | LIBSPDM_CRYPTO_ASN1_SEQUENCE)) {
            break;
        }
        current_cert_len = asn1_len;
        current_index++;
        if (current_index == cert_index) {
            *cert = current_cert;
            *cert_length = current_cert_len;
            return true;
        }
        /* Advance past this certificate's full DER element. */
        current_cert = current_cert + current_cert_len;
    }
    /* If cert_index is -1, Return the last certificate
     * (current_cert now points one element past it). */
    if (cert_index == -1 && current_index >= 0) {
        *cert = current_cert - current_cert_len;
        *cert_length = current_cert_len;
        return true;
    }
    return false;
}
/*
 * The remaining X.509 parsing/generation helpers are not used by this
 * consumer. Each is intentionally unimplemented: it asserts in debug
 * builds and reports failure.
 */
bool libspdm_x509_get_tbs_cert(const uint8_t *cert, size_t cert_size,
                               uint8_t **tbs_cert, size_t *tbs_cert_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_version(const uint8_t *cert, size_t cert_size,
                              size_t *version)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_serial_number(const uint8_t *cert, size_t cert_size,
                                    uint8_t *serial_number,
                                    size_t *serial_number_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_issuer_name(const uint8_t *cert, size_t cert_size,
                                  uint8_t *cert_issuer,
                                  size_t *issuer_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool
libspdm_x509_get_issuer_common_name(const uint8_t *cert, size_t cert_size,
                                    char *common_name,
                                    size_t *common_name_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool
libspdm_x509_get_issuer_orgnization_name(const uint8_t *cert, size_t cert_size,
                                         char *name_buffer,
                                         size_t *name_buffer_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_signature_algorithm(const uint8_t *cert,
                                          size_t cert_size, uint8_t *oid,
                                          size_t *oid_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_extension_data(const uint8_t *cert, size_t cert_size,
                                     const uint8_t *oid, size_t oid_size,
                                     uint8_t *extension_data,
                                     size_t *extension_data_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_validity(const uint8_t *cert, size_t cert_size,
                               uint8_t *from, size_t *from_size, uint8_t *to,
                               size_t *to_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size,
                                size_t *usage)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_extended_key_usage(const uint8_t *cert,
                                         size_t cert_size, uint8_t *usage,
                                         size_t *usage_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_extended_basic_constraints(const uint8_t *cert,
                                                 size_t cert_size,
                                                 uint8_t *basic_constraints,
                                                 size_t *basic_constraints_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_set_date_time(char const *date_time_str, void *date_time, size_t *date_time_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

// -3 is used as an unconditional error sentinel here (stubbed comparison).
int32_t libspdm_x509_compare_date_time(const void *date_time1, const void *date_time2)
{
    LIBSPDM_ASSERT(false);
    return -3;
}

bool libspdm_gen_x509_csr(size_t hash_nid, size_t asym_nid,
                          uint8_t *requester_info, size_t requester_info_length,
                          void *context, char *subject_name,
                          size_t *csr_len, uint8_t **csr_pointer)
{
    LIBSPDM_ASSERT(false);
    return false;
}

1546
kernel-open/nvidia/nv-acpi.c Normal file
View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,81 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include <linux/backlight.h>
#include "os-interface.h"
#include "nv-linux.h"
/*
 * Read the current Tegra panel backlight level.
 *
 * Looks up the backlight device registered under the name stored in the
 * per-GPU Linux state and copies its current brightness.
 *
 * Returns NV_OK on success, NV_ERR_GENERIC if the backlight device cannot
 * be found, or NV_ERR_NOT_SUPPORTED when the kernel lacks
 * get_backlight_device_by_name().
 */
NV_STATUS NV_API_CALL nv_get_tegra_brightness_level
(
    nv_state_t *nv,
    NvU32 *brightness
)
{
#ifdef NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct backlight_device *bl_dev =
        get_backlight_device_by_name(nvl->backlight.device_name);

    if (bl_dev == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "Unable to get backlight device\n");
        return NV_ERR_GENERIC;
    }

    *brightness = bl_dev->props.brightness;
    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
/*
 * Set the Tegra panel backlight level.
 *
 * Looks up the backlight device registered under the name stored in the
 * per-GPU Linux state, programs the requested brightness, and pushes the
 * update to the hardware via backlight_update_status().
 *
 * Returns NV_OK on success, NV_ERR_GENERIC if the backlight device cannot
 * be found, or NV_ERR_NOT_SUPPORTED when the kernel lacks
 * get_backlight_device_by_name().
 */
NV_STATUS NV_API_CALL nv_set_tegra_brightness_level
(
    nv_state_t *nv,
    NvU32 brightness
)
{
#ifdef NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct backlight_device *bl_dev =
        get_backlight_device_by_name(nvl->backlight.device_name);

    if (bl_dev == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "Unable to get backlight device\n");
        return NV_ERR_GENERIC;
    }

    bl_dev->props.brightness = brightness;
    backlight_update_status(bl_dev);
    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}

View File

@@ -0,0 +1,108 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/bpmp.h>
#endif // IS_ENABLED(CONFIG_TEGRA_BPMP)
/*!
* @brief Sends an MRQ (message-request) to BPMP
*
* The request, response, and ret parameters of this function correspond to the
* components of the tegra_bpmp_message struct, which BPMP uses to receive
* MRQs.
*
* @param[in] nv Per GPU Linux state
* @param[in] mrq MRQ_xxx ID specifying what is requested
* @param[in] request_data Pointer to request input data
* @param[in] request_data_size Size of structure pointed to by pRequestData
* @param[out] response_data Pointer to response output data
* @param[in] response_data_size Size of structure pointed to by pResponseData
* @param[out] ret MRQ return code (from "ret" element of
* tegra_bpmp_message struct)
* @param[out] api_ret Return code from tegra_bpmp_transfer call
*
* @returns NV_OK if successful,
* NV_ERR_NOT_SUPPORTED if the functionality is not available,
* NV_ERR_INVALID_POINTER if the tegra_bpmp struct pointer could not
* be obtained from nv, or
* NV_ERR_GENERIC if the tegra_bpmp_transfer call failed (see apiRet
* for Linux error code).
*/
NV_STATUS NV_API_CALL
nv_bpmp_send_mrq
(
    nv_state_t *nv,
    NvU32 mrq,
    const void *request_data,
    NvU32 request_data_size,
    void *response_data,
    NvU32 response_data_size,
    NvS32 *ret,
    NvS32 *api_ret
)
{
#if IS_ENABLED(CONFIG_TEGRA_BPMP) && NV_SUPPORTS_PLATFORM_DEVICE
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct tegra_bpmp *bpmp;
    struct tegra_bpmp_message msg;

    bpmp = tegra_bpmp_get(nvl->dev);
    if (IS_ERR(bpmp))
    {
        /*
         * Bug fix: PTR_ERR() yields a (negative) long error code, which the
         * previous code printed with "%s" -- that would dereference the errno
         * value as a string pointer. Print it as a long with "%ld" instead.
         */
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: Error getting bpmp struct: %ld\n",
                  PTR_ERR(bpmp));
        return NV_ERR_INVALID_POINTER;
    }

    /* Package the request into the tegra_bpmp_message and send the MRQ. */
    memset(&msg, 0, sizeof(msg));
    msg.mrq = mrq;
    msg.tx.data = request_data;
    msg.tx.size = (size_t) request_data_size;
    msg.rx.data = response_data;
    msg.rx.size = (size_t) response_data_size;

    *api_ret = (NvS32) tegra_bpmp_transfer(bpmp, &msg);
    if (*api_ret == 0)
    {
        /* Transfer succeeded; propagate BPMP's own MRQ return code. */
        *ret = (NvS32) msg.rx.ret;
        return NV_OK;
    }
    else
    {
        /* Transport-level failure; Linux error code is in *api_ret. */
        return NV_ERR_GENERIC;
    }
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}

View File

@@ -0,0 +1,240 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-linux.h"
#include "nv-caps-imex.h"
extern int NVreg_ImexChannelCount;
extern int NVreg_CreateImexChannel0;
/* Open handler for IMEX channel device files: no per-open state is needed. */
static int nv_caps_imex_open(struct inode *inode, struct file *file)
{
return 0;
}
/* Release handler for IMEX channel device files: nothing to tear down. */
static int nv_caps_imex_release(struct inode *inode, struct file *file)
{
return 0;
}
/*
 * File operations for IMEX channel character devices. The address of this
 * struct also serves as the identity check in nv_caps_imex_channel_get().
 */
static struct file_operations g_nv_caps_imex_fops =
{
.owner = THIS_MODULE,
.open = nv_caps_imex_open,
.release = nv_caps_imex_release
};
/* Device class used only when world-visible channel0 is created. */
static struct class *g_nv_caps_imex_class;
/* Driver-global IMEX state: cdev plus the channel0 device identity. */
static struct
{
NvBool initialized;
struct cdev cdev;
dev_t channel0;           /* first devno of the allocated chrdev region */
struct device *dev_channel0;
} g_nv_caps_imex;
/*
 * Resolve an IMEX channel number from an open file descriptor.
 *
 * The fd must refer to a file opened on this driver's character device;
 * the channel number is the device minor. Returns the channel number,
 * or -1 if the fd is invalid, belongs to another driver, or filesystem
 * access is unavailable in this build.
 */
int NV_API_CALL nv_caps_imex_channel_get(int fd)
{
#if NV_FILESYSTEM_ACCESS_AVAILABLE
    int channel = -1;
    struct inode *inode;
    struct file *filp = fget(fd);

    if (filp == NULL)
    {
        return channel;
    }

    inode = NV_FILE_INODE(filp);

    /* Only honor fds that were opened on nv-caps-imex-drv itself. */
    if ((inode != NULL) && (filp->f_op == &g_nv_caps_imex_fops))
    {
        channel = MINOR(inode->i_rdev);
    }

    fput(filp);
    return channel;
#else
    return -1;
#endif
}
/* Number of IMEX channels, as configured by the NVreg_ImexChannelCount module parameter. */
int NV_API_CALL nv_caps_imex_channel_count(void)
{
return NVreg_ImexChannelCount;
}
/*
 * Destroy the world-visible channel0 device and its class, if they were
 * ever created (no-op otherwise). Safe to call multiple times.
 */
static void nv_caps_imex_remove_channel0(void)
{
    if (g_nv_caps_imex_class != NULL)
    {
        device_destroy(g_nv_caps_imex_class, g_nv_caps_imex.channel0);
        class_destroy(g_nv_caps_imex_class);
        g_nv_caps_imex_class = NULL;
    }
}
/*
 * udev devnode callback: selects the file mode for devices in this class.
 * The signature takes a const struct device on newer kernels, detected by
 * conftest via NV_CLASS_DEVNODE_HAS_CONST_ARG.
 */
#if defined(NV_CLASS_DEVNODE_HAS_CONST_ARG)
static char *nv_caps_imex_devnode(const struct device *dev, umode_t *mode)
#else
static char *nv_caps_imex_devnode(struct device *dev, umode_t *mode)
#endif
{
if (!mode)
return NULL;
//
// Handle only world visible channel0, otherwise let the kernel apply
// defaults (root only access)
//
if (dev->devt == g_nv_caps_imex.channel0)
*mode = S_IRUGO | S_IWUGO;
/* NULL: keep the kernel's default device node name. */
return NULL;
}
/*
 * Create the world-visible channel0 device node
 * (/dev/nvidia-caps-imex-channels/channel0). Registers a device class with
 * a devnode callback that makes channel0 world read/write.
 * Returns 0 on success, -1 on failure (class and device cleaned up).
 */
static int nv_caps_imex_add_channel0(void)
{
/* class_create() lost its owner argument in newer kernels (conftest-detected). */
#if defined(NV_CLASS_CREATE_HAS_NO_OWNER_ARG)
g_nv_caps_imex_class = class_create("nvidia-caps-imex-channels");
#else
g_nv_caps_imex_class = class_create(THIS_MODULE, "nvidia-caps-imex-channels");
#endif
if (IS_ERR(g_nv_caps_imex_class))
{
nv_printf(NV_DBG_ERRORS, "nv-caps-imex failed to register class.\n");
return -1;
}
// Install udev callback
g_nv_caps_imex_class->devnode = nv_caps_imex_devnode;
g_nv_caps_imex.dev_channel0 = device_create(g_nv_caps_imex_class, NULL,
g_nv_caps_imex.channel0, NULL,
"nvidia-caps-imex-channels!channel%d", 0);
if (IS_ERR(g_nv_caps_imex.dev_channel0))
{
nv_printf(NV_DBG_ERRORS, "nv-caps-imex failed to create channel0.\n");
class_destroy(g_nv_caps_imex_class);
g_nv_caps_imex_class = NULL;
return -1;
}
/* Deliberately logged at error level so administrators always see it. */
nv_printf(NV_DBG_ERRORS, "nv-caps-imex channel0 created. "
"Make sure you are aware of the IMEX security model.\n");
return 0;
}
/*
 * Initialize the IMEX channel driver: allocate a chrdev region of
 * NVreg_ImexChannelCount minors, register the cdev, and optionally create
 * the world-visible channel0 node when NVreg_CreateImexChannel0 == 1.
 * Returns 0 on success (including the "disabled" case where the channel
 * count is 0) or a negative errno; failures unwind in reverse order.
 */
int NV_API_CALL nv_caps_imex_init(void)
{
int rc;
if (g_nv_caps_imex.initialized)
{
nv_printf(NV_DBG_ERRORS, "nv-caps-imex is already initialized.\n");
return -EBUSY;
}
if (NVreg_ImexChannelCount == 0)
{
nv_printf(NV_DBG_INFO, "nv-caps-imex is disabled.\n");
return 0;
}
g_nv_caps_imex_class = NULL;
g_nv_caps_imex.dev_channel0 = NULL;
/* channel0 receives the first devno of the region; minors map to channels. */
rc = alloc_chrdev_region(&g_nv_caps_imex.channel0, 0,
NVreg_ImexChannelCount,
"nvidia-caps-imex-channels");
if (rc < 0)
{
nv_printf(NV_DBG_ERRORS, "nv-caps-imex failed to create cdev.\n");
return rc;
}
cdev_init(&g_nv_caps_imex.cdev, &g_nv_caps_imex_fops);
g_nv_caps_imex.cdev.owner = THIS_MODULE;
rc = cdev_add(&g_nv_caps_imex.cdev, g_nv_caps_imex.channel0,
NVreg_ImexChannelCount);
if (rc < 0)
{
nv_printf(NV_DBG_ERRORS, "nv-caps-imex failed to add cdev.\n");
goto cdev_add_fail;
}
if (NVreg_CreateImexChannel0 == 1)
{
rc = nv_caps_imex_add_channel0();
if (rc < 0)
goto channel0_add_fail;
}
g_nv_caps_imex.initialized = NV_TRUE;
return 0;
channel0_add_fail:
cdev_del(&g_nv_caps_imex.cdev);
cdev_add_fail:
unregister_chrdev_region(g_nv_caps_imex.channel0, NVreg_ImexChannelCount);
return rc;
}
/*
 * Tear down the IMEX channel driver in reverse order of
 * nv_caps_imex_init(). No-op when init never completed.
 */
void NV_API_CALL nv_caps_imex_exit(void)
{
    if (!g_nv_caps_imex.initialized)
        return;

    nv_caps_imex_remove_channel0();
    cdev_del(&g_nv_caps_imex.cdev);
    unregister_chrdev_region(g_nv_caps_imex.channel0, NVreg_ImexChannelCount);

    g_nv_caps_imex.initialized = NV_FALSE;
}

View File

@@ -0,0 +1,34 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_CAPS_IMEX_H_
#define _NV_CAPS_IMEX_H_
#include <nv-kernel-interface-api.h>
/* Register the IMEX channel character device; 0 on success or negative errno. */
int NV_API_CALL nv_caps_imex_init(void);
/* Unregister the IMEX channel character device (no-op if init never succeeded). */
void NV_API_CALL nv_caps_imex_exit(void);
/* Map an open fd on the IMEX device to its channel number, or -1. */
int NV_API_CALL nv_caps_imex_channel_get(int fd);
/* Configured number of IMEX channels (module parameter). */
int NV_API_CALL nv_caps_imex_channel_count(void);
#endif /* _NV_CAPS_IMEX_H_ */

View File

@@ -0,0 +1,878 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-linux.h"
#include "nv-caps.h"
#include "nv-procfs.h"
#include "nv-hash.h"
#include "nvmisc.h"
extern int NVreg_ModifyDeviceFiles;
/* sys_close() or __close_fd() */
#include <linux/syscalls.h>
#define NV_CAP_DRV_MINOR_COUNT 8192
/* Hash table with 512 buckets */
#define NV_CAP_HASH_BITS 9
NV_DECLARE_HASHTABLE(g_nv_cap_hash_table, NV_CAP_HASH_BITS);
#define NV_CAP_HASH_SIZE NV_HASH_SIZE(g_nv_cap_hash_table)
#define nv_cap_hash_key(path) (nv_string_hash(path) % NV_CAP_HASH_SIZE)
/* One statically-declared capability path with its assigned device minor. */
typedef struct nv_cap_table_entry
{
/* name must be the first element */
const char *name;
int minor;      /* assigned sequentially by _nv_cap_table_init() */
struct hlist_node hlist;   /* linkage in g_nv_cap_hash_table, keyed by path */
} nv_cap_table_entry_t;
#define NV_CAP_NUM_ENTRIES(_table) (NV_ARRAY_ELEMENTS(_table))
static nv_cap_table_entry_t g_nv_cap_nvlink_table[] =
{
{"/driver/nvidia-nvlink/capabilities/fabric-mgmt"}
};
static nv_cap_table_entry_t g_nv_cap_mig_table[] =
{
{"/driver/nvidia/capabilities/mig/config"},
{"/driver/nvidia/capabilities/mig/monitor"}
};
static nv_cap_table_entry_t g_nv_cap_sys_table[] =
{
};
#define NV_CAP_MIG_CI_ENTRIES(_gi) \
{_gi "/ci0/access"}, \
{_gi "/ci1/access"}, \
{_gi "/ci2/access"}, \
{_gi "/ci3/access"}, \
{_gi "/ci4/access"}, \
{_gi "/ci5/access"}, \
{_gi "/ci6/access"}, \
{_gi "/ci7/access"}
#define NV_CAP_MIG_GI_ENTRIES(_gpu) \
{_gpu "/gi0/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi0"), \
{_gpu "/gi1/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi1"), \
{_gpu "/gi2/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi2"), \
{_gpu "/gi3/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi3"), \
{_gpu "/gi4/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi4"), \
{_gpu "/gi5/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi5"), \
{_gpu "/gi6/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi6"), \
{_gpu "/gi7/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi7"), \
{_gpu "/gi8/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi8"), \
{_gpu "/gi9/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi9"), \
{_gpu "/gi10/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi10"), \
{_gpu "/gi11/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi11"), \
{_gpu "/gi12/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi12"), \
{_gpu "/gi13/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi13"), \
{_gpu "/gi14/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi14")
static nv_cap_table_entry_t g_nv_cap_mig_gpu_table[] =
{
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu0/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu1/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu2/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu3/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu4/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu5/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu6/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu7/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu8/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu9/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu10/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu11/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu12/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu13/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu14/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu15/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu16/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu17/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu18/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu19/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu20/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu21/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu22/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu23/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu24/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu25/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu26/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu27/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu28/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu29/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu30/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu31/mig")
};
/* A live capability: its procfs entry plus the device-file metadata exposed through it. */
struct nv_cap
{
char *path;        /* full capability path, used as the minor lookup key */
char *name;        /* last path component; the procfs entry name */
int minor;         /* device minor, or -1 for directory entries */
int permissions;   /* advertised device-file mode (reported via procfs) */
int modify;        /* whether userspace tooling may (re)create the device file */
struct proc_dir_entry *parent;
struct proc_dir_entry *entry;
};
#define NV_CAP_PROCFS_WRITE_BUF_SIZE 128
typedef struct nv_cap_file_private
{
int minor;
int permissions;
int modify;
char buffer[NV_CAP_PROCFS_WRITE_BUF_SIZE];
off_t offset;
} nv_cap_file_private_t;
/*
 * Driver-global state for the nvidia-caps character device.
 * NOTE(review): this definition has external linkage (no 'static') despite
 * the g_ prefix -- confirm no other translation unit relies on it before
 * narrowing the linkage.
 */
struct
{
NvBool initialized;
struct cdev cdev;
dev_t devno;    /* first devno of the NV_CAP_DRV_MINOR_COUNT region */
} g_nv_cap_drv;
#define NV_CAP_PROCFS_DIR "driver/nvidia-caps"
#define NV_CAP_NAME_BUF_SIZE 128
static struct proc_dir_entry *nv_cap_procfs_dir;
/*
 * seq_file show handler: list "<capability-name> <minor>" for every entry
 * in the nvlink capability table. Always returns 0.
 */
static int nv_procfs_read_nvlink_minors(struct seq_file *s, void *v)
{
    int i, count;
    char name[NV_CAP_NAME_BUF_SIZE];

    count = NV_CAP_NUM_ENTRIES(g_nv_cap_nvlink_table);
    for (i = 0; i < count; i++)
    {
        /*
         * Bounded conversion ("%127s" matches the 128-byte buffer) so a
         * table path longer than the buffer can never overflow 'name';
         * the old unbounded "%s" relied on every table string being short.
         */
        if (sscanf(g_nv_cap_nvlink_table[i].name,
                   "/driver/nvidia-nvlink/capabilities/%127s", name) == 1)
        {
            name[sizeof(name) - 1] = '\0';
            seq_printf(s, "%s %d\n", name, g_nv_cap_nvlink_table[i].minor);
        }
    }
    return 0;
}
/*
 * seq_file show handler: list "<capability-name> <minor>" for every entry
 * in the system capability table (currently empty). Always returns 0.
 */
static int nv_procfs_read_sys_minors(struct seq_file *s, void *v)
{
    int i, count;
    char name[NV_CAP_NAME_BUF_SIZE];

    count = NV_CAP_NUM_ENTRIES(g_nv_cap_sys_table);
    for (i = 0; i < count; i++)
    {
        /* "%127s" bounds the conversion to the 128-byte buffer. */
        if (sscanf(g_nv_cap_sys_table[i].name,
                   "/driver/nvidia/capabilities/%127s", name) == 1)
        {
            name[sizeof(name) - 1] = '\0';
            seq_printf(s, "%s %d\n", name, g_nv_cap_sys_table[i].minor);
        }
    }
    return 0;
}
/*
 * seq_file show handler: list minors for the global MIG capabilities
 * (config/monitor) followed by the per-GPU GI/CI access capabilities as
 * "gpu<N>/<path> <minor>". Always returns 0.
 */
static int nv_procfs_read_mig_minors(struct seq_file *s, void *v)
{
    int i, count, gpu;
    char name[NV_CAP_NAME_BUF_SIZE];

    count = NV_CAP_NUM_ENTRIES(g_nv_cap_mig_table);
    for (i = 0; i < count; i++)
    {
        /* "%127s" bounds the conversion to the 128-byte buffer. */
        if (sscanf(g_nv_cap_mig_table[i].name,
                   "/driver/nvidia/capabilities/mig/%127s", name) == 1)
        {
            name[sizeof(name) - 1] = '\0';
            seq_printf(s, "%s %d\n", name, g_nv_cap_mig_table[i].minor);
        }
    }

    count = NV_CAP_NUM_ENTRIES(g_nv_cap_mig_gpu_table);
    for (i = 0; i < count; i++)
    {
        /* Same bound for the per-GPU table's trailing path component. */
        if (sscanf(g_nv_cap_mig_gpu_table[i].name,
                   "/driver/nvidia/capabilities/gpu%d/mig/%127s", &gpu, name) == 2)
        {
            name[sizeof(name) - 1] = '\0';
            seq_printf(s, "gpu%d/%s %d\n",
                       gpu, name, g_nv_cap_mig_gpu_table[i].minor);
        }
    }
    return 0;
}
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(nvlink_minors, nv_system_pm_lock);
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(mig_minors, nv_system_pm_lock);
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(sys_minors, nv_system_pm_lock);
/*
 * Remove the driver/nvidia-caps procfs tree. Safe to call whether or not
 * nv_cap_procfs_init() succeeded; no-op when the directory was never made.
 */
static void nv_cap_procfs_exit(void)
{
    struct proc_dir_entry *dir = nv_cap_procfs_dir;

    if (dir == NULL)
    {
        return;
    }

#if defined(CONFIG_PROC_FS)
    /* proc_remove() tears down the directory and everything beneath it. */
    proc_remove(dir);
#endif
    nv_cap_procfs_dir = NULL;
}
/*
 * Create the driver/nvidia-caps procfs directory and its three read-only
 * minor-listing files (mig-minors, nvlink-minors, sys-minors).
 * Returns 0 on success, -EACCES on any failure (partially-created state
 * is torn down via nv_cap_procfs_exit()).
 */
static int nv_cap_procfs_init(void)
{
    /*
     * Plain scratch local; the previous 'static' qualifier needlessly gave
     * this pointer static storage duration for no functional reason.
     */
    struct proc_dir_entry *file_entry;

    nv_cap_procfs_dir = NV_CREATE_PROC_DIR(NV_CAP_PROCFS_DIR, NULL);
    if (nv_cap_procfs_dir == NULL)
    {
        return -EACCES;
    }

    file_entry = NV_CREATE_PROC_FILE("mig-minors", nv_cap_procfs_dir,
                                     mig_minors, NULL);
    if (file_entry == NULL)
    {
        goto cleanup;
    }

    file_entry = NV_CREATE_PROC_FILE("nvlink-minors", nv_cap_procfs_dir,
                                     nvlink_minors, NULL);
    if (file_entry == NULL)
    {
        goto cleanup;
    }

    file_entry = NV_CREATE_PROC_FILE("sys-minors", nv_cap_procfs_dir,
                                     sys_minors, NULL);
    if (file_entry == NULL)
    {
        goto cleanup;
    }

    return 0;

cleanup:
    nv_cap_procfs_exit();
    return -EACCES;
}
/*
 * Look up the device minor assigned to a capability path.
 * Returns the minor from the static capability tables, or -1 when the
 * path is not a known capability.
 */
static int nv_cap_find_minor(char *path)
{
    nv_cap_table_entry_t *entry;
    unsigned int bucket = nv_cap_hash_key(path);

    /* Scan only the hash bucket this path maps to. */
    nv_hash_for_each_possible(g_nv_cap_hash_table, entry, hlist, bucket)
    {
        if (strcmp(path, entry->name) == 0)
        {
            return entry->minor;
        }
    }

    return -1;
}
/*
 * Assign device minors to a capability table and add each entry to the
 * global path hash. The 'minor' counter is static so minors stay unique
 * across all tables -- this function must therefore run exactly once per
 * table, and total entries must not exceed NV_CAP_DRV_MINOR_COUNT.
 */
static void _nv_cap_table_init(nv_cap_table_entry_t *table, int count)
{
int i;
unsigned int key;
static int minor = 0;
for (i = 0; i < count; i++)
{
table[i].minor = minor++;
INIT_HLIST_NODE(&table[i].hlist);
key = nv_cap_hash_key(table[i].name);
nv_hash_add(g_nv_cap_hash_table, &table[i].hlist, key);
}
WARN_ON(minor > NV_CAP_DRV_MINOR_COUNT);
}
#define nv_cap_table_init(table) \
_nv_cap_table_init(table, NV_CAP_NUM_ENTRIES(table))
/*
 * Build the capability-path hash from every static table. Must be called
 * exactly once: _nv_cap_table_init() keeps a static minor counter. The
 * BUILD_BUG_ON guards the "name must be first" layout assumption.
 */
static void nv_cap_tables_init(void)
{
BUILD_BUG_ON(offsetof(nv_cap_table_entry_t, name) != 0);
nv_hash_init(g_nv_cap_hash_table);
nv_cap_table_init(g_nv_cap_nvlink_table);
nv_cap_table_init(g_nv_cap_mig_table);
nv_cap_table_init(g_nv_cap_mig_gpu_table);
nv_cap_table_init(g_nv_cap_sys_table);
}
/*
 * procfs write handler for a capability node.
 *
 * Appends user data to the per-open private buffer; the accumulated text
 * is parsed for "DeviceFileModify: <n>" at release time (see
 * nv_cap_procfs_release). Returns bytes consumed or a negative errno
 * (-EINVAL empty write, -ENOSPC buffer full, -EFAULT copy failure).
 */
static ssize_t nv_cap_procfs_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *pos)
{
nv_cap_file_private_t *private = NULL;
unsigned long bytes_left;
char *proc_buffer;
int status;
status = nv_down_read_interruptible(&nv_system_pm_lock);
if (status < 0)
{
nv_printf(NV_DBG_ERRORS, "nv-caps: failed to lock the nv_system_pm_lock!\n");
return status;
}
private = ((struct seq_file *)file->private_data)->private;
/* Reserve one byte for the NUL terminator appended below. */
bytes_left = (sizeof(private->buffer) - private->offset - 1);
if (count == 0)
{
/* Negative errno stored in size_t 'count'; converts back on return. */
count = -EINVAL;
goto done;
}
if ((bytes_left == 0) || (count > bytes_left))
{
count = -ENOSPC;
goto done;
}
proc_buffer = &private->buffer[private->offset];
if (copy_from_user(proc_buffer, buffer, count))
{
nv_printf(NV_DBG_ERRORS, "nv-caps: failed to copy in proc data!\n");
count = -EFAULT;
goto done;
}
private->offset += count;
proc_buffer[count] = '\0';
*pos = private->offset;
done:
up_read(&nv_system_pm_lock);
return count;
}
/*
 * seq_file show handler for a capability node: report the device file's
 * minor, mode, and modify flag from the per-open private state.
 * Returns 0, or a negative errno if taking the PM lock is interrupted.
 */
static int nv_cap_procfs_read(struct seq_file *s, void *v)
{
    nv_cap_file_private_t *priv = s->private;
    int rc = nv_down_read_interruptible(&nv_system_pm_lock);

    if (rc < 0)
    {
        return rc;
    }

    seq_printf(s, "%s: %d\n", "DeviceFileMinor", priv->minor);
    seq_printf(s, "%s: %d\n", "DeviceFileMode", priv->permissions);
    seq_printf(s, "%s: %d\n", "DeviceFileModify", priv->modify);

    up_read(&nv_system_pm_lock);
    return 0;
}
/*
 * Open handler for a capability procfs node: allocate per-open state,
 * snapshot the capability's device-file metadata into it, and attach it
 * via single_open(). Returns 0 or a negative errno.
 */
static int nv_cap_procfs_open(struct inode *inode, struct file *file)
{
    int rc;
    nv_cap_t *cap = NV_PDE_DATA(inode);
    nv_cap_file_private_t *priv = NULL;

    NV_KMALLOC(priv, sizeof(nv_cap_file_private_t));
    if (priv == NULL)
    {
        return -ENOMEM;
    }

    priv->minor       = cap->minor;
    priv->permissions = cap->permissions;
    priv->modify      = cap->modify;
    priv->offset      = 0;

    rc = single_open(file, nv_cap_procfs_read, priv);
    if (rc < 0)
    {
        /* single_open() did not take ownership; free our allocation. */
        NV_KFREE(priv, sizeof(nv_cap_file_private_t));
    }
    return rc;
}
/*
 * Release handler for a capability procfs node. If any data was written
 * during this open, parse it for "DeviceFileModify: <n>" and apply the
 * value to the capability before freeing the per-open state.
 */
static int nv_cap_procfs_release(struct inode *inode, struct file *file)
{
struct seq_file *s = file->private_data;
nv_cap_file_private_t *private = NULL;
char *buffer;
int modify;
nv_cap_t *cap = NV_PDE_DATA(inode);
if (s != NULL)
{
private = s->private;
}
/* Grab 'private' before single_release() frees the seq_file. */
single_release(inode, file);
if (private != NULL)
{
buffer = private->buffer;
if (private->offset != 0)
{
if (sscanf(buffer, "DeviceFileModify: %d", &modify) == 1)
{
cap->modify = modify;
}
}
NV_KFREE(private, sizeof(nv_cap_file_private_t));
}
/*
* All open files using the proc entry will be invalidated
* if the entry is removed.
*/
file->private_data = NULL;
return 0;
}
static nv_proc_ops_t g_nv_cap_procfs_fops = {
NV_PROC_OPS_SET_OWNER()
.NV_PROC_OPS_OPEN = nv_cap_procfs_open,
.NV_PROC_OPS_RELEASE = nv_cap_procfs_release,
.NV_PROC_OPS_WRITE = nv_cap_procfs_write,
.NV_PROC_OPS_READ = seq_read,
.NV_PROC_OPS_LSEEK = seq_lseek,
};
/* forward declaration of g_nv_cap_drv_fops */
static struct file_operations g_nv_cap_drv_fops;
/*
 * Validate that 'fd' is an open file on the nv-caps device carrying the
 * capability 'cap' (matched by device minor), and if so duplicate it into
 * a new close-on-exec descriptor. Returns the new fd, or -1 on any
 * validation failure or when filesystem access is unavailable.
 *
 * Reference counting: on success the reference taken by fget() is handed
 * to the new descriptor via fd_install(); fput() runs only on the error
 * path.
 */
int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd)
{
#if NV_FILESYSTEM_ACCESS_AVAILABLE
struct file *file;
int dup_fd;
struct inode *inode = NULL;
dev_t rdev = 0;
if (cap == NULL)
{
return -1;
}
file = fget(fd);
if (file == NULL)
{
return -1;
}
inode = NV_FILE_INODE(file);
if (inode == NULL)
{
goto err;
}
/* Make sure the fd belongs to the nv-cap-drv */
if (file->f_op != &g_nv_cap_drv_fops)
{
goto err;
}
/* Make sure the fd has the expected capability */
rdev = inode->i_rdev;
if (MINOR(rdev) != cap->minor)
{
goto err;
}
dup_fd = get_unused_fd_flags(O_CLOEXEC);
if (dup_fd < 0)
{
goto err;
}
/* fd_install() consumes the fget() reference; no fput() on success. */
fd_install(dup_fd, file);
return dup_fd;
err:
fput(file);
return -1;
#else
return -1;
#endif
}
/*
 * Close a capability file descriptor belonging to the current process,
 * but only if it actually refers to nv-cap-drv; foreign fds (or calls
 * made from an unrelated process during shutdown) are left untouched.
 * The kernel-version #if ladder below selects whichever close-by-fd
 * symbol the running kernel exports.
 */
void NV_API_CALL nv_cap_close_fd(int fd)
{
#if NV_FILESYSTEM_ACCESS_AVAILABLE
struct file *file;
NvBool is_nv_cap_fd;
if (fd == -1)
{
return;
}
/*
* Acquire task_lock as we access current->files explicitly (__close_fd)
* and implicitly (sys_close), and it will race with the exit path.
*/
task_lock(current);
/* Nothing to do, we are in exit path */
if (current->files == NULL)
{
task_unlock(current);
return;
}
file = fget(fd);
if (file == NULL)
{
task_unlock(current);
return;
}
/* Make sure the fd belongs to the nv-cap-drv */
is_nv_cap_fd = (file->f_op == &g_nv_cap_drv_fops);
fput(file);
/*
* In some cases, we may be in shutdown path and execute
* in context of unrelated process. In that case we should
* not access any 'current' state, but instead let kernel
* clean up capability files on its own.
*/
if (!is_nv_cap_fd)
{
task_unlock(current);
return;
}
/*
* From v4.17-rc1 (to v5.10.8) kernels have stopped exporting sys_close(fd)
* and started exporting __close_fd, as of this commit:
* 2018-04-02 2ca2a09d6215 ("fs: add ksys_close() wrapper; remove in-kernel
* calls to sys_close()")
* Kernels v5.11-rc1 onwards have stopped exporting __close_fd, and started
* exporting close_fd, as of this commit:
* 2020-12-20 8760c909f54a ("file: Rename __close_fd to close_fd and remove
* the files parameter")
*/
#if NV_IS_EXPORT_SYMBOL_PRESENT_close_fd
close_fd(fd);
#elif NV_IS_EXPORT_SYMBOL_PRESENT___close_fd
__close_fd(current->files, fd);
#else
sys_close(fd);
#endif
task_unlock(current);
#endif
}
/*
 * Allocate a capability object named 'name' beneath 'parent_cap',
 * building its full path as "<parent path>/<name>". The caller sets
 * 'parent', 'permissions', and 'entry'; 'minor' starts at -1 and
 * 'modify' from the NVreg_ModifyDeviceFiles module parameter.
 * Returns NULL on invalid arguments or allocation failure (with any
 * partial allocations freed).
 */
static nv_cap_t* nv_cap_alloc(nv_cap_t *parent_cap, const char *name)
{
    nv_cap_t *cap;
    int len;

    if (parent_cap == NULL || name == NULL)
    {
        return NULL;
    }

    NV_KMALLOC(cap, sizeof(nv_cap_t));
    if (cap == NULL)
    {
        return NULL;
    }

    /* "<parent path>" + "/" + "<name>" + NUL */
    len = strlen(name) + strlen(parent_cap->path) + 2;
    NV_KMALLOC(cap->path, len);
    if (cap->path == NULL)
    {
        NV_KFREE(cap, sizeof(nv_cap_t));
        return NULL;
    }
    /* Bounded formatting instead of the strcpy/strcat chain. */
    snprintf(cap->path, len, "%s/%s", parent_cap->path, name);

    len = strlen(name) + 1;
    NV_KMALLOC(cap->name, len);
    if (cap->name == NULL)
    {
        NV_KFREE(cap->path, strlen(cap->path) + 1);
        NV_KFREE(cap, sizeof(nv_cap_t));
        return NULL;
    }
    snprintf(cap->name, len, "%s", name);

    cap->minor = -1;
    cap->modify = NVreg_ModifyDeviceFiles;

    return cap;
}
/*
 * Free a capability object and its owned path/name strings.
 * NULL-safe: a NULL cap is a no-op.
 */
static void nv_cap_free(nv_cap_t *cap)
{
    if (cap != NULL)
    {
        NV_KFREE(cap->path, strlen(cap->path) + 1);
        NV_KFREE(cap->name, strlen(cap->name) + 1);
        NV_KFREE(cap, sizeof(nv_cap_t));
    }
}
/*
 * Create a capability file node beneath 'parent_cap'. 'mode' is the
 * advertised device-file mode recorded in cap->permissions; the procfs
 * entry itself is always a world-readable regular file. The capability
 * path must resolve to a minor in the static tables, otherwise NULL.
 */
nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap,
const char *name, int mode)
{
nv_cap_t *cap = NULL;
int minor;
cap = nv_cap_alloc(parent_cap, name);
if (cap == NULL)
{
return NULL;
}
cap->parent = parent_cap->entry;
cap->permissions = mode;
/* Reuse 'mode' for the procfs entry's own (read-only) mode. */
mode = (S_IFREG | S_IRUGO);
minor = nv_cap_find_minor(cap->path);
if (minor < 0)
{
nv_cap_free(cap);
return NULL;
}
cap->minor = minor;
cap->entry = proc_create_data(name, mode, parent_cap->entry,
&g_nv_cap_procfs_fops, (void*)cap);
if (cap->entry == NULL)
{
nv_cap_free(cap);
return NULL;
}
return cap;
}
/*
 * Create a capability directory node beneath 'parent_cap'. 'mode' is
 * recorded in cap->permissions; the procfs directory itself is always
 * world-readable and traversable. Returns the new capability, or NULL
 * on allocation or procfs failure.
 */
nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap,
                                              const char *name, int mode)
{
    nv_cap_t *dir_cap = nv_cap_alloc(parent_cap, name);

    if (dir_cap == NULL)
    {
        return NULL;
    }

    dir_cap->parent = parent_cap->entry;
    dir_cap->permissions = mode;
    dir_cap->minor = -1;

    dir_cap->entry = NV_PROC_MKDIR_MODE(name,
                                        (S_IFDIR | S_IRUGO | S_IXUGO),
                                        parent_cap->entry);
    if (dir_cap->entry == NULL)
    {
        nv_cap_free(dir_cap);
        return NULL;
    }

    return dir_cap;
}
/*
 * Create the root "<path>/capabilities" capability directory. A throwaway
 * stack parent with empty path/name and no procfs entry makes
 * nv_cap_create_dir_entry() produce a top-level procfs directory.
 * Returns the root capability, or NULL on failure.
 */
nv_cap_t* NV_API_CALL nv_cap_init(const char *path)
{
nv_cap_t parent_cap;
nv_cap_t *cap;
int mode;
char *name = NULL;
char dir[] = "/capabilities";
if (path == NULL)
{
return NULL;
}
NV_KMALLOC(name, (strlen(path) + strlen(dir)) + 1);
if (name == NULL)
{
return NULL;
}
strcpy(name, path);
strcat(name, dir);
/* Synthetic parent: only 'entry' and 'path' are read downstream. */
parent_cap.entry = NULL;
parent_cap.path = "";
parent_cap.name = "";
mode = S_IRUGO | S_IXUGO;
cap = nv_cap_create_dir_entry(&parent_cap, name, mode);
NV_KFREE(name, strlen(name) + 1);
return cap;
}
/*
 * Remove a capability's procfs entry and free the capability.
 * A NULL cap is a warned no-op.
 */
void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap)
{
    if (!WARN_ON(cap == NULL))
    {
        remove_proc_entry(cap->name, cap->parent);
        nv_cap_free(cap);
    }
}
/* Open handler for nvidia-caps device files: no per-open state is needed. */
static int nv_cap_drv_open(struct inode *inode, struct file *file)
{
return 0;
}
/* Release handler for nvidia-caps device files: nothing to tear down. */
static int nv_cap_drv_release(struct inode *inode, struct file *file)
{
return 0;
}
static struct file_operations g_nv_cap_drv_fops =
{
.owner = THIS_MODULE,
.open = nv_cap_drv_open,
.release = nv_cap_drv_release
};
/*
 * Initialize the nvidia-caps driver: build the capability tables, allocate
 * a chrdev region of NV_CAP_DRV_MINOR_COUNT minors, register the cdev, and
 * create the procfs minor listings. Returns 0 or a negative errno;
 * failures unwind in reverse order.
 */
int NV_API_CALL nv_cap_drv_init(void)
{
    int rc;

    if (g_nv_cap_drv.initialized)
    {
        nv_printf(NV_DBG_ERRORS, "nv-caps-drv is already initialized.\n");
        return -EBUSY;
    }

    /*
     * Bug fix: build the tables only after the double-init guard.
     * _nv_cap_table_init() advances a static minor counter and inserts
     * entries into a global hash table, so the previous placement (before
     * the guard) would corrupt minors and duplicate hash entries if init
     * were ever invoked twice.
     */
    nv_cap_tables_init();

    rc = alloc_chrdev_region(&g_nv_cap_drv.devno,
                             0,
                             NV_CAP_DRV_MINOR_COUNT,
                             "nvidia-caps");
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "nv-caps-drv failed to create cdev region.\n");
        return rc;
    }

    cdev_init(&g_nv_cap_drv.cdev, &g_nv_cap_drv_fops);
    g_nv_cap_drv.cdev.owner = THIS_MODULE;

    rc = cdev_add(&g_nv_cap_drv.cdev, g_nv_cap_drv.devno,
                  NV_CAP_DRV_MINOR_COUNT);
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "nv-caps-drv failed to create cdev.\n");
        goto cdev_add_fail;
    }

    rc = nv_cap_procfs_init();
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "nv-caps-drv: unable to init proc\n");
        goto proc_init_fail;
    }

    g_nv_cap_drv.initialized = NV_TRUE;
    return 0;

proc_init_fail:
    cdev_del(&g_nv_cap_drv.cdev);

cdev_add_fail:
    unregister_chrdev_region(g_nv_cap_drv.devno, NV_CAP_DRV_MINOR_COUNT);
    return rc;
}
/*
 * Tear down the nvidia-caps driver in reverse order of nv_cap_drv_init().
 * No-op when init never completed.
 */
void NV_API_CALL nv_cap_drv_exit(void)
{
    if (!g_nv_cap_drv.initialized)
        return;

    nv_cap_procfs_exit();
    cdev_del(&g_nv_cap_drv.cdev);
    unregister_chrdev_region(g_nv_cap_drv.devno, NV_CAP_DRV_MINOR_COUNT);

    g_nv_cap_drv.initialized = NV_FALSE;
}

932
kernel-open/nvidia/nv-clk.c Normal file
View File

@@ -0,0 +1,932 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-platform.h"
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/bpmp.h>
// Use the common clock framework (CCF) APIs when CONFIG_COMMON_CLK is
// enabled in the kernel configuration.
#if defined(CONFIG_COMMON_CLK)
#define HAS_COMMON_CLOCK_FRAMEWORK 1
#else
#define HAS_COMMON_CLOCK_FRAMEWORK 0
#endif
#if HAS_COMMON_CLOCK_FRAMEWORK
#if defined(NV_DEVM_CLK_BULK_GET_ALL_PRESENT)
/*!
 * @brief Map from enum TEGRASOC_WHICH_CLK (declared in
 * arch/nvalloc/unix/include/nv.h) to the clock-consumer name used for
 * CCF / device-tree lookups in nv_clk_get_handles().
 *
 * The array is indexed by the enum value; the enum and this table must
 * be maintained/updated together when clocks are added or reordered.
 */
static const char *osMapClk[] = {
    [TEGRASOC_WHICH_CLK_NVDISPLAYHUB] = "nvdisplayhub_clk",
    [TEGRASOC_WHICH_CLK_NVDISPLAY_DISP] = "nvdisplay_disp_clk",
    [TEGRASOC_WHICH_CLK_NVDISPLAY_P0] = "nvdisplay_p0_clk",
    [TEGRASOC_WHICH_CLK_NVDISPLAY_P1] = "nvdisplay_p1_clk",
    [TEGRASOC_WHICH_CLK_NVDISPLAY_P2] = "nvdisplay_p2_clk",
    [TEGRASOC_WHICH_CLK_NVDISPLAY_P3] = "nvdisplay_p3_clk",
    [TEGRASOC_WHICH_CLK_NVDISPLAY_P4] = "nvdisplay_p4_clk",
    [TEGRASOC_WHICH_CLK_NVDISPLAY_P5] = "nvdisplay_p5_clk",
    [TEGRASOC_WHICH_CLK_NVDISPLAY_P6] = "nvdisplay_p6_clk",
    [TEGRASOC_WHICH_CLK_NVDISPLAY_P7] = "nvdisplay_p7_clk",
    [TEGRASOC_WHICH_CLK_DPAUX0] = "dpaux0_clk",
    [TEGRASOC_WHICH_CLK_FUSE] = "fuse_clk",
    [TEGRASOC_WHICH_CLK_DSIPLL_VCO] = "dsipll_vco_clk",
    [TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN] = "dsipll_clkoutpn_clk",
    [TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA] = "dsipll_clkouta_clk",
    [TEGRASOC_WHICH_CLK_SPPLL0_VCO] = "sppll0_vco_clk",
    [TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA] = "sppll0_clkouta_clk",
    [TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB] = "sppll0_clkoutb_clk",
    [TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN] = "sppll0_clkoutpn_clk",
    [TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN] = "sppll1_clkoutpn_clk",
    [TEGRASOC_WHICH_CLK_SPPLL0_DIV27] = "sppll0_div27_clk",
    [TEGRASOC_WHICH_CLK_SPPLL1_DIV27] = "sppll1_div27_clk",
    [TEGRASOC_WHICH_CLK_SPPLL0_DIV10] = "sppll0_div10_clk",
    [TEGRASOC_WHICH_CLK_SPPLL0_DIV25] = "sppll0_div25_clk",
    [TEGRASOC_WHICH_CLK_SPPLL1_VCO] = "sppll1_vco_clk",
    [TEGRASOC_WHICH_CLK_VPLL0_REF] = "vpll0_ref_clk",
    [TEGRASOC_WHICH_CLK_VPLL0] = "vpll0_clk",
    [TEGRASOC_WHICH_CLK_VPLL1] = "vpll1_clk",
    [TEGRASOC_WHICH_CLK_VPLL2] = "vpll2_clk",
    [TEGRASOC_WHICH_CLK_VPLL3] = "vpll3_clk",
    [TEGRASOC_WHICH_CLK_VPLL4] = "vpll4_clk",
    [TEGRASOC_WHICH_CLK_VPLL5] = "vpll5_clk",
    [TEGRASOC_WHICH_CLK_VPLL6] = "vpll6_clk",
    [TEGRASOC_WHICH_CLK_VPLL7] = "vpll7_clk",
    [TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF] = "nvdisplay_p0_ref_clk",
    [TEGRASOC_WHICH_CLK_RG0] = "rg0_clk",
    [TEGRASOC_WHICH_CLK_RG1] = "rg1_clk",
    [TEGRASOC_WHICH_CLK_RG2] = "rg2_clk",
    [TEGRASOC_WHICH_CLK_RG3] = "rg3_clk",
    [TEGRASOC_WHICH_CLK_RG4] = "rg4_clk",
    [TEGRASOC_WHICH_CLK_RG5] = "rg5_clk",
    [TEGRASOC_WHICH_CLK_RG6] = "rg6_clk",
    [TEGRASOC_WHICH_CLK_RG7] = "rg7_clk",
    [TEGRASOC_WHICH_CLK_DISPPLL] = "disppll_clk",
    [TEGRASOC_WHICH_CLK_DISPHUBPLL] = "disphubpll_clk",
    [TEGRASOC_WHICH_CLK_DSI_LP] = "dsi_lp_clk",
    [TEGRASOC_WHICH_CLK_DSI_CORE] = "dsi_core_clk",
    [TEGRASOC_WHICH_CLK_DSI_PIXEL] = "dsi_pixel_clk",
    [TEGRASOC_WHICH_CLK_PRE_SOR0] = "pre_sor0_clk",
    [TEGRASOC_WHICH_CLK_PRE_SOR1] = "pre_sor1_clk",
    [TEGRASOC_WHICH_CLK_PRE_SOR2] = "pre_sor2_clk",
    [TEGRASOC_WHICH_CLK_PRE_SOR3] = "pre_sor3_clk",
    // NOTE(review): lacks the 'a' suffix its B/C/D peers have — confirm
    // "dp_link_ref_clk" matches the device tree name.
    [TEGRASOC_WHICH_CLK_DP_LINKA_REF] = "dp_link_ref_clk",
    [TEGRASOC_WHICH_CLK_DP_LINKB_REF] = "dp_linkb_ref_clk",
    [TEGRASOC_WHICH_CLK_DP_LINKC_REF] = "dp_linkc_ref_clk",
    [TEGRASOC_WHICH_CLK_DP_LINKD_REF] = "dp_linkd_ref_clk",
    [TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT] = "sor_linka_input_clk",
    [TEGRASOC_WHICH_CLK_SOR_LINKB_INPUT] = "sor_linkb_input_clk",
    [TEGRASOC_WHICH_CLK_SOR_LINKC_INPUT] = "sor_linkc_input_clk",
    [TEGRASOC_WHICH_CLK_SOR_LINKD_INPUT] = "sor_linkd_input_clk",
    [TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO] = "sor_linka_afifo_clk",
    [TEGRASOC_WHICH_CLK_SOR_LINKB_AFIFO] = "sor_linkb_afifo_clk",
    [TEGRASOC_WHICH_CLK_SOR_LINKC_AFIFO] = "sor_linkc_afifo_clk",
    [TEGRASOC_WHICH_CLK_SOR_LINKD_AFIFO] = "sor_linkd_afifo_clk",
    [TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M] = "sor_linka_afifo_m_clk",
    [TEGRASOC_WHICH_CLK_RG0_M] = "rg0_m_clk",
    [TEGRASOC_WHICH_CLK_RG1_M] = "rg1_m_clk",
    [TEGRASOC_WHICH_CLK_SOR0_M] = "sor0_m_clk",
    [TEGRASOC_WHICH_CLK_SOR1_M] = "sor1_m_clk",
    [TEGRASOC_WHICH_CLK_PLLHUB] = "pllhub_clk",
    [TEGRASOC_WHICH_CLK_SOR0] = "sor0_clk",
    [TEGRASOC_WHICH_CLK_SOR1] = "sor1_clk",
    [TEGRASOC_WHICH_CLK_SOR2] = "sor2_clk",
    [TEGRASOC_WHICH_CLK_SOR3] = "sor3_clk",
    // NOTE(review): lacks the 'a' suffix its B/C/D peers have — confirm
    // "sor_pad_input_clk" matches the device tree name.
    [TEGRASOC_WHICH_CLK_SOR_PADA_INPUT] = "sor_pad_input_clk",
    [TEGRASOC_WHICH_CLK_SOR_PADB_INPUT] = "sor_padb_input_clk",
    [TEGRASOC_WHICH_CLK_SOR_PADC_INPUT] = "sor_padc_input_clk",
    [TEGRASOC_WHICH_CLK_SOR_PADD_INPUT] = "sor_padd_input_clk",
    [TEGRASOC_WHICH_CLK_SOR0_PAD] = "sor0_pad_clk",
    [TEGRASOC_WHICH_CLK_SOR1_PAD] = "sor1_pad_clk",
    [TEGRASOC_WHICH_CLK_SOR2_PAD] = "sor2_pad_clk",
    [TEGRASOC_WHICH_CLK_SOR3_PAD] = "sor3_pad_clk",
    [TEGRASOC_WHICH_CLK_PRE_SF0] = "pre_sf0_clk",
    [TEGRASOC_WHICH_CLK_SF0] = "sf0_clk",
    [TEGRASOC_WHICH_CLK_SF1] = "sf1_clk",
    [TEGRASOC_WHICH_CLK_SF2] = "sf2_clk",
    [TEGRASOC_WHICH_CLK_SF3] = "sf3_clk",
    [TEGRASOC_WHICH_CLK_SF4] = "sf4_clk",
    [TEGRASOC_WHICH_CLK_SF5] = "sf5_clk",
    [TEGRASOC_WHICH_CLK_SF6] = "sf6_clk",
    [TEGRASOC_WHICH_CLK_SF7] = "sf7_clk",
    [TEGRASOC_WHICH_CLK_DSI_PAD_INPUT] = "dsi_pad_input_clk",
    [TEGRASOC_WHICH_CLK_PRE_SOR0_REF] = "pre_sor0_ref_clk",
    [TEGRASOC_WHICH_CLK_PRE_SOR1_REF] = "pre_sor1_ref_clk",
    [TEGRASOC_WHICH_CLK_SOR0_PLL_REF] = "sor0_ref_pll_clk",
    [TEGRASOC_WHICH_CLK_SOR1_PLL_REF] = "sor1_ref_pll_clk",
    [TEGRASOC_WHICH_CLK_SOR2_PLL_REF] = "sor2_ref_pll_clk",
    [TEGRASOC_WHICH_CLK_SOR3_PLL_REF] = "sor3_ref_pll_clk",
    [TEGRASOC_WHICH_CLK_SOR0_REF] = "sor0_ref_clk",
    [TEGRASOC_WHICH_CLK_SOR1_REF] = "sor1_ref_clk",
    [TEGRASOC_WHICH_CLK_SOR2_REF] = "sor2_ref_clk",
    [TEGRASOC_WHICH_CLK_SOR3_REF] = "sor3_ref_clk",
    [TEGRASOC_WHICH_CLK_OSC] = "osc_clk",
    [TEGRASOC_WHICH_CLK_DSC] = "dsc_clk",
    [TEGRASOC_WHICH_CLK_MAUD] = "maud_clk",
    [TEGRASOC_WHICH_CLK_AZA_2XBIT] = "aza_2xbit_clk",
    [TEGRASOC_WHICH_CLK_AZA_BIT] = "aza_bit_clk",
    [TEGRASOC_WHICH_CLK_MIPI_CAL] = "mipi_cal_clk",
    [TEGRASOC_WHICH_CLK_UART_FST_MIPI_CAL] = "uart_fst_mipi_cal_clk",
    [TEGRASOC_WHICH_CLK_SOR0_DIV] = "sor0_div_clk",
    [TEGRASOC_WHICH_CLK_DISP_ROOT] = "disp_root",
    [TEGRASOC_WHICH_CLK_HUB_ROOT] = "hub_root",
    [TEGRASOC_WHICH_CLK_PLLA_DISP] = "plla_disp",
    [TEGRASOC_WHICH_CLK_PLLA_DISPHUB] = "plla_disphub",
    [TEGRASOC_WHICH_CLK_PLLA] = "plla",
    [TEGRASOC_WHICH_CLK_VPLLX_SOR0_MUXED] = "vpllx_sor0_muxed_clk",
    [TEGRASOC_WHICH_CLK_VPLLX_SOR1_MUXED] = "vpllx_sor1_muxed_clk",
    [TEGRASOC_WHICH_CLK_VPLLX_SOR2_MUXED] = "vpllx_sor2_muxed_clk",
    [TEGRASOC_WHICH_CLK_VPLLX_SOR3_MUXED] = "vpllx_sor3_muxed_clk",
    [TEGRASOC_WHICH_CLK_SF0_SOR] = "sf0_sor_clk",
    [TEGRASOC_WHICH_CLK_SF1_SOR] = "sf1_sor_clk",
    [TEGRASOC_WHICH_CLK_SF2_SOR] = "sf2_sor_clk",
    [TEGRASOC_WHICH_CLK_SF3_SOR] = "sf3_sor_clk",
    [TEGRASOC_WHICH_CLK_SF4_SOR] = "sf4_sor_clk",
    [TEGRASOC_WHICH_CLK_SF5_SOR] = "sf5_sor_clk",
    [TEGRASOC_WHICH_CLK_SF6_SOR] = "sf6_sor_clk",
    [TEGRASOC_WHICH_CLK_SF7_SOR] = "sf7_sor_clk",
    [TEGRASOC_WHICH_CLK_EMC] = "emc_clk",
    [TEGRASOC_WHICH_CLK_GPU_SYS] = "sysclk",
    [TEGRASOC_WHICH_CLK_GPU_NVD] = "nvdclk",
    [TEGRASOC_WHICH_CLK_GPU_UPROC] = "uprocclk",
    [TEGRASOC_WHICH_CLK_GPU_GPC0] = "gpc0clk",
    [TEGRASOC_WHICH_CLK_GPU_GPC1] = "gpc1clk",
    [TEGRASOC_WHICH_CLK_GPU_GPC2] = "gpc2clk",
};
#endif
/*!
* @brief Get the clock handles.
*
* Look up and obtain the clock handles for each display
* clock at boot-time and later using all those handles
* for rest of the operations. for example, enable/disable
* clocks, get current/max frequency of the clock.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
*
* @returns NV_STATUS
*/
NV_STATUS NV_API_CALL nv_clk_get_handles(
    nv_state_t *nv)
{
    NV_STATUS status = NV_OK;
#if defined(NV_DEVM_CLK_BULK_GET_ALL_PRESENT)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int i;
    NvU32 j;
    int clk_count;
    struct clk_bulk_data *clks;

    clk_count = devm_clk_bulk_get_all(nvl->dev, &clks);
    if (clk_count <= 0)
    {
        nv_printf(NV_DBG_INFO, "NVRM: No clk handles for the dev\n");
        //
        // Bug fix: return immediately. The original fell through into the
        // loop below; a negative clk_count compared against an unsigned
        // loop index wraps to a huge bound and walks an invalid clks
        // array. (The loop index is now a signed int for the same reason.)
        //
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    //
    // TEGRASOC_WHICH_CLK_MAX is the maximum clock defined in
    // arch/nvalloc/unix/include/nv.h, enum TEGRASOC_WHICH_CLK.
    // Cache both the CCF handle and the CCF name (used later by
    // nv_get_parent() to reverse-map a parent clock to its enum value).
    //
    for (i = 0; i < clk_count; i++)
    {
        for (j = 0U; j < TEGRASOC_WHICH_CLK_MAX; j++)
        {
            if (!strcmp(osMapClk[j], clks[i].id))
            {
                nvl->soc_clk_handles.clk[j].handles = clks[i].clk;
                nvl->soc_clk_handles.clk[j].clkName = __clk_get_name(clks[i].clk);
                break;
            }
        }
        if (j == TEGRASOC_WHICH_CLK_MAX)
        {
            nv_printf(NV_DBG_ERRORS, "NVRM: nv_clk_get_handles, failed to find TEGRA_SOC_WHICH_CLK for %s\n", clks[i].id);
            return NV_ERR_OBJECT_NOT_FOUND;
        }
    }
#else
    nv_printf(NV_DBG_INFO, "NVRM: devm_clk_bulk_get_all API is not present\n");
    status = NV_ERR_FEATURE_NOT_ENABLED;
#endif
    return status;
}
/*!
* @brief Enable the clock.
*
* Enabling the clock before performing any operation
* on it. The below function will prepare the clock for use
* and enable them.
*
* for more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
*
* @returns NV_STATUS
*/
NV_STATUS NV_API_CALL nv_enable_clk(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *handle = nvl->soc_clk_handles.clk[whichClkOS].handles;
    int err;

    /* A missing handle means the clock was never resolved at boot. */
    if (handle == NULL)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    err = clk_prepare_enable(handle);
    if (err != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: clk_prepare_enable failed with error: %d\n", err);
        return NV_ERR_FEATURE_NOT_ENABLED;
    }

    return NV_OK;
}
/*!
* @brief Disable the clock.
*
* Disabling the clock after performing operation or required
* work with that clock is done with that particular clock.
* The below function will unprepare the clock for further use
* and disable them.
*
* Note: make sure to disable clock before clk_put is called.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
*/
void NV_API_CALL nv_disable_clk(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    // No NULL-handle guard here (unlike nv_enable_clk); presumably the
    // CCF treats a NULL clk as a no-op — TODO confirm for all supported
    // kernels.
    clk_disable_unprepare(nvl->soc_clk_handles.clk[whichClkOS].handles);
}
/*!
* @brief Get current clock frequency.
*
* Obtain the current clock rate for a clock source.
* This is only valid once the clock source has been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
* @param[out] pCurrFreqKHz Current clock frequency
*/
NV_STATUS NV_API_CALL nv_get_curr_freq(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pCurrFreqKHz)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *handle = nvl->soc_clk_handles.clk[whichClkOS].handles;

    if (handle == NULL)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    /* clk_get_rate() reports Hz; callers expect KHz. */
    *pCurrFreqKHz = (NvU32)(clk_get_rate(handle) / 1000U);

    /* A zero rate is treated as "clock not enabled yet". */
    return (*pCurrFreqKHz > 0U) ? NV_OK : NV_ERR_FEATURE_NOT_ENABLED;
}
/*!
* @brief Get maximum clock frequency.
*
* Obtain the maximum clock rate a clock source can provide.
* This is only valid once the clock source has been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
* @param[out] pMaxFreqKHz Maximum clock frequency
*/
NV_STATUS NV_API_CALL nv_get_max_freq(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pMaxFreqKHz)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status;
    long ret;

    if (nvl->soc_clk_handles.clk[whichClkOS].handles != NULL)
    {
        //
        // clk_round_rate(clk, rate) returns the rate (in Hz) closest to
        // "rate" that the clock can actually provide, or a negative
        // errno. Requesting the largest representable rate therefore
        // yields the clock's maximum supported rate.
        //
        // NOTE(review): the request passed is NV_U32_MAX (an earlier
        // comment here claimed NV_S64_MAX); clocks faster than ~4.29 GHz
        // would be under-reported — confirm the intended upper bound.
        //
        ret = clk_round_rate(nvl->soc_clk_handles.clk[whichClkOS].handles, NV_U32_MAX);
        if (ret >= 0)
        {
            // Hz -> KHz for the caller.
            *pMaxFreqKHz = (NvU32) (ret / 1000);
            status = NV_OK;
        }
        else
        {
            status = NV_ERR_FEATURE_NOT_ENABLED;
            nv_printf(NV_DBG_ERRORS, "NVRM: clk_round_rate failed with error: %ld\n", ret);
        }
    }
    else
    {
        status = NV_ERR_OBJECT_NOT_FOUND;
    }
    return status;
}
/*!
* @brief Get minimum clock frequency.
*
* Obtain the minimum clock rate a clock source can provide.
* This is only valid once the clock source has been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
* @param[out] pMinFreqKHz Minimum clock frequency
*/
NV_STATUS NV_API_CALL nv_get_min_freq(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pMinFreqKHz)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status;
    long ret;

    if (nvl->soc_clk_handles.clk[whichClkOS].handles != NULL)
    {
        //
        // clk_round_rate(clk, rate) returns the achievable rate (in Hz)
        // closest to "rate", or a negative errno. Requesting 0 therefore
        // yields the clock's minimum supported rate:
        //     0 <= minFreq <= currFreq <= maxFreq
        // (An earlier comment here incorrectly said rate = NV_S64_MAX.)
        //
        ret = clk_round_rate(nvl->soc_clk_handles.clk[whichClkOS].handles, 0);
        if (ret >= 0)
        {
            // Hz -> KHz for the caller.
            *pMinFreqKHz = (NvU32) (ret / 1000);
            status = NV_OK;
        }
        else
        {
            status = NV_ERR_FEATURE_NOT_ENABLED;
            nv_printf(NV_DBG_ERRORS, "NVRM: clk_round_rate failed with error: %ld\n", ret);
        }
    }
    else
    {
        status = NV_ERR_OBJECT_NOT_FOUND;
    }
    return status;
}
/*!
* @brief set clock frequency.
*
* Setting the frequency of clock source.
* This is only valid once the clock source has been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
* @param[in] reqFreqKHz Required frequency
*/
NV_STATUS NV_API_CALL nv_set_freq(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 reqFreqKHz)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status;
    int ret;

    if (nvl->soc_clk_handles.clk[whichClkOS].handles != NULL)
    {
        //
        // Bug fix: widen before multiplying. "reqFreqKHz * 1000U" was a
        // 32-bit multiplication that silently wraps for rates above
        // ~4.29 GHz (> 4294967 KHz); clk_set_rate() takes an unsigned
        // long, so perform the KHz -> Hz conversion in that width.
        //
        ret = clk_set_rate(nvl->soc_clk_handles.clk[whichClkOS].handles,
                           (unsigned long)reqFreqKHz * 1000UL);
        if (ret == 0)
        {
            status = NV_OK;
        }
        else
        {
            status = NV_ERR_INVALID_REQUEST;
            nv_printf(NV_DBG_ERRORS, "NVRM: clk_set_rate failed with error: %d\n", ret);
        }
    }
    else
    {
        status = NV_ERR_OBJECT_NOT_FOUND;
    }
    return status;
}
#else
//
// Stub implementations used when the common clock framework is not
// available (CONFIG_COMMON_CLK disabled): every clock operation reports
// NV_ERR_NOT_SUPPORTED and nv_disable_clk() is a no-op.
//
NV_STATUS NV_API_CALL nv_clk_get_handles
(
    nv_state_t *nv
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL nv_enable_clk
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOS
)
{
    return NV_ERR_NOT_SUPPORTED;
}

void NV_API_CALL nv_disable_clk
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOS
)
{
    return;
}

NV_STATUS NV_API_CALL nv_get_curr_freq
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOS,
    NvU32 *pCurrFreqKHz
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL nv_get_max_freq
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOS,
    NvU32 *pMaxFreqKHz
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL nv_get_min_freq
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOS,
    NvU32 *pMinFreqKHz
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL nv_set_freq
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOS,
    NvU32 freqKHz
)
{
    return NV_ERR_NOT_SUPPORTED;
}
#endif
/*!
* @brief Clear the clock handles assigned by nv_clk_get_handles()
*
* Clear the clock handle for each display of the clocks at shutdown-time.
* Since clock handles are obtained by devm managed devm_clk_bulk_get_all()
* API, devm_clk_bulk_release_all() API is called on all the enumerated
* clk handles automatically when module gets unloaded. Hence, no need
* to explicitly free those handles.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
*/
void NV_API_CALL nv_clk_clear_handles(
    nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvU32 i;

    //
    // TEGRASOC_WHICH_CLK_MAX is the maximum clock defined in
    // arch/nvalloc/unix/include/nv.h, enum TEGRASOC_WHICH_CLK.
    // Idiom fix: assign NULL unconditionally — the previous
    // "if (handles != NULL)" guard before setting the same field to
    // NULL was redundant.
    //
    for (i = 0U; i < TEGRASOC_WHICH_CLK_MAX; i++)
    {
        nvl->soc_clk_handles.clk[i].handles = NULL;
    }
}
#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
/*!
* @brief set parent clock.
*
* Setting the parent clock of clock source.
* This is only valid once the clock source and the parent
* clock have been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOSsource Enum value of the source clock
* @param[in] whichClkOSparent Enum value of the parent clock
*/
NV_STATUS NV_API_CALL nv_set_parent
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOSsource,
    TEGRASOC_WHICH_CLK whichClkOSparent
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *child = nvl->soc_clk_handles.clk[whichClkOSsource].handles;
    struct clk *parent = nvl->soc_clk_handles.clk[whichClkOSparent].handles;
    int err;

    /* Both the reparented clock and its new parent must be resolved. */
    if ((child == NULL) || (parent == NULL))
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    err = clk_set_parent(child, parent);
    if (err != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: clk_set_parent failed with error: %d\n", err);
        return NV_ERR_INVALID_REQUEST;
    }

    return NV_OK;
}
/*!
* @brief get parent clock.
*
* Getting the parent clock of clock source.
* This is only valid once the clock source and the parent
* clock have been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOSsource Enum value of the source clock
* @param[in] pWhichClkOSparent Enum value of the parent clock
*/
NV_STATUS NV_API_CALL nv_get_parent
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOSsource,
    TEGRASOC_WHICH_CLK *pWhichClkOSparent
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *ret;
    NvU32 i;

    if (nvl->soc_clk_handles.clk[whichClkOSsource].handles != NULL)
    {
        ret = clk_get_parent(nvl->soc_clk_handles.clk[whichClkOSsource].handles);
        if (!IS_ERR_OR_NULL(ret))
        {
            // Reverse-map the parent's CCF name to a TEGRASOC_WHICH_CLK
            // index using the names cached by nv_clk_get_handles().
            const char *parentClkName = __clk_get_name(ret);
            //
            // TEGRASOC_WHICH_CLK_MAX is maximum clock defined in below enum
            // arch/nvalloc/unix/include/nv.h
            // enum TEGRASOC_WHICH_CLK
            //
            for (i = 0U; i < TEGRASOC_WHICH_CLK_MAX; i++)
            {
                //
                // soc_clk_handles has array of clks supported on all chips.
                // So depending on the chip, some clks may not be present.
                //
                if (nvl->soc_clk_handles.clk[i].clkName == NULL)
                {
                    continue;
                }
                if (!strcmp(nvl->soc_clk_handles.clk[i].clkName, parentClkName))
                {
                    *pWhichClkOSparent = i;
                    return NV_OK;
                }
            }
            // Reached only when the parent's name matched no cached clock.
            nv_printf(NV_DBG_ERRORS, "NVRM: unexpected parent clock ref addr: %p\n", ret);
            return NV_ERR_INVALID_OBJECT_PARENT;
        }
        else
        {
            nv_printf(NV_DBG_ERRORS, "NVRM: clk_get_parent failed with error: %ld\n", PTR_ERR(ret));
            return NV_ERR_INVALID_POINTER;
        }
    }

    nv_printf(NV_DBG_ERRORS, "NVRM: invalid source clock requested\n");
    return NV_ERR_OBJECT_NOT_FOUND;
}
/*!
* @brief Check if clock is enable or not.
*
* Checking the clock status if it is enabled or not before
* enabling or disabling it.
*
* for more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
*
* @returns clock status.
*/
NvBool NV_API_CALL nv_is_clk_enabled(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *handle = nvl->soc_clk_handles.clk[whichClkOS].handles;

    if (handle == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: clock handle requested not found.\n");
        return NV_FALSE;
    }

    /* Translate the CCF bool into the driver's NvBool convention. */
    return __clk_is_enabled(handle) ? NV_TRUE : NV_FALSE;
}
/*
 * Initialize the DP UPHY PLL by sending a CMD_UPHY_DISPLAY_PORT_INIT
 * request to the BPMP over MRQ_UPHY.
 *
 * @param[in] nv            Per gpu linux state
 * @param[in] link_rate     DP link rate to program
 * @param[in] lanes_bitmap  Bitmap of lanes to bring up
 *
 * @returns NV_OK on success, NV_ERR_GENERIC on any BPMP failure,
 *          NV_ERR_NOT_SUPPORTED when the kernel lacks the command.
 */
NV_STATUS NV_API_CALL nv_dp_uphy_pll_init
(
    nv_state_t *nv,
    NvU32 link_rate,
    NvU32 lanes_bitmap
)
{
#if defined(NV_CMD_UPHY_DISPLAY_PORT_INIT_PRESENT)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct tegra_bpmp *bpmp;
    struct tegra_bpmp_message msg;
    struct mrq_uphy_request req;
    struct mrq_uphy_response resp;
    int rc;
    NV_STATUS status = NV_OK;

    bpmp = tegra_bpmp_get(nvl->dev);
    if (IS_ERR(bpmp))
    {
        //
        // Bug fix: PTR_ERR() yields a long; the original printed it with
        // %s, which dereferences the errno value as a string pointer
        // (undefined behavior). Print it as %ld.
        //
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: Error getting bpmp struct: %ld\n",
                  PTR_ERR(bpmp));
        return NV_ERR_GENERIC;
    }

    // Zero the request so unset/padding bytes are not sent to firmware
    // as stack garbage.
    memset(&req, 0, sizeof(req));
    req.cmd = CMD_UPHY_DISPLAY_PORT_INIT;
    req.display_port_init.link_rate = link_rate;
    req.display_port_init.lanes_bitmap = lanes_bitmap;

    memset(&msg, 0, sizeof(msg));
    msg.mrq = MRQ_UPHY;
    msg.tx.data = &req;
    msg.tx.size = sizeof(req);
    msg.rx.data = &resp;
    msg.rx.size = sizeof(resp);

    rc = tegra_bpmp_transfer(bpmp, &msg);
    if (rc)
    {
        nv_printf(NV_DBG_ERRORS, "DP UPHY pll initialization failed, rc - %d\n", rc);
        status = NV_ERR_GENERIC;
    }

    tegra_bpmp_put(bpmp);
    return status;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
/*
 * De-initialize the DP UPHY PLL by sending CMD_UPHY_DISPLAY_PORT_OFF to
 * the BPMP over MRQ_UPHY.
 *
 * @param[in] nv  Per gpu linux state
 *
 * @returns NV_OK on success, NV_ERR_GENERIC on any BPMP failure,
 *          NV_ERR_NOT_SUPPORTED when the kernel lacks the command.
 */
NV_STATUS NV_API_CALL nv_dp_uphy_pll_deinit(nv_state_t *nv)
{
#if defined(NV_CMD_UPHY_DISPLAY_PORT_OFF_PRESENT)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct tegra_bpmp *bpmp;
    struct tegra_bpmp_message msg;
    struct mrq_uphy_request req;
    struct mrq_uphy_response resp;
    int rc;
    NV_STATUS status = NV_OK;

    bpmp = tegra_bpmp_get(nvl->dev);
    if (IS_ERR(bpmp))
    {
        //
        // Bug fix: PTR_ERR() yields a long; the original printed it with
        // %s, which dereferences the errno value as a string pointer
        // (undefined behavior). Print it as %ld.
        //
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: Error getting bpmp struct: %ld\n",
                  PTR_ERR(bpmp));
        return NV_ERR_GENERIC;
    }

    // Zero the request so unset/padding bytes are not sent to firmware
    // as stack garbage.
    memset(&req, 0, sizeof(req));
    req.cmd = CMD_UPHY_DISPLAY_PORT_OFF;

    memset(&msg, 0, sizeof(msg));
    msg.mrq = MRQ_UPHY;
    msg.tx.data = &req;
    msg.tx.size = sizeof(req);
    msg.rx.data = &resp;
    msg.rx.size = sizeof(resp);

    rc = tegra_bpmp_transfer(bpmp, &msg);
    if (rc)
    {
        nv_printf(NV_DBG_ERRORS, "DP UPHY pll de-initialization failed, rc - %d\n", rc);
        status = NV_ERR_GENERIC;
    }

    tegra_bpmp_put(bpmp);
    return status;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
#else
//
// Stub implementations used when platform display device support is
// compiled out (NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE is false): clock
// parent control/queries and DP UPHY PLL control all report
// not-supported, and clock-enabled queries report false.
//
NV_STATUS NV_API_CALL nv_set_parent
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOSsource,
    TEGRASOC_WHICH_CLK whichClkOSparent
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NvBool NV_API_CALL nv_is_clk_enabled(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
{
    return NV_FALSE;
}

NV_STATUS NV_API_CALL nv_dp_uphy_pll_deinit(nv_state_t *nv)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL nv_get_parent
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOSsource,
    TEGRASOC_WHICH_CLK *pWhichClkOSparent
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL nv_dp_uphy_pll_init
(
    nv_state_t *nv,
    NvU32 link_rate,
    NvU32 lanes_bitmap
)
{
    return NV_ERR_NOT_SUPPORTED;
}
#endif

View File

@@ -0,0 +1,217 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#if defined(CONFIG_CRAY_XT)
/* Parser states for scanning a printf-style format string. */
enum {
    NV_FORMAT_STATE_ORDINARY,
    NV_FORMAT_STATE_INTRODUCTION,
    NV_FORMAT_STATE_FLAGS,
    NV_FORMAT_STATE_FIELD_WIDTH,
    NV_FORMAT_STATE_PRECISION,
    NV_FORMAT_STATE_LENGTH_MODIFIER,
    NV_FORMAT_STATE_CONVERSION_SPECIFIER
};

/* Argument-width classes derived from printf length modifiers. */
enum {
    NV_LENGTH_MODIFIER_NONE,
    NV_LENGTH_MODIFIER_CHAR,
    NV_LENGTH_MODIFIER_SHORT_INT,
    NV_LENGTH_MODIFIER_LONG_INT,
    NV_LENGTH_MODIFIER_LONG_LONG_INT
};

/* Character classes from the C99 printf grammar. */
#define NV_IS_FLAG(c) \
    ((c) == '#' || (c) == '0' || (c) == '-' || (c) == ' ' || (c) == '+')
#define NV_IS_LENGTH_MODIFIER(c) \
    ((c) == 'h' || (c) == 'l' || (c) == 'L' || (c) == 'q' || (c) == 'j' || \
     (c) == 'z' || (c) == 't')
#define NV_IS_CONVERSION_SPECIFIER(c) \
    ((c) == 'd' || (c) == 'i' || (c) == 'o' || (c) == 'u' || (c) == 'x' || \
     (c) == 'X' || (c) == 'e' || (c) == 'E' || (c) == 'f' || (c) == 'F' || \
     (c) == 'g' || (c) == 'G' || (c) == 'a' || (c) == 'A' || (c) == 'c' || \
     (c) == 's' || (c) == 'p')

/* Maximum number of 64-bit info words forwarded to the Cray hook. */
#define NV_MAX_NUM_INFO_MMRS 6
/*
 * Pack the integer arguments of a printf-style error message into up to
 * NV_MAX_NUM_INFO_MMRS 64-bit "info MMR" words and forward them, along
 * with the error number, to cray_nvidia_report_error().
 *
 * The format string is scanned by a state machine that mirrors the C99
 * printf grammar (flags, field width, precision, length modifier,
 * conversion specifier). Only integer/character conversions are packed;
 * any other conversion yields NV_ERR_INVALID_ARGUMENT, and overflowing
 * the info-word budget yields NV_ERR_INSUFFICIENT_RESOURCES.
 */
NV_STATUS nvos_forward_error_to_cray(
    struct pci_dev *dev,
    NvU32 error_number,
    const char *format,
    va_list ap
)
{
    NvU32 num_info_mmrs;
    NvU64 x = 0, info_mmrs[NV_MAX_NUM_INFO_MMRS];
    int state = NV_FORMAT_STATE_ORDINARY;
    int modifier = NV_LENGTH_MODIFIER_NONE;
    NvU32 i, n = 0, m = 0;

    memset(info_mmrs, 0, sizeof(info_mmrs));

    while (*format != '\0')
    {
        //
        // Stage 1: advance the parser state for the current character.
        // These cases intentionally fall through so the character is
        // tested against each subsequent grammar element in order.
        //
        switch (state)
        {
            case NV_FORMAT_STATE_ORDINARY:
                if (*format == '%')
                    state = NV_FORMAT_STATE_INTRODUCTION;
                break;
            case NV_FORMAT_STATE_INTRODUCTION:
                if (*format == '%')
                {
                    /* "%%" is a literal percent: back to ordinary text. */
                    state = NV_FORMAT_STATE_ORDINARY;
                    break;
                }
                /* fallthrough */
            case NV_FORMAT_STATE_FLAGS:
                if (NV_IS_FLAG(*format))
                {
                    state = NV_FORMAT_STATE_FLAGS;
                    break;
                }
                else if (*format == '*')
                {
                    state = NV_FORMAT_STATE_FIELD_WIDTH;
                    break;
                }
                /* fallthrough */
            case NV_FORMAT_STATE_FIELD_WIDTH:
                if ((*format >= '0') && (*format <= '9'))
                {
                    state = NV_FORMAT_STATE_FIELD_WIDTH;
                    break;
                }
                else if (*format == '.')
                {
                    state = NV_FORMAT_STATE_PRECISION;
                    break;
                }
                /* fallthrough */
            case NV_FORMAT_STATE_PRECISION:
                if ((*format >= '0') && (*format <= '9'))
                {
                    state = NV_FORMAT_STATE_PRECISION;
                    break;
                }
                else if (NV_IS_LENGTH_MODIFIER(*format))
                {
                    state = NV_FORMAT_STATE_LENGTH_MODIFIER;
                    break;
                }
                else if (NV_IS_CONVERSION_SPECIFIER(*format))
                {
                    state = NV_FORMAT_STATE_CONVERSION_SPECIFIER;
                    break;
                }
                /* fallthrough */
            case NV_FORMAT_STATE_LENGTH_MODIFIER:
                if ((*format == 'h') || (*format == 'l'))
                {
                    state = NV_FORMAT_STATE_LENGTH_MODIFIER;
                    break;
                }
                else if (NV_IS_CONVERSION_SPECIFIER(*format))
                {
                    state = NV_FORMAT_STATE_CONVERSION_SPECIFIER;
                    break;
                }
        }

        //
        // Stage 2: act on the (possibly updated) state.
        //
        switch (state)
        {
            case NV_FORMAT_STATE_INTRODUCTION:
                modifier = NV_LENGTH_MODIFIER_NONE;
                break;
            case NV_FORMAT_STATE_LENGTH_MODIFIER:
                switch (*format)
                {
                    case 'h':
                        /* "h" = short, "hh" = char. */
                        modifier = (modifier == NV_LENGTH_MODIFIER_NONE)
                            ? NV_LENGTH_MODIFIER_SHORT_INT
                            : NV_LENGTH_MODIFIER_CHAR;
                        break;
                    case 'l':
                        /* "l" = long, "ll" = long long. */
                        modifier = (modifier == NV_LENGTH_MODIFIER_NONE)
                            ? NV_LENGTH_MODIFIER_LONG_INT
                            : NV_LENGTH_MODIFIER_LONG_LONG_INT;
                        break;
                    case 'q':
                        /* BSD "q" is a synonym for "ll". */
                        modifier = NV_LENGTH_MODIFIER_LONG_LONG_INT;
                        //
                        // Bug fix: this case previously fell through into
                        // "default" and returned NV_ERR_INVALID_ARGUMENT,
                        // making the assignment above dead code and
                        // rejecting every "%q..." conversion.
                        //
                        break;
                    default:
                        // 'L', 'j', 'z', 't' pass NV_IS_LENGTH_MODIFIER()
                        // but are (still) rejected here.
                        return NV_ERR_INVALID_ARGUMENT;
                }
                break;
            case NV_FORMAT_STATE_CONVERSION_SPECIFIER:
                switch (*format)
                {
                    case 'c':
                    case 'd':
                    case 'i':
                        /* char and int both promote to int in varargs. */
                        x = (unsigned int)va_arg(ap, int);
                        break;
                    case 'o':
                    case 'u':
                    case 'x':
                    case 'X':
                        switch (modifier)
                        {
                            case NV_LENGTH_MODIFIER_LONG_LONG_INT:
                                x = va_arg(ap, unsigned long long int);
                                break;
                            case NV_LENGTH_MODIFIER_LONG_INT:
                                x = va_arg(ap, unsigned long int);
                                break;
                            case NV_LENGTH_MODIFIER_CHAR:
                            case NV_LENGTH_MODIFIER_SHORT_INT:
                            case NV_LENGTH_MODIFIER_NONE:
                                /* Sub-int types promote to int. */
                                x = va_arg(ap, unsigned int);
                                break;
                        }
                        break;
                    default:
                        // Floating point, strings and pointers cannot be
                        // represented in the info MMRs.
                        return NV_ERR_INVALID_ARGUMENT;
                }
                state = NV_FORMAT_STATE_ORDINARY;
                //
                // Pack the value 32 bits at a time; a long long consumes
                // two 32-bit slots (two slots per 64-bit info word).
                //
                for (i = 0; i < ((modifier == NV_LENGTH_MODIFIER_LONG_LONG_INT)
                                 ? 2 : 1); i++)
                {
                    if (m == NV_MAX_NUM_INFO_MMRS)
                        return NV_ERR_INSUFFICIENT_RESOURCES;
                    info_mmrs[m] = ((info_mmrs[m] << 32) | (x & 0xffffffff));
                    x >>= 32;
                    if (++n == 2)
                    {
                        m++;
                        n = 0;
                    }
                }
        }
        format++;
    }

    num_info_mmrs = (m + (n != 0));
    if (num_info_mmrs > 0)
        cray_nvidia_report_error(dev, error_number, num_info_mmrs, info_mmrs);

    return NV_OK;
}
#endif

1003
kernel-open/nvidia/nv-dma.c Normal file
View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,263 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 - 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "os_gpio.h"
#define NV_GPIOF_DIR_IN (1 << 0)
/* Read the current level of a GPIO pin into *pinValue. */
NV_STATUS NV_API_CALL nv_gpio_get_pin_state
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 *pinValue
)
{
#if NV_SUPPORTS_PLATFORM_DEVICE
    int val = gpio_get_value(pinNum);

    if (val < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: %s: failed with err: %d\n",
                  __func__, val);
        return NV_ERR_GENERIC;
    }

    *pinValue = val;
    return NV_OK;
#else
    nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n");
    return NV_ERR_GENERIC;
#endif
}
/*
 * Drive a GPIO pin to the requested logic level.
 *
 * No return value: on platforms without platform-device support this is a
 * no-op apart from the error message.
 */
void NV_API_CALL nv_gpio_set_pin_state
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 pinValue
)
{
#if NV_SUPPORTS_PLATFORM_DEVICE
    gpio_set_value(pinNum, pinValue);
#else
    nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n");
#endif
}
/*
 * Configure a GPIO pin as input or output.
 *
 * A non-zero 'direction' selects input; zero selects output, with the
 * output level initialized low.  Returns NV_OK on success, NV_ERR_GENERIC
 * if the kernel call fails or platform device support is absent.
 */
NV_STATUS NV_API_CALL nv_gpio_set_pin_direction
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 direction
)
{
#if NV_SUPPORTS_PLATFORM_DEVICE
    int status = direction ? gpio_direction_input(pinNum)
                           : gpio_direction_output(pinNum, 0);

    if (status != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: %s: failed with err: %d\n",
                  __func__, status);
        return NV_ERR_GENERIC;
    }

    return NV_OK;
#else
    nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n");
    return NV_ERR_GENERIC;
#endif
}
/*
 * Query the direction (input/output) of a GPIO pin.
 *
 * Currently a stub: the gpio_get_direction wrapper is not yet available in
 * the kernel, so the implementation is compiled out.
 *
 * NOTE(review): as written this always returns NV_OK *without* writing
 * *direction, so callers observe success with an unmodified output value.
 * Confirm callers tolerate this until the kernel wrapper lands.
 */
NV_STATUS NV_API_CALL nv_gpio_get_pin_direction
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 *direction
)
{
    /*!
     * TODO: Commenting out until gpio_get_direction wrapper
     * support is added in kernel.
     */
#if 0
    int ret;
    ret = nv_gpio_get_direction(pinNum);
    if (ret)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: %s: failed with err: %d\n",
                  __func__, ret);
        return NV_ERR_GENERIC;
    }
    *direction = ret;
#endif
    return NV_OK;
}
/*
 * Resolve an OS GPIO function ID to a GPIO pin number via the device tree
 * and claim the pin (as an input) for this device.
 *
 * Returns NV_OK with *pinNum set on success; NV_ERR_INVALID_ARGUMENT for an
 * out-of-range function ID; NV_ERR_GENERIC if the DT lookup or the GPIO
 * request fails, or if platform device support is absent.
 */
NV_STATUS NV_API_CALL nv_gpio_get_pin_number
(
    nv_state_t *nv,
    NvU32 function,
    NvU32 *pinNum
)
{
#if NV_SUPPORTS_PLATFORM_DEVICE
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int rc;
    /*!
     * @brief Mapping array of OS GPIO function ID to OS function name,
     * this name is used to get GPIO number from Device Tree.
     */
    static const char *osMapGpioFunc[] = {
        [NV_OS_GPIO_FUNC_HOTPLUG_A] = "os_gpio_hotplug_a",
        [NV_OS_GPIO_FUNC_HOTPLUG_B] = "os_gpio_hotplug_b",
        [NV_OS_GPIO_FUNC_HOTPLUG_C] = "os_gpio_hotplug_c",
        [NV_OS_GPIO_FUNC_HOTPLUG_D] = "os_gpio_hotplug_d",
    };

    //
    // Bug fix: validate 'function' before using it as an index.  The
    // original indexed osMapGpioFunc[] unchecked, reading out of bounds
    // for any unknown function ID.
    //
    if ((function >= (sizeof(osMapGpioFunc) / sizeof(osMapGpioFunc[0]))) ||
        (osMapGpioFunc[function] == NULL))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: %s: invalid GPIO function: %u\n",
                  __func__, function);
        return NV_ERR_INVALID_ARGUMENT;
    }

    rc = of_get_named_gpio(nvl->dev->of_node, osMapGpioFunc[function], 0);
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: of_get_name_gpio failed for gpio - %s, rc - %d\n",
                  osMapGpioFunc[function], rc);
        return NV_ERR_GENERIC;
    }

    *pinNum = rc;

    // Claim the pin for this device; released automatically via devm.
    rc = devm_gpio_request_one(nvl->dev, *pinNum, NV_GPIOF_DIR_IN,
                               osMapGpioFunc[function]);
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: request gpio failed for gpio - %s, rc - %d\n",
                  osMapGpioFunc[function], rc);
        return NV_ERR_GENERIC;
    }

    return NV_OK;
#else
    nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n");
    return NV_ERR_GENERIC;
#endif
}
/*
 * Report whether the interrupt currently being serviced belongs to this
 * GPIO pin with the expected level.
 *
 * All three checks must pass: the current IRQ must be a GPIO-type IRQ, its
 * private data must identify this pin, and the pin's sampled value must
 * equal 'direction'.
 */
NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 direction
)
{
#if NV_SUPPORTS_PLATFORM_DEVICE
    NvU32 irqPin;

    if (nv_get_current_irq_type(nv) != NV_SOC_IRQ_GPIO_TYPE)
        return NV_FALSE;

    nv_get_current_irq_priv_data(nv, &irqPin);
    if (irqPin != pinNum)
        return NV_FALSE;

    if ((NvU32)gpio_get_value(pinNum) != direction)
        return NV_FALSE;

    return NV_TRUE;
#else
    nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n");
    return NV_FALSE;
#endif
}
/*
 * Register a hotplug interrupt for a GPIO pin.
 *
 * Returns NV_OK on success (or when registration is intentionally skipped
 * for the falling-trigger call; see comment below), NV_ERR_GENERIC if IRQ
 * mapping or registration fails, or if platform device support is absent.
 */
NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt
(
    nv_state_t * nv,
    NvU32 pinNum,
    NvU32 trigger_level
)
{
#if NV_SUPPORTS_PLATFORM_DEVICE
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int rc;
    int irq_num;

    /*
     * Ignore setting interrupt for falling trigger for hotplug gpio pin
     * as hotplug sequence calls this function twice to set the level
     * (rising/falling) of interrupt for same gpio pin. Linux interrupt
     * registration allows only once to register the interrupt with required
     * trigger levels. So to avoid re-registration, skip registering for
     * falling trigger level but when this function called with rising trigger
     * then itself register for both rising/falling triggers.
     */
    if (trigger_level == 0)
    {
        return NV_OK;
    }

    //
    // Bug fix: gpio_to_irq() can fail and return a negative errno; the
    // original passed that value straight to nv_request_soc_irq().
    //
    irq_num = gpio_to_irq(pinNum);
    if (irq_num < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: gpio_to_irq failed for gpio - %d, rc - %d\n",
                  pinNum, irq_num);
        return NV_ERR_GENERIC;
    }

    rc = nv_request_soc_irq(nvl, irq_num, NV_SOC_IRQ_GPIO_TYPE,
                            (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
                             IRQF_ONESHOT), pinNum,
                            "hdmi-hotplug");
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: IRQ registration failed for gpio - %d, rc - %d\n",
                  pinNum, rc);
        return NV_ERR_GENERIC;
    }

    /* Disable the irq after registration as RM init sequence re-enables it */
    disable_irq_nosync(irq_num);
    return NV_OK;
#else
    nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n");
    return NV_ERR_GENERIC;
#endif
}

View File

@@ -0,0 +1,83 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#if defined(NV_LINUX_NVHOST_H_PRESENT)
#include <linux/host1x-next.h>
#include <linux/nvhost.h>
#if defined(NV_LINUX_NVHOST_T194_H_PRESENT)
#include <linux/nvhost_t194.h>
#endif
/*
 * Look up the MMIO aperture of a host1x syncpoint shim.
 *
 * On success, *physAddr receives the shim base address, *limit the
 * per-syncpoint stride, and *offset the byte offset of this syncpoint
 * within the shim region.
 */
NV_STATUS nv_get_syncpoint_aperture
(
    NvU32 syncpointId,
    NvU64 *physAddr,
    NvU64 *limit,
    NvU32 *offset
)
{
    struct platform_device *default_dev;
    struct host1x *host;
    phys_addr_t shim_base;
    NvU32 shim_stride;
    NvU32 syncpt_count;
    NvS32 err;

    default_dev = nvhost_get_default_device();
    if (default_dev == NULL)
    {
        return NV_ERR_INVALID_DEVICE;
    }

    host = platform_get_drvdata(default_dev);
    err = host1x_syncpt_get_shim_info(host, &shim_base, &shim_stride,
                                      &syncpt_count);
    if ((err != 0) || (syncpointId >= syncpt_count))
    {
        return NV_ERR_INVALID_DATA;
    }

    *physAddr = shim_base;
    *limit = shim_stride;
    *offset = shim_stride * syncpointId;
    return NV_OK;
}
#else
/*
 * Stub used when the nvhost/host1x headers are unavailable: syncpoint
 * apertures cannot be queried, so report the feature as unsupported.
 */
NV_STATUS nv_get_syncpoint_aperture
(
    NvU32 syncpointId,
    NvU64 *physAddr,
    NvU64 *limit,
    NvU32 *offset
)
{
    return NV_ERR_NOT_SUPPORTED;
}
#endif

558
kernel-open/nvidia/nv-i2c.c Normal file
View File

@@ -0,0 +1,558 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2005-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include <linux/i2c.h>
#include "os-interface.h"
#include "nv-linux.h"
#include "nvi2c.h"
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/*
 * i2c_algorithm.master_xfer hook: service a batch of raw I2C messages
 * through the RM I2C path.
 *
 * Only plain reads/writes are accepted (plus I2C_M_DMA_SAFE, which needs
 * no special handling here); any other flag aborts the batch.  Returns the
 * number of messages transferred on success, or a negative errno.
 */
static int nv_i2c_algo_master_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num)
{
    nv_state_t *nv = (nv_state_t *)adapter->algo_data;
    nvidia_stack_t *stack = NULL;
    NV_STATUS rm_status = NV_OK;
    unsigned int idx;
    int err;
    const unsigned int allowed_flags = I2C_M_RD
#if defined(I2C_M_DMA_SAFE)
        | I2C_M_DMA_SAFE
#endif
        ;

    err = nv_kmem_cache_alloc_stack(&stack);
    if (err != 0)
    {
        return err;
    }

    err = -EIO;
    for (idx = 0; (idx < (unsigned int)num) && (rm_status == NV_OK); idx++)
    {
        if ((msgs[idx].flags & ~allowed_flags) != 0)
        {
            /* we only support basic I2C reads/writes, reject any other commands */
            err = -EINVAL;
            nv_printf(NV_DBG_ERRORS, "NVRM: Unsupported I2C flags used. (flags:0x%08x)\n",
                      msgs[idx].flags);
            rm_status = NV_ERR_INVALID_ARGUMENT;
        }
        else
        {
            rm_status = rm_i2c_transfer(stack, nv, (void *)adapter,
                                        (msgs[idx].flags & I2C_M_RD) ?
                                            NV_I2C_CMD_READ : NV_I2C_CMD_WRITE,
                                        (NvU8)(msgs[idx].addr & 0x7f), 0,
                                        (NvU32)(msgs[idx].len & 0xffffUL),
                                        (NvU8 *)msgs[idx].buf);
        }
    }

    nv_kmem_cache_free_stack(stack);
    return (rm_status == NV_OK) ? num : err;
}
/*
 * i2c_algorithm.smbus_xfer hook: translate a kernel SMBus request into the
 * matching RM I2C command.
 *
 * Returns 0 on success, -EINVAL for an unsupported transaction size, and
 * -EIO for any RM-level failure.
 */
static int nv_i2c_algo_smbus_xfer(
    struct i2c_adapter *adapter,
    u16 addr,
    unsigned short flags,
    char read_write,
    u8 command,
    int size,
    union i2c_smbus_data *data
)
{
    nv_state_t *nv = (nv_state_t *)adapter->algo_data;
    int rc;
    NV_STATUS rmStatus = NV_OK;
    nvidia_stack_t *sp = NULL;

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        return rc;
    }

    rc = -EIO;
    switch (size)
    {
        case I2C_SMBUS_QUICK:
            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
                                       (read_write == I2C_SMBUS_READ) ?
                                           NV_I2C_CMD_SMBUS_QUICK_READ :
                                           NV_I2C_CMD_SMBUS_QUICK_WRITE,
                                       (NvU8)(addr & 0x7f), 0, 0, NULL);
            break;

        case I2C_SMBUS_BYTE:
            if (read_write == I2C_SMBUS_READ)
            {
                rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
                                           NV_I2C_CMD_READ,
                                           (NvU8)(addr & 0x7f), 0, 1,
                                           (NvU8 *)&data->byte);
            }
            else
            {
                //
                // For SMBus "send byte" the payload travels in the command
                // field.  Fix: renamed the local from 'data', which shadowed
                // the 'union i2c_smbus_data *data' parameter.
                //
                u8 tx_byte = command;
                rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
                                           NV_I2C_CMD_WRITE,
                                           (NvU8)(addr & 0x7f), 0, 1,
                                           (NvU8 *)&tx_byte);
            }
            break;

        case I2C_SMBUS_BYTE_DATA:
            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
                                       (read_write == I2C_SMBUS_READ) ?
                                           NV_I2C_CMD_SMBUS_READ :
                                           NV_I2C_CMD_SMBUS_WRITE,
                                       (NvU8)(addr & 0x7f), (NvU8)command, 1,
                                       (NvU8 *)&data->byte);
            break;

        case I2C_SMBUS_WORD_DATA:
            // Words travel on the wire little-endian via block[1..2].
            if (read_write != I2C_SMBUS_READ)
            {
                u16 word = data->word;
                data->block[1] = (word & 0xff);
                data->block[2] = (word >> 8);
            }
            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
                                       (read_write == I2C_SMBUS_READ) ?
                                           NV_I2C_CMD_SMBUS_READ :
                                           NV_I2C_CMD_SMBUS_WRITE,
                                       (NvU8)(addr & 0x7f), (NvU8)command, 2,
                                       (NvU8 *)&data->block[1]);
            if (read_write == I2C_SMBUS_READ)
            {
                data->word = ((NvU16)data->block[1]) |
                             ((NvU16)data->block[2] << 8);
            }
            break;

        case I2C_SMBUS_BLOCK_DATA:
            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
                                       (read_write == I2C_SMBUS_READ) ?
                                           NV_I2C_CMD_SMBUS_BLOCK_READ :
                                           NV_I2C_CMD_SMBUS_BLOCK_WRITE,
                                       (NvU8)(addr & 0x7f), (NvU8)command,
                                       sizeof(data->block),
                                       (NvU8 *)data->block);
            break;

        case I2C_SMBUS_I2C_BLOCK_DATA:
            // block[0] holds the byte count; the payload starts at block[1].
            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
                                       (read_write == I2C_SMBUS_READ) ?
                                           NV_I2C_CMD_BLOCK_READ :
                                           NV_I2C_CMD_BLOCK_WRITE,
                                       (NvU8)(addr & 0x7f), (NvU8)command,
                                       (NvU8)data->block[0],
                                       (NvU8 *)&data->block[1]);
            break;

        default:
            rc = -EINVAL;
            rmStatus = NV_ERR_INVALID_ARGUMENT;
    }

    nv_kmem_cache_free_stack(sp);
    return (rmStatus != NV_OK) ? rc : 0;
}
/*
 * i2c_algorithm.functionality hook: always advertise raw I2C; add the
 * SMBus capability set when RM reports the controller is SMBus-capable.
 * Returns 0 if a stack cannot be allocated for the RM query.
 */
static u32 nv_i2c_algo_functionality(struct i2c_adapter *adapter)
{
    nv_state_t *nv = (nv_state_t *)adapter->algo_data;
    nvidia_stack_t *stack = NULL;
    u32 funcs = I2C_FUNC_I2C;

    if (nv_kmem_cache_alloc_stack(&stack) != 0)
    {
        return 0;
    }

    if (rm_i2c_is_smbus_capable(stack, nv, adapter))
    {
        funcs |= (I2C_FUNC_SMBUS_QUICK |
                  I2C_FUNC_SMBUS_BYTE |
                  I2C_FUNC_SMBUS_BYTE_DATA |
                  I2C_FUNC_SMBUS_WORD_DATA |
                  I2C_FUNC_SMBUS_BLOCK_DATA |
                  I2C_FUNC_SMBUS_I2C_BLOCK);
    }

    nv_kmem_cache_free_stack(stack);
    return funcs;
}
/* Algorithm ops shared by every NVIDIA I2C adapter instance. */
static struct i2c_algorithm nv_i2c_algo = {
    .master_xfer = nv_i2c_algo_master_xfer,
    .smbus_xfer = nv_i2c_algo_smbus_xfer,
    .functionality = nv_i2c_algo_functionality,
};
/*
 * Template copied into each dynamically allocated adapter by
 * nv_i2c_add_adapter(); algo_data is filled in with the per-GPU
 * nv_state_t at registration time.
 */
struct i2c_adapter nv_i2c_adapter_prototype = {
    .owner = THIS_MODULE,
    .algo = &nv_i2c_algo,
    .algo_data = NULL,
};
/*
 * Allocate, initialize, and register an i2c adapter for the given RM port.
 *
 * Returns an opaque adapter handle (to be released with nv_i2c_del_adapter)
 * or NULL on allocation/registration failure.
 */
void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
{
    NV_STATUS rmStatus;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct i2c_adapter *pI2cAdapter = NULL;
    int osstatus = 0;

    // get a i2c adapter
    rmStatus = os_alloc_mem((void **)&pI2cAdapter, sizeof(struct i2c_adapter));
    if (rmStatus != NV_OK)
        return NULL;

    // fill in with default structure
    os_mem_copy(pI2cAdapter, &nv_i2c_adapter_prototype, sizeof(struct i2c_adapter));
    pI2cAdapter->dev.parent = nvl->dev;

    //
    // Bug fix: the adapter name is exposed through sysfs and i2c-dev; the
    // original format strings embedded a trailing '\n' in the name itself.
    //
    if (nvl->pci_dev != NULL)
    {
        snprintf(pI2cAdapter->name, sizeof(pI2cAdapter->name),
                 "NVIDIA i2c adapter %u at %x:%02x.%u", port, nv->pci_info.bus,
                 nv->pci_info.slot, PCI_FUNC(nvl->pci_dev->devfn));
    }
    else
    {
        snprintf(pI2cAdapter->name, sizeof(pI2cAdapter->name),
                 "NVIDIA SOC i2c adapter %u", port);
    }

    // add our data to the structure
    pI2cAdapter->algo_data = (void *)nv;

    // attempt to register with the kernel
    osstatus = i2c_add_adapter(pI2cAdapter);
    if (osstatus)
    {
        // free the memory and NULL the ptr
        os_free_mem(pI2cAdapter);
        pI2cAdapter = NULL;
    }

    return ((void *)pI2cAdapter);
}
/*
 * Unregister an adapter previously created by nv_i2c_add_adapter and
 * release its backing allocation.  A NULL handle is a no-op.
 */
void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
{
    struct i2c_adapter *adapter = (struct i2c_adapter *)data;

    if (adapter == NULL)
        return;

    // release with the OS
    i2c_del_adapter(adapter);
    os_free_mem(adapter);
}
#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
/*
 * Create an i2c client for 'address' on the adapter bound to
 * 'linuxI2CSwPort' and record it in the per-port client table so that
 * nv_i2c_unregister_clients() can tear it down later.
 *
 * Returns the new client, or NULL on failure.  A full client table is now
 * treated as failure: previously the client was created and returned but
 * never tracked, so it leaked at unregister time.
 */
static struct i2c_client * nv_i2c_register_client(
    nv_state_t *nv,
    NvU32 linuxI2CSwPort,
    NvU8 address)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct i2c_adapter *i2c_adapter;
    struct i2c_client *client;
    int c_index;
    NvBool saved = NV_FALSE;
    struct i2c_board_info i2c_dev_info = {
        .type = "tegra_display",
        .addr = address,
    };

    /* Get the adapter using i2c port */
    i2c_adapter = i2c_get_adapter(linuxI2CSwPort);
    if (i2c_adapter == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Unable to get i2c adapter for port(%d)\n",
                  linuxI2CSwPort);
        return NULL;
    }

#if defined(NV_I2C_NEW_CLIENT_DEVICE_PRESENT)
    client = i2c_new_client_device(i2c_adapter, &i2c_dev_info);
#else
    nv_printf(NV_DBG_ERRORS, "NVRM: nv_i2c_new_device not present\n");
    client = NULL;
#endif
    if (client == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Unable to register client for address(0x%x)\n",
                  address);
        i2c_put_adapter(i2c_adapter);
        return NULL;
    }
    i2c_put_adapter(i2c_adapter);

    /* Save the Port and i2c client */
    nvl->i2c_clients[linuxI2CSwPort].port = linuxI2CSwPort;
    for (c_index = 0; c_index < MAX_CLIENTS_PER_ADAPTER; c_index++)
    {
        if (nvl->i2c_clients[linuxI2CSwPort].pOsClient[c_index] == NULL)
        {
            nvl->i2c_clients[linuxI2CSwPort].pOsClient[c_index] = client;
            saved = NV_TRUE;
            break;
        }
    }

    if (!saved)
    {
        // Bug fix: don't hand back an untracked client when the table is full.
        nv_printf(NV_DBG_ERRORS, "NVRM: no free i2c client slot for port(%d)\n",
                  linuxI2CSwPort);
        i2c_unregister_device(client);
        return NULL;
    }

    return client;
}
/*
 * Scan the per-port client table for an entry with the given 7-bit
 * address.  The table is packed from index 0, so the first NULL slot
 * terminates the search.  Returns NULL when no match exists.
 */
static struct i2c_client *nv_i2c_get_registered_client(
    nv_state_t *nv,
    NvU32 linuxI2CSwPort,
    NvU8 address)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int slot;

    for (slot = 0; slot < MAX_CLIENTS_PER_ADAPTER; slot++)
    {
        struct i2c_client *candidate =
            (struct i2c_client *)nvl->i2c_clients[linuxI2CSwPort].pOsClient[slot];

        if (candidate == NULL)
            break;

        if ((NvU8)candidate->addr == address)
            return candidate;
    }

    return NULL;
}
/*
 * Perform an i2c transfer built from 'num_msgs' RM-style messages on the
 * given Linux i2c port, registering clients on demand.
 *
 * Fixes over the original:
 *  - rejects num_msgs <= 0 (previously 'client' was passed to
 *    i2c_transfer() uninitialized when the loop never ran -- UB);
 *  - drops the tautological 'linuxI2CSwPort >= 0' comparison (NvU32 is
 *    unsigned, so the check reduced to the upper bound anyway).
 */
NV_STATUS NV_API_CALL nv_i2c_transfer(
    nv_state_t *nv,
    NvU32 linuxI2CSwPort,
    NvU8 address,
    nv_i2c_msg_t *nv_msgs,
    int num_msgs
)
{
    struct i2c_client *client = NULL;
    struct i2c_msg *msgs;
    int count;
    int rc;
    NV_STATUS status = NV_OK;

    //
    // RM style client address is 8-bit addressing, but Linux use 7-bit
    // addressing, so convert to 7-bit addressing format.
    //
    address = address >> 1;

    // Check if its valid port
    if (linuxI2CSwPort >= MAX_TEGRA_I2C_PORTS)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Invalid I2C port:%d\n", linuxI2CSwPort);
        return NV_ERR_INVALID_ARGUMENT;
    }

    // Bug fix: with no messages there is no client to transfer through.
    if ((num_msgs <= 0) || (nv_msgs == NULL))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: no i2c messages to transfer\n");
        return NV_ERR_INVALID_ARGUMENT;
    }

    for (count = 0; count < num_msgs; count++) {
        //
        // RM style client address is 8-bit addressing, but Linux use 7-bit
        // addressing, so convert to 7-bit addressing format.
        //
        nv_msgs[count].addr = nv_msgs[count].addr >> 1;
        client = nv_i2c_get_registered_client(nv, linuxI2CSwPort, nv_msgs[count].addr);
        if (client == NULL)
        {
            client = nv_i2c_register_client(nv, linuxI2CSwPort, nv_msgs[count].addr);
            if (client == NULL)
            {
                nv_printf(NV_DBG_ERRORS, "NVRM: i2c client register failed for addr:0x%x\n",
                          nv_msgs[count].addr);
                return NV_ERR_GENERIC;
            }
        }
    }

    msgs = kzalloc((num_msgs * sizeof(*msgs)), GFP_KERNEL);
    if (msgs == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: i2c message allocation failed\n");
        return NV_ERR_NO_MEMORY;
    }

    for (count = 0; count < num_msgs; count++) {
        msgs[count].addr = nv_msgs[count].addr;
        msgs[count].flags = nv_msgs[count].flags;
        msgs[count].len = nv_msgs[count].len;
        msgs[count].buf = nv_msgs[count].buf;
    }

    // All messages share the adapter of the last resolved client (one port).
    rc = i2c_transfer(client->adapter, msgs, num_msgs);
    if (rc != num_msgs)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: i2c transfer failed for addr: 0x%x\n",
                  address);
        status = NV_ERR_GENERIC;
    }

    kfree(msgs);
    return status;
}
/*
 * Tear down every i2c client registered on any port and clear the
 * per-port bookkeeping table.
 */
void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int port, slot;

    for (port = 0; port < MAX_TEGRA_I2C_PORTS; port++)
    {
        for (slot = 0; slot < MAX_CLIENTS_PER_ADAPTER; slot++)
        {
            struct i2c_client *client =
                (struct i2c_client *)nvl->i2c_clients[port].pOsClient[slot];

            if (client != NULL)
            {
                i2c_unregister_device(client);
                nvl->i2c_clients[port].pOsClient[slot] = NULL;
            }
        }
    }
}
/*
 * Sample the SCL/SDA line states of the given i2c bus.
 *
 * Fixes over the original:
 *  - the adapter-lookup failure path returned NULL from an NV_STATUS
 *    function (NULL == 0 == NV_OK, i.e. a false success);
 *  - the adapter reference was leaked when i2c_bus_status() failed
 *    (i2c_put_adapter was only reached on success);
 *  - removed the unused 'nvl' local and the tautological unsigned
 *    'linuxI2CSwPort >= 0' comparison.
 */
NV_STATUS NV_API_CALL nv_i2c_bus_status(
    nv_state_t *nv,
    NvU32 linuxI2CSwPort,
    NvS32 *scl,
    NvS32 *sda)
{
#if NV_IS_EXPORT_SYMBOL_PRESENT_i2c_bus_status
    struct i2c_adapter *i2c_adapter;
    int ret;

    // Check if its valid port
    if (linuxI2CSwPort >= MAX_TEGRA_I2C_PORTS)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Invalid I2C port:%d\n", linuxI2CSwPort);
        return NV_ERR_INVALID_ARGUMENT;
    }

    /* Get the adapter using i2c port */
    i2c_adapter = i2c_get_adapter(linuxI2CSwPort);
    if (i2c_adapter == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Unable to get i2c adapter for port(%d)\n",
                  linuxI2CSwPort);
        return NV_ERR_GENERIC;
    }

    ret = i2c_bus_status(i2c_adapter, scl, sda);
    i2c_put_adapter(i2c_adapter);

    if (ret < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: i2c_bus_status failed:%d\n", ret);
        return NV_ERR_GENERIC;
    }

    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
#endif // NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
#endif // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
#if !defined(CONFIG_I2C) && !defined(CONFIG_I2C_MODULE)
/*
 * Stubs used when the kernel has no I2C support at all: adapter
 * registration is a no-op that hands back NULL, and deletion accepts
 * (and ignores) any handle.
 */
void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
{
}
void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
{
    return NULL;
}
#endif
#if !NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE || \
    (NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE && \
     (!defined(CONFIG_I2C) && !defined(CONFIG_I2C_MODULE)))
/*
 * Stubs used when the platform display device path (or I2C entirely) is
 * not compiled in.
 *
 * NOTE(review): the nv_i2c_transfer stub reports NV_OK without doing any
 * transfer, so callers cannot distinguish "unsupported" from success --
 * confirm this silent-success behavior is intended.
 */
NV_STATUS NV_API_CALL nv_i2c_transfer(
    nv_state_t *nv,
    NvU32 linuxI2CSwPort,
    NvU8 address,
    nv_i2c_msg_t *nv_msgs,
    int num_msgs
)
{
    return NV_OK;
}
/* Nothing to unregister when client tracking is compiled out. */
void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *nv)
{
}
/* Bus-status sampling unavailable in this configuration. */
NV_STATUS NV_API_CALL nv_i2c_bus_status(
    nv_state_t *nv,
    NvU32 linuxI2CSwPort,
    NvS32 *scl,
    NvS32 *sda)
{
    return NV_ERR_GENERIC;
}
#endif

350
kernel-open/nvidia/nv-imp.c Normal file
View File

@@ -0,0 +1,350 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#if defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT) || IS_ENABLED(CONFIG_TEGRA_BPMP)
#include <soc/tegra/bpmp-abi.h>
#endif
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
#include <soc/tegra/bpmp.h>
#elif defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT)
#include <soc/tegra/tegra_bpmp.h>
#endif // IS_ENABLED(CONFIG_TEGRA_BPMP)
#if defined NV_DT_BINDINGS_INTERCONNECT_TEGRA_ICC_ID_H_PRESENT
#include <dt-bindings/interconnect/tegra_icc_id.h>
#endif
#ifdef NV_LINUX_PLATFORM_TEGRA_MC_UTILS_H_PRESENT
#include <linux/platform/tegra/mc_utils.h>
#endif
//
// IMP requires information from various BPMP and MC driver functions. The
// macro below checks that all of the required functions are present.
//
#define IMP_SUPPORT_FUNCTIONS_PRESENT \
(defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT) || \
IS_ENABLED(CONFIG_TEGRA_BPMP)) && \
defined(NV_LINUX_PLATFORM_TEGRA_MC_UTILS_H_PRESENT)
//
// Also create a macro to check if all the required ICC symbols are present.
// DT endpoints are defined in dt-bindings/interconnect/tegra_icc_id.h.
//
#define ICC_SUPPORT_FUNCTIONS_PRESENT \
defined(NV_DT_BINDINGS_INTERCONNECT_TEGRA_ICC_ID_H_PRESENT)
/*!
* @brief Returns IMP-relevant data collected from other modules
*
* @param[out] tegra_imp_import_data Structure to receive the data
*
* @returns NV_OK if successful,
* NV_ERR_NOT_SUPPORTED if the functionality is not available.
*/
NV_STATUS NV_API_CALL
nv_imp_get_import_data
(
    TEGRA_IMP_IMPORT_DATA *tegra_imp_import_data
)
{
#if IMP_SUPPORT_FUNCTIONS_PRESENT
    // Only the DRAM channel count is imported today; it comes from the
    // Tegra MC driver (mc_utils).
    tegra_imp_import_data->num_dram_channels = get_dram_num_channels();
    nv_printf(NV_DBG_INFO, "NVRM: num_dram_channels = %u\n",
              tegra_imp_import_data->num_dram_channels);
    return NV_OK;
#else // IMP_SUPPORT_FUNCTIONS_PRESENT
    return NV_ERR_NOT_SUPPORTED;
#endif
}
/*!
* @brief Tells BPMP whether or not RFL is valid
*
* Display HW generates an ok_to_switch signal which asserts when mempool
* occupancy is high enough to be able to turn off memory long enough to
* execute a dramclk frequency switch without underflowing display output.
* ok_to_switch drives the RFL ("request for latency") signal in the memory
* unit, and the switch sequencer waits for this signal to go active before
* starting a dramclk switch. However, if the signal is not valid (e.g., if
* display HW or SW has not been initialized yet), the switch sequencer ignores
* the signal. This API tells BPMP whether or not the signal is valid.
*
* @param[in] nv Per GPU Linux state
* @param[in] bEnable True if RFL will be valid; false if invalid
*
* @returns NV_OK if successful,
* NV_ERR_NOT_SUPPORTED if the functionality is not available, or
* NV_ERR_GENERIC if some other kind of error occurred.
*/
NV_STATUS NV_API_CALL
nv_imp_enable_disable_rfl
(
    nv_state_t *nv,
    NvBool bEnable
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;
#if IMP_SUPPORT_FUNCTIONS_PRESENT
#if IS_ENABLED(CONFIG_TEGRA_BPMP) && NV_SUPPORTS_PLATFORM_DEVICE
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct tegra_bpmp *bpmp = tegra_bpmp_get(nvl->dev);
    struct tegra_bpmp_message msg;
    struct mrq_emc_disp_rfl_request emc_disp_rfl_request;
    int rc;

    // Build the MRQ_EMC_DISP_RFL request: enable or disable "wait for RFL"
    // in the BPMP dramclk switch sequencer.  No response payload expected
    // (rx.size == 0).
    memset(&emc_disp_rfl_request, 0, sizeof(emc_disp_rfl_request));
    emc_disp_rfl_request.mode = bEnable ? EMC_DISP_RFL_MODE_ENABLED :
                                          EMC_DISP_RFL_MODE_DISABLED;
    msg.mrq = MRQ_EMC_DISP_RFL;
    msg.tx.data = &emc_disp_rfl_request;
    msg.tx.size = sizeof(emc_disp_rfl_request);
    msg.rx.data = NULL;
    msg.rx.size = 0;

    // Synchronous round trip to BPMP; 0 means the firmware accepted the
    // request.
    rc = tegra_bpmp_transfer(bpmp, &msg);
    if (rc == 0)
    {
        nv_printf(NV_DBG_INFO,
                  "\"Wait for RFL\" is %s via MRQ_EMC_DISP_RFL\n",
                  bEnable ? "enabled" : "disabled");
        status = NV_OK;
    }
    else
    {
        nv_printf(NV_DBG_ERRORS,
                  "MRQ_EMC_DISP_RFL failed to %s \"Wait for RFL\" (error code = %d)\n",
                  bEnable ? "enable" : "disable",
                  rc);
        status = NV_ERR_GENERIC;
    }
#else
    // Required BPMP/platform support missing: report NV_ERR_NOT_SUPPORTED.
    nv_printf(NV_DBG_ERRORS, "nv_imp_enable_disable_rfl stub called!\n");
#endif
#endif
    return status;
}
/*!
* @brief Obtains a handle for the display data path
*
* If a handle is obtained successfully, it is not returned to the caller; it
* is saved for later use by subsequent nv_imp_icc_set_bw calls.
* nv_imp_icc_get must be called prior to calling nv_imp_icc_set_bw.
*
* @param[out] nv Per GPU Linux state
*
* @returns NV_OK if successful,
* NV_ERR_NOT_SUPPORTED if the functionality is not available, or
* NV_ERR_GENERIC if some other error occurred.
*/
NV_STATUS NV_API_CALL
nv_imp_icc_get
(
    nv_state_t *nv
)
{
#if ICC_SUPPORT_FUNCTIONS_PRESENT && NV_SUPPORTS_PLATFORM_DEVICE
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status = NV_OK;
#if defined(NV_DEVM_ICC_GET_PRESENT)
    // Needs to use devm_of_icc_get function as per the latest ICC driver
    //
    // Preferred path: device-managed lookup by DT interconnect name.
    // NULL means no interconnect entry (treated as unsupported); a valid
    // pointer means success; an ERR_PTR falls through to the legacy path.
    nvl->nv_imp_icc_path =
        devm_of_icc_get(nvl->dev, "read-1");
    if (nvl->nv_imp_icc_path == NULL)
    {
        nv_printf(NV_DBG_INFO, "NVRM: devm_of_icc_get failed\n");
        return NV_ERR_NOT_SUPPORTED;
    }
    else if (!IS_ERR(nvl->nv_imp_icc_path))
    {
        // Remember this path came from devm so nv_imp_icc_put skips the
        // manual icc_put.
        nvl->is_upstream_icc_path = NV_TRUE;
        return NV_OK;
    }
    //
    // Till we modify all DTs to have interconnect node specified as per
    // the latest ICC driver, fallback to older ICC mechanism.
    //
#endif
    nvl->nv_imp_icc_path = NULL;
#if defined(NV_ICC_GET_PRESENT)
    struct device_node *np;
    // Check if ICC is present in the device tree, and enabled.
    np = of_find_node_by_path("/icc");
    if (np != NULL)
    {
        if (of_device_is_available(np))
        {
            // Get the ICC data path.
            nvl->nv_imp_icc_path =
                icc_get(nvl->dev, TEGRA_ICC_DISPLAY, TEGRA_ICC_PRIMARY);
        }
        of_node_put(np);
    }
#else
    nv_printf(NV_DBG_ERRORS, "NVRM: icc_get() not present\n");
    return NV_ERR_NOT_SUPPORTED;
#endif
    // Legacy-path result: NULL means ICC disabled (e.g. no /icc DT node),
    // ERR_PTR means the lookup itself failed.
    if (nvl->nv_imp_icc_path == NULL)
    {
        nv_printf(NV_DBG_INFO, "NVRM: icc_get disabled\n");
        status = NV_ERR_NOT_SUPPORTED;
    }
    else if (IS_ERR(nvl->nv_imp_icc_path))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: invalid path = %ld\n",
                  PTR_ERR(nvl->nv_imp_icc_path));
        nvl->nv_imp_icc_path = NULL;
        status = NV_ERR_GENERIC;
    }
    return status;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
/*!
* @brief Releases the handle obtained by nv_imp_icc_get
*
* @param[in] nv Per GPU Linux state
*/
void
nv_imp_icc_put
(
    nv_state_t *nv
)
{
#if ICC_SUPPORT_FUNCTIONS_PRESENT
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
#if defined(NV_DEVM_ICC_GET_PRESENT)
    //
    // If devm_of_icc_get API is used for requesting the bandwidth,
    // it does not require to call put explicitly.
    //
    if (nvl->is_upstream_icc_path)
    {
        goto done;
    }
#endif
#if defined(NV_ICC_PUT_PRESENT) && NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
    if (nvl->nv_imp_icc_path != NULL)
    {
        icc_put(nvl->nv_imp_icc_path);
    }
#else
    nv_printf(NV_DBG_ERRORS, "icc_put() not present\n");
#endif
    // NOTE(review): when NV_DEVM_ICC_GET_PRESENT is undefined this label
    // has no matching goto, which may produce an unused-label warning --
    // verify against the supported conftest combinations.
done:
    // Always drop our reference bookkeeping, regardless of which path
    // originally produced it.
    nvl->nv_imp_icc_path = NULL;
#endif
}
/*!
* @brief Allocates a specified amount of ISO memory bandwidth for display
*
* floor_bw_kbps is the minimum required (i.e., floor) dramclk frequency
* multiplied by the width of the pipe over which the display data will travel.
* (It is understood that the bandwidth calculated by multiplying the clock
* frequency by the pipe width will not be realistically achievable, due to
* overhead in the memory subsystem. ICC will not actually use the bandwidth
* value, except to reverse the calculation to get the required dramclk
* frequency.)
*
* nv_imp_icc_get must be called prior to calling this function.
*
* @param[in] nv Per GPU Linux state
* @param[in] avg_bw_kbps Amount of ISO memory bandwidth requested
* @param[in] floor_bw_kbps Min required dramclk freq * pipe width
*
* @returns NV_OK if successful,
* NV_ERR_INSUFFICIENT_RESOURCES if one of the bandwidth values is too
* high, and bandwidth cannot be allocated,
* NV_ERR_NOT_SUPPORTED if the functionality is not available, or
* NV_ERR_GENERIC if some other kind of error occurred.
*/
NV_STATUS NV_API_CALL
nv_imp_icc_set_bw
(
    nv_state_t *nv,
    NvU32 avg_bw_kbps,
    NvU32 floor_bw_kbps
)
{
#if ICC_SUPPORT_FUNCTIONS_PRESENT && NV_SUPPORTS_PLATFORM_DEVICE
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int rc;
    NV_STATUS status = NV_OK;
    //
    // avg_bw_kbps can be either ISO bw request or NISO bw request.
    // Use floor_bw_kbps to make floor requests.
    //
#if defined(NV_ICC_SET_BW_PRESENT)
    //
    // nv_imp_icc_path will be NULL on AV + L systems because ICC is disabled.
    // In this case, skip the allocation call, and just return a success
    // status.
    //
    if (nvl->nv_imp_icc_path == NULL)
    {
        return NV_OK;
    }
    rc = icc_set_bw(nvl->nv_imp_icc_path, avg_bw_kbps, floor_bw_kbps);
#else
    // Without icc_set_bw() there is no way to request bandwidth; the
    // early return below also means 'rc' is never read in this branch.
    nv_printf(NV_DBG_ERRORS, "icc_set_bw() not present\n");
    return NV_ERR_NOT_SUPPORTED;
#endif
    if (rc < 0)
    {
        // A negative return value indicates an error.
        // -ENOMEM maps to "request too large"; everything else is generic.
        if (rc == -ENOMEM)
        {
            status = NV_ERR_INSUFFICIENT_RESOURCES;
        }
        else
        {
            status = NV_ERR_GENERIC;
        }
    }
    return status;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}

View File

@@ -0,0 +1,143 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "dce_rm_client_ipc.h"
#if defined(NV_LINUX_PLATFORM_TEGRA_DCE_DCE_CLIENT_IPC_H_PRESENT)
#include <linux/platform/tegra/dce/dce-client-ipc.h>
/*
 * Maps RM-side DCE IPC interface types to the DCE driver's client IPC
 * types; indexed by DCE_CLIENT_RM_IPC_TYPE_* values.
 */
static const NvU32 dceClientRmIpcTypeMap[DCE_CLIENT_RM_IPC_TYPE_MAX] = {
    [DCE_CLIENT_RM_IPC_TYPE_SYNC] = DCE_CLIENT_IPC_TYPE_CPU_RM,
    [DCE_CLIENT_RM_IPC_TYPE_EVENT] = DCE_CLIENT_IPC_TYPE_RM_EVENT,
};
/*
 * Accept only interface types that both index dceClientRmIpcTypeMap and
 * map to a real DCE client IPC type.
 */
static NV_STATUS validate_dce_client_ipc_interface_type(NvU32 interfaceType)
{
    if ((interfaceType < DCE_CLIENT_RM_IPC_TYPE_MAX) &&
        (dceClientRmIpcTypeMap[interfaceType] < DCE_CLIENT_IPC_TYPE_MAX))
    {
        return NV_OK;
    }
    return NV_ERR_INVALID_ARGUMENT;
}
/*
 * Reverse lookup: translate a DCE driver client IPC type back to the RM
 * interface type that maps to it.
 *
 * NOTE(review): on a failed lookup this returns NV_ERR_INVALID_DATA even
 * though the declared return type is an NvU32 interface type, not an
 * NV_STATUS -- callers must compare against that sentinel explicitly.
 * Confirm no caller treats the value as a plain index without checking.
 */
NvU32 nv_tegra_get_rm_interface_type(NvU32 clientIpcType)
{
    NvU32 interfaceType = DCE_CLIENT_RM_IPC_TYPE_SYNC;
    for (interfaceType = DCE_CLIENT_RM_IPC_TYPE_SYNC;
         interfaceType < DCE_CLIENT_RM_IPC_TYPE_MAX;
         interfaceType++)
    {
        if (dceClientRmIpcTypeMap[interfaceType] == clientIpcType)
            return interfaceType;
    }
    return NV_ERR_INVALID_DATA;
}
/*
 * Validate and translate the RM interface type, then register the caller's
 * callback/context with the DCE client IPC layer.  *handle receives the
 * client ID on success.
 */
NV_STATUS nv_tegra_dce_register_ipc_client
(
    NvU32 interfaceType,
    void *usrCtx,
    nvTegraDceClientIpcCallback callbackFn,
    NvU32 *handle
)
{
    NV_STATUS validation = validate_dce_client_ipc_interface_type(interfaceType);

    if (validation != NV_OK)
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    return tegra_dce_register_ipc_client(dceClientRmIpcTypeMap[interfaceType],
                                         callbackFn, usrCtx, handle);
}
/*
 * Issue a synchronous DCE IPC round trip; the reply overwrites the request
 * in the caller's 'msg' buffer (tx and rx share the same storage and
 * length).
 */
NV_STATUS nv_tegra_dce_client_ipc_send_recv
(
    NvU32 clientId,
    void *msg,
    NvU32 msgLength
)
{
    struct dce_ipc_message ipc_msg = {
        .tx = { .data = msg, .size = msgLength },
        .rx = { .data = msg, .size = msgLength },
    };

    return tegra_dce_client_ipc_send_recv(clientId, &ipc_msg);
}
/* Thin pass-through to the DCE driver's unregister call. */
NV_STATUS nv_tegra_dce_unregister_ipc_client(NvU32 clientId)
{
    return tegra_dce_unregister_ipc_client(clientId);
}
#else
/*
 * Stub implementations used when the Tegra DCE client IPC driver header is
 * not present in this kernel: every entry point reports "not supported".
 */
NvU32 nv_tegra_get_rm_interface_type(NvU32 clientIpcType)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS nv_tegra_dce_register_ipc_client
(
    NvU32 interfaceType,
    void *usrCtx,
    nvTegraDceClientIpcCallback callbackFn,
    NvU32 *handle
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS nv_tegra_dce_client_ipc_send_recv
(
    NvU32 clientId,
    void *msg,
    NvU32 msgLength
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS nv_tegra_dce_unregister_ipc_client(NvU32 clientId)
{
    return NV_ERR_NOT_SUPPORTED;
}
#endif

/* ==================== next file: nv-kthread-q implementation ==================== */
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-kthread-q.h"
#include "nv-list-helpers.h"
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bug.h>
// Today's implementation is a little simpler and more limited than the
// API description allows for in nv-kthread-q.h. Details include:
//
// 1. Each nv_kthread_q instance is a first-in, first-out queue.
//
// 2. Each nv_kthread_q instance is serviced by exactly one kthread.
//
// You can create any number of queues, each of which gets its own
// named kernel thread (kthread). You can then insert arbitrary functions
// into the queue, and those functions will be run in the context of the
// queue's kthread.
#ifndef WARN
// Only *really* old kernels (2.6.9) end up here. Just use a simple printk
// to implement this, because such kernels won't be supported much longer.
#define WARN(condition, format...) ({                     \
    int __ret_warn_on = !!(condition);                    \
    if (unlikely(__ret_warn_on))                          \
        printk(KERN_ERR format);                          \
    unlikely(__ret_warn_on);                              \
})
#endif

// Queue diagnostics: prefix the message with the execution context
// (interrupt vs. named task), since queue APIs may be called from either.
#define NVQ_WARN(fmt, ...)                                \
    do {                                                  \
        if (in_interrupt()) {                             \
            WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \
                 ##__VA_ARGS__);                          \
        }                                                 \
        else {                                            \
            WARN(1, "nv_kthread_q: task: %s: " fmt,       \
                 current->comm,                           \
                 ##__VA_ARGS__);                          \
        }                                                 \
    } while (0)
// Queue service thread: block on the counting semaphore, pop one item per
// wakeup, and run it outside the queue lock.  Exits once
// main_loop_should_exit is set by nv_kthread_q_stop().
static int _main_loop(void *args)
{
    nv_kthread_q_t *q = (nv_kthread_q_t *)args;
    nv_kthread_q_item_t *q_item = NULL;
    unsigned long flags;

    while (1) {
        // Normally this thread is never interrupted. However,
        // down_interruptible (instead of down) is called here,
        // in order to avoid being classified as a potentially
        // hung task, by the kernel watchdog.
        while (down_interruptible(&q->q_sem))
            NVQ_WARN("Interrupted during semaphore wait\n");

        if (atomic_read(&q->main_loop_should_exit))
            break;

        spin_lock_irqsave(&q->q_lock, flags);

        // The q_sem semaphore prevents us from getting here unless there is
        // at least one item in the list, so an empty list indicates a bug.
        if (unlikely(list_empty(&q->q_list_head))) {
            spin_unlock_irqrestore(&q->q_lock, flags);
            NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q);
            continue;
        }

        // Consume one item from the queue
        q_item = list_first_entry(&q->q_list_head,
                                  nv_kthread_q_item_t,
                                  q_list_node);

        // del_init (not plain del) re-initializes the node, so the item can
        // be rescheduled while or after its callback runs.
        list_del_init(&q_item->q_list_node);

        spin_unlock_irqrestore(&q->q_lock, flags);

        // Run the item (no locks held).
        q_item->function_to_run(q_item->function_args);

        // Make debugging a little simpler by clearing this between runs:
        q_item = NULL;
    }

    // Remain schedulable until nv_kthread_q_stop() calls kthread_stop() on
    // this thread, then return.
    while (!kthread_should_stop())
        schedule();

    return 0;
}
// Stop a queue: flush remaining items, then signal and reap its kthread.
// Safe on a queue whose init failed (q_kthread == NULL), and safe to call
// twice (q_kthread is cleared below, making the second call a no-op).
void nv_kthread_q_stop(nv_kthread_q_t *q)
{
    // check if queue has been properly initialized
    if (unlikely(!q->q_kthread))
        return;

    nv_kthread_q_flush(q);

    // If this assertion fires, then a caller likely either broke the API rules,
    // by adding items after calling nv_kthread_q_stop, or possibly messed up
    // with inadequate flushing of self-rescheduling q_items.
    if (unlikely(!list_empty(&q->q_list_head)))
        NVQ_WARN("list not empty after flushing\n");

    if (likely(!atomic_read(&q->main_loop_should_exit))) {
        atomic_set(&q->main_loop_should_exit, 1);

        // Wake up the kthread so that it can see that it needs to stop:
        up(&q->q_sem);

        kthread_stop(q->q_kthread);
        q->q_kthread = NULL;
    }
}
// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by
// kthread_create_on_node relies on a 2 entry, per-core cache to minimize
// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the
// stack location ends up being a function of the core assigned to the current
// thread, instead of being a function of the specified NUMA node. The cache was
// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0
// ("fork: Optimize task creation by caching two thread stacks per CPU if
// CONFIG_VMAP_STACK=y")
//
// To work around the problematic cache, we create up to three kernel threads
// -If the first thread's stack is resident on the preferred node, return this
// thread.
// -Otherwise, create a second thread. If its stack is resident on the
// preferred node, stop the first thread and return this one.
// -Otherwise, create a third thread. The stack allocator does not find a
// cached stack, and so falls back to vmalloc, which takes the NUMA hint into
// consideration. The first two threads are then stopped.
//
// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned.
//
// This function is never invoked when there is no NUMA preference (preferred
// node is NUMA_NO_NODE).
// Create a kthread whose stack is resident on preferred_node, using the
// up-to-three-attempts strategy described in the comment block above.
// Returns the selected thread, or an IS_ERR() value only if the very first
// kthread_create_on_node() call fails.
static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
                                                 nv_kthread_q_t *q,
                                                 int preferred_node,
                                                 const char *q_name)
{
    unsigned i, j;
    static const unsigned attempts = 3;
    struct task_struct *thread[3];

    for (i = 0;; i++) {
        struct page *stack;

        thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name);

        if (unlikely(IS_ERR(thread[i]))) {
            // Instead of failing, pick the previous thread, even if its
            // stack is not allocated on the preferred node.
            if (i > 0)
                i--;
            break;
        }

        // vmalloc is not used to allocate the stack, so simply return the
        // thread, even if its stack may not be allocated on the preferred node
        if (!is_vmalloc_addr(thread[i]->stack))
            break;

        // Ran out of attempts - return thread even if its stack may not be
        // allocated on the preferred node
        if (i == (attempts - 1))
            break;

        // Get the NUMA node where the first page of the stack is resident. If
        // it is the preferred node, select this thread.
        stack = vmalloc_to_page(thread[i]->stack);
        if (page_to_nid(stack) == preferred_node)
            break;
    }

    // Stop the rejected candidates (indices 0..i-1); thread[i] is returned.
    for (j = i; j > 0; j--)
        kthread_stop(thread[j - 1]);

    return thread[i];
}
// Initialize a queue and create its service kthread, preferring to place the
// kthread's stack on preferred_node (NV_KTHREAD_NO_NODE for no preference).
// Returns 0 on success, or a negative errno if thread creation failed.
int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node)
{
    memset(q, 0, sizeof(*q));

    INIT_LIST_HEAD(&q->q_list_head);
    spin_lock_init(&q->q_lock);
    sema_init(&q->q_sem, 0);

    q->q_kthread = (preferred_node == NV_KTHREAD_NO_NODE)
        ? kthread_create(_main_loop, q, q_name)
        : thread_create_on_node(_main_loop, q, preferred_node, q_name);

    if (IS_ERR(q->q_kthread)) {
        int err = PTR_ERR(q->q_kthread);

        // Reset q_kthread so a subsequent nv_kthread_q_stop() on this queue
        // is a safe no-op, which simplifies callers' error handling.
        q->q_kthread = NULL;
        return err;
    }

    wake_up_process(q->q_kthread);

    return 0;
}
// Initialize a queue with no NUMA placement preference for its kthread.
int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
    return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}
// Append q_item to the queue unless it is already linked into one.
// Returns 1 if the item was newly enqueued (and the worker was woken),
// 0 if it was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
{
    unsigned long irq_flags;
    int newly_queued = 0;

    spin_lock_irqsave(&q->q_lock, irq_flags);

    // An empty (self-linked) list node means the item is not queued anywhere.
    if (likely(list_empty(&q_item->q_list_node))) {
        list_add_tail(&q_item->q_list_node, &q->q_list_head);
        newly_queued = 1;
    }

    spin_unlock_irqrestore(&q->q_lock, irq_flags);

    // Wake the service thread after dropping the lock.
    if (likely(newly_queued))
        up(&q->q_sem);

    return newly_queued;
}
// Prepare a q_item so it can later be scheduled onto a queue.
void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item,
                            nv_q_func_t function_to_run,
                            void *function_args)
{
    q_item->function_to_run = function_to_run;
    q_item->function_args = function_args;

    // An empty (self-linked) node marks the item as "not currently queued".
    INIT_LIST_HEAD(&q_item->q_list_node);
}
// Schedule an item onto a live queue.
// Returns true (non-zero) if the q_item got scheduled, false otherwise
// (either already pending, or the queue is being stopped).
int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q,
                                 nv_kthread_q_item_t *q_item)
{
    if (unlikely(atomic_read(&q->main_loop_should_exit))) {
        NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was "
                 "called with a non-alive q: 0x%p\n", q);
        return 0;
    }

    return _raw_q_schedule(q, q_item);
}
// Sentinel callback used by _raw_q_flush(): signals the completion that the
// flushing thread is blocked on.
static void _q_flush_function(void *args)
{
    complete((struct completion *)args);
}
// Flush by enqueuing a sentinel item and waiting for it: since the queue is
// FIFO, everything queued before the sentinel has run by the time it fires.
static void _raw_q_flush(nv_kthread_q_t *q)
{
    nv_kthread_q_item_t q_item;
    DECLARE_COMPLETION_ONSTACK(completion);

    nv_kthread_q_item_init(&q_item, _q_flush_function, &completion);

    _raw_q_schedule(q, &q_item);

    // Wait for the flush item to run. Once it has run, then all of the
    // previously queued items in front of it will have run, so that means
    // the flush is complete.
    wait_for_completion(&completion);
}
// Public flush: waits until all currently pending items — including one
// round of items that reschedule themselves — have run.  Calling this after
// nv_kthread_q_stop has begun stopping the queue is an API violation.
void nv_kthread_q_flush(nv_kthread_q_t *q)
{
    if (unlikely(atomic_read(&q->main_loop_should_exit))) {
        NVQ_WARN("Not allowed: nv_kthread_q_flush was called after "
                 "nv_kthread_q_stop. q: 0x%p\n", q);
        return;
    }

    // This 2x flush is not a typing mistake. The queue really does have to be
    // flushed twice, in order to take care of the case of a q_item that
    // reschedules itself.
    _raw_q_flush(q);
    _raw_q_flush(q);
}

/* ==================== next file: nv-memdbg implementation ==================== */
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-memdbg.h"
#include "nv-linux.h"
/* track who's allocating memory and print out a list of leaked allocations at
* teardown.
*/
#define NV_MEM_LOGGER_STACK_TRACE 0
#if defined(NV_STACK_TRACE_PRESENT) && defined(NV_MEM_LOGGER) && defined(DEBUG)
#define NV_MEM_LOGGER_STACK_TRACE 1
#endif
// One tracked allocation: keyed by address in the rb-tree, with the size and
// the allocation site recorded for leak reports.
typedef struct {
    struct rb_node rb_node;
    void *addr;          // allocation address (rb-tree key)
    NvU64 size;          // allocation size in bytes
    NvU32 line;          // source line of the allocation site
    const char *file;    // source file of the allocation site (may be NULL)
#if NV_MEM_LOGGER_STACK_TRACE == 1
    unsigned long stack_trace[32];   // call stack captured at allocation time
#endif
} nv_memdbg_node_t;
// Global tracker state: rb-tree of tracked allocations, plus running totals
// for allocations whose tracking node could not be allocated ("untracked").
// All fields are protected by 'lock'.
struct
{
    struct rb_root rb_root;
    NvU64 untracked_bytes;
    NvU64 num_untracked_allocs;
    nv_spinlock_t lock;
} g_nv_memdbg;
// One-time setup of the allocation tracker: empty tree, fresh lock.
void nv_memdbg_init(void)
{
    g_nv_memdbg.rb_root = RB_ROOT;
    NV_SPIN_LOCK_INIT(&g_nv_memdbg.lock);
}
// Convert an embedded rb_node pointer back to its containing tracking node.
static nv_memdbg_node_t *nv_memdbg_node_entry(struct rb_node *rb_node)
{
    return rb_entry(rb_node, nv_memdbg_node_t, rb_node);
}
// Insert 'new' into the address-ordered rb-tree.
// Caller holds g_nv_memdbg.lock (see nv_memdbg_add).
static void nv_memdbg_insert_node(nv_memdbg_node_t *new)
{
    nv_memdbg_node_t *node;
    struct rb_node **rb_node = &g_nv_memdbg.rb_root.rb_node;
    struct rb_node *rb_parent = NULL;

    while (*rb_node)
    {
        node = nv_memdbg_node_entry(*rb_node);

        // A duplicate address indicates a double-add or a missed remove.
        WARN_ON(new->addr == node->addr);

        rb_parent = *rb_node;

        if (new->addr < node->addr)
            rb_node = &(*rb_node)->rb_left;
        else
            rb_node = &(*rb_node)->rb_right;
    }

    rb_link_node(&new->rb_node, rb_parent, rb_node);
    rb_insert_color(&new->rb_node, &g_nv_memdbg.rb_root);
}
/*
 * Find and unlink the tracking node for 'addr'.
 * Caller holds g_nv_memdbg.lock (see nv_memdbg_remove).
 *
 * Returns the detached node, or NULL when 'addr' is not tracked (e.g. the
 * node allocation failed in nv_memdbg_add(), so the allocation only exists
 * in the untracked totals).
 *
 * Fix: the previous version called rb_erase() unconditionally after the
 * search loop, which dereferenced NULL when the tree was empty and erased
 * an unrelated node when the address was not found.  Now the erase happens
 * only on an exact match, and the not-found case warns and returns NULL so
 * the caller can fall back to the untracked counters.
 */
static nv_memdbg_node_t *nv_memdbg_remove_node(void *addr)
{
    struct rb_node *rb_node = g_nv_memdbg.rb_root.rb_node;

    while (rb_node)
    {
        nv_memdbg_node_t *node = nv_memdbg_node_entry(rb_node);

        if (addr == node->addr)
        {
            rb_erase(&node->rb_node, &g_nv_memdbg.rb_root);
            return node;
        }

        rb_node = (addr < node->addr) ? rb_node->rb_left : rb_node->rb_right;
    }

    // Not found: either an add/remove imbalance or an untracked allocation.
    WARN_ON(1);
    return NULL;
}
// Record an allocation of 'size' bytes at 'addr' made at file:line.
// If the tracking node itself cannot be allocated, the allocation is still
// counted in the untracked totals so teardown accounting stays consistent.
void nv_memdbg_add(void *addr, NvU64 size, const char *file, int line)
{
    nv_memdbg_node_t *node;
    unsigned long flags;

    // Failed allocations (NULL) are not tracked.
    if (addr == NULL)
    {
        return;
    }

    /* If node allocation fails, we can still update the untracked counters */
    node = kmalloc(sizeof(*node),
                   NV_MAY_SLEEP() ? NV_GFP_KERNEL : NV_GFP_ATOMIC);
    if (node)
    {
        node->addr = addr;
        node->size = size;
        node->file = file;
        node->line = line;

#if NV_MEM_LOGGER_STACK_TRACE == 1
        memset(node->stack_trace, '\0', sizeof(node->stack_trace));
        stack_trace_save(node->stack_trace, NV_ARRAY_ELEMENTS(node->stack_trace), 0);
#endif
    }

    NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags);

    if (node)
    {
        nv_memdbg_insert_node(node);
    }
    else
    {
        ++g_nv_memdbg.num_untracked_allocs;
        g_nv_memdbg.untracked_bytes += size;
    }

    NV_SPIN_UNLOCK_IRQRESTORE(&g_nv_memdbg.lock, flags);
}
// Forget the allocation at 'addr' and verify that the freed size matches the
// recorded size ('size' == 0 skips the check).  file/line identify the free
// site; currently only the allocation site is reported on mismatch.
void nv_memdbg_remove(void *addr, NvU64 size, const char *file, int line)
{
    nv_memdbg_node_t *node;
    unsigned long flags;

    if (addr == NULL)
    {
        return;
    }

    NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags);

    node = nv_memdbg_remove_node(addr);
    if (!node)
    {
        // The allocation was never tracked (its node allocation failed in
        // nv_memdbg_add), so adjust the untracked totals instead.
        WARN_ON(g_nv_memdbg.num_untracked_allocs == 0);
        WARN_ON(g_nv_memdbg.untracked_bytes < size);
        --g_nv_memdbg.num_untracked_allocs;
        g_nv_memdbg.untracked_bytes -= size;
    }

    NV_SPIN_UNLOCK_IRQRESTORE(&g_nv_memdbg.lock, flags);

    if (node)
    {
        // Report alloc/free size mismatches, with the allocation site if known.
        if ((size != 0) && (node->size != size))
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: size mismatch on free: %llu != %llu\n",
                size, node->size);
            if (node->file)
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: allocation: 0x%p @ %s:%d\n",
                    node->addr, node->file, node->line);
            }
            else
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: allocation: 0x%p\n",
                    node->addr);
            }
            os_dbg_breakpoint();
        }

        kfree(node);
    }
}
// Teardown-time leak report: print and free every still-tracked allocation,
// then summarize the totals, including untracked allocations.
void nv_memdbg_exit(void)
{
    nv_memdbg_node_t *node;
    NvU64 leaked_bytes = 0, num_leaked_allocs = 0;

    if (!RB_EMPTY_ROOT(&g_nv_memdbg.rb_root))
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: list of leaked memory allocations:\n");
    }

    // Drain the tree one node at a time, reporting each leak.
    while (!RB_EMPTY_ROOT(&g_nv_memdbg.rb_root))
    {
        node = nv_memdbg_node_entry(rb_first(&g_nv_memdbg.rb_root));

        leaked_bytes += node->size;
        ++num_leaked_allocs;

        if (node->file)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %llu bytes, 0x%p @ %s:%d\n",
                node->size, node->addr, node->file, node->line);
        }
        else
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %llu bytes, 0x%p\n",
                node->size, node->addr);
        }

#if NV_MEM_LOGGER_STACK_TRACE == 1
        stack_trace_print(node->stack_trace, NV_ARRAY_ELEMENTS(node->stack_trace), 1);
#endif

        rb_erase(&node->rb_node, &g_nv_memdbg.rb_root);
        kfree(node);
    }

    /* If we failed to allocate a node at some point, we may have leaked memory
     * even if the tree is empty */
    if (num_leaked_allocs > 0 || g_nv_memdbg.num_untracked_allocs > 0)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: total leaked memory: %llu bytes in %llu allocations\n",
            leaked_bytes + g_nv_memdbg.untracked_bytes,
            num_leaked_allocs + g_nv_memdbg.num_untracked_allocs);

        if (g_nv_memdbg.num_untracked_allocs > 0)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %llu bytes in %llu allocations untracked\n",
                g_nv_memdbg.untracked_bytes, g_nv_memdbg.num_untracked_allocs);
        }
    }
}

/* ==================== next file: nv mmap implementation ==================== */
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "nv_speculation_barrier.h"
/*
* The 'struct vm_operations' open() callback is called by the Linux
* kernel when the parent VMA is split or copied, close() when the
* current VMA is about to be deleted.
*
* We implement these callbacks to keep track of the number of user
* mappings of system memory allocations. This was motivated by a
* subtle interaction problem between the driver and the kernel with
* respect to the bookkeeping of pages marked reserved and later
* mapped with mmap().
*
* Traditionally, the Linux kernel ignored reserved pages, such that
* when they were mapped via mmap(), the integrity of their usage
* counts depended on the reserved bit being set for as long as user
* mappings existed.
*
* Since we mark system memory pages allocated for DMA reserved and
* typically map them with mmap(), we need to ensure they remain
* reserved until the last mapping has been torn down. This worked
* correctly in most cases, but in a few, the RM API called into the
* RM to free memory before calling munmap() to unmap it.
*
* In the past, we allowed nv_free_pages() to remove the 'at' from
* the parent device's allocation list in this case, but didn't
* release the underlying pages until the last user mapping had been
* destroyed:
*
* In nvidia_vma_release(), we freed any resources associated with
* the allocation (IOMMU mappings, etc.) and cleared the
* underlying pages' reserved bits, but didn't free them. The kernel
* was expected to do this.
*
* This worked in practise, but made dangerous assumptions about the
* kernel's behavior and could fail in some cases. We now handle
* this case differently (see below).
*/
/*
 * open() VMA callback: invoked when the kernel splits or copies the parent
 * VMA.  Takes another usage-count reference on the backing allocation, when
 * one is attached to the VMA.
 */
static void
nvidia_vma_open(struct vm_area_struct *vma)
{
    nv_alloc_t *at = NV_VMA_PRIVATE(vma);

    NV_PRINT_VMA(NV_DBG_MEMINFO, vma);

    if (at == NULL)
        return;

    NV_ATOMIC_INC(at->usage_count);
    NV_PRINT_AT(NV_DBG_MEMINFO, at);
}
/*
* (see above for additional information)
*
* If the 'at' usage count drops to zero with the updated logic, the
* the allocation is recorded in the free list of the private
* data associated with the file pointer; nvidia_close() uses this
* list to perform deferred free operations when the parent file
* descriptor is closed. This will typically happen when the process
* exits.
*
* Since this is technically a workaround to handle possible fallout
* from misbehaving clients, we additionally print a warning.
*/
// close() VMA callback: drops the mapping's reference on the allocation via
// nv_alloc_release(); see the comment block above for the deferred-free
// design and why a "late unmap" warning is printed.
static void
nvidia_vma_release(struct vm_area_struct *vma)
{
    nv_alloc_t *at = NV_VMA_PRIVATE(vma);
    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma));
    static int count = 0;   // rate-limits the recurring warning below

    NV_PRINT_VMA(NV_DBG_MEMINFO, vma);

    if (at != NULL && nv_alloc_release(nvlfp, at))
    {
        // Warn (a bounded number of times) when the owning process unmaps
        // after having already freed the allocation.
        if ((at->pid == os_get_current_process()) &&
            (count++ < NV_MAX_RECURRING_WARNING_MESSAGES))
        {
            nv_printf(NV_DBG_MEMINFO,
                "NVRM: VM: %s: late unmap, comm: %s, 0x%p\n",
                __FUNCTION__, current->comm, at);
        }
    }
}
/*
 * access() VMA callback (used by e.g. ptrace and /proc/<pid>/mem): copy up
 * to one page of data between 'buffer' and the mapped memory at 'addr'.
 *
 * Returns the number of bytes copied, or a negative errno.  Control-node
 * mappings are accessed through the allocation's existing kernel virtual
 * addresses; device mappings are accessed through a temporary uncached
 * kernel mapping of the matching physical range.
 */
static int
nvidia_vma_access(
    struct vm_area_struct *vma,
    unsigned long addr,
    void *buffer,
    int length,
    int write
)
{
    nv_alloc_t *at = NULL;
    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma));
    nv_state_t *nv = NV_STATE_PTR(nvlfp->nvptr);
    NvU32 pageIndex, pageOffset;
    void *kernel_mapping;
    const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context;
    NvU64 offsInVma = addr - vma->vm_start;

    // Split the VMA-relative offset into page index and in-page offset.
    pageIndex = (offsInVma >> PAGE_SHIFT);
    pageOffset = (offsInVma & ~PAGE_MASK);

    if (length < 0)
    {
        return -EINVAL;
    }
    if (!mmap_context->valid)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap context\n");
        return -EINVAL;
    }
    // Enforce the mapping's write protection for debugger writes too.
    if (write && !(mmap_context->prot & NV_PROTECT_WRITEABLE))
    {
        return -EACCES;
    }

    if (nv->flags & NV_FLAG_CONTROL)
    {
        // Control node: system memory with kernel VAs in the page table.
        at = NV_VMA_PRIVATE(vma);

        /*
         * at can be NULL for peer IO mem.
         */
        if (!at)
            return -EINVAL;

        if (pageIndex >= at->num_pages)
            return -EINVAL;

        // Clamp the index against speculative out-of-bounds access.
        pageIndex = nv_array_index_no_speculate(pageIndex, at->num_pages);
        kernel_mapping = (void *)(at->page_table[pageIndex].virt_addr + pageOffset);
    }
    else
    {
        // Device node: walk the mapping's physical ranges to find the one
        // covering offsInVma, then map that page into the kernel.
        NvU64 idx = 0;
        NvU64 curOffs = 0;
        for(; idx < mmap_context->memArea.numRanges; idx++)
        {
            NvU64 nextOffs = mmap_context->memArea.pRanges[idx].size + curOffs;
            if (curOffs <= offsInVma && nextOffs > offsInVma)
            {
                NvU64 realAddr = offsInVma - curOffs + mmap_context->memArea.pRanges[idx].start;
                addr = realAddr & PAGE_MASK;
                goto found;
            }
            curOffs = nextOffs;
        }
        return -EINVAL;
found:
        kernel_mapping = os_map_kernel_space(addr, PAGE_SIZE, NV_MEMORY_UNCACHED);
        if (kernel_mapping == NULL)
            return -ENOMEM;
        kernel_mapping = ((char *)kernel_mapping + pageOffset);
    }

    // Never copy past the end of the current page.
    length = NV_MIN(length, (int)(PAGE_SIZE - pageOffset));

    if (write)
        memcpy(kernel_mapping, buffer, length);
    else
        memcpy(buffer, kernel_mapping, length);

    // The temporary kernel mapping exists only on the device-node path
    // (at == NULL); undo the pageOffset bias before unmapping.
    if (at == NULL)
    {
        kernel_mapping = ((char *)kernel_mapping - pageOffset);
        os_unmap_kernel_space(kernel_mapping, PAGE_SIZE);
    }

    return length;
}
// .fault handler for GPU (non-control) mappings: reinstates the PFN mappings
// on demand after they were revoked (e.g. for power management), waking the
// GPU first via a scheduled callback when it is not currently safe to mmap.
static vm_fault_t nvidia_fault(
#if !defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
    struct vm_area_struct *vma,
#endif
    struct vm_fault *vmf
)
{
#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
    struct vm_area_struct *vma = vmf->vma;
#endif
    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma));
    nv_linux_state_t *nvl = nvlfp->nvptr;
    nv_state_t *nv = NV_STATE_PTR(nvl);
    vm_fault_t ret = VM_FAULT_NOPAGE;

    // Only zero-offset mappings are created by nvidia_mmap_helper().
    if (vma->vm_pgoff != 0)
    {
        return VM_FAULT_SIGBUS;
    }

    // Mapping revocation is only supported for GPU mappings.
    if (NV_IS_CTL_DEVICE(nv))
    {
        return VM_FAULT_SIGBUS;
    }

    // Wake up GPU and reinstate mappings only if we are not in S3/S4 entry
    if (!down_read_trylock(&nv_system_pm_lock))
    {
        return VM_FAULT_NOPAGE;
    }

    down(&nvl->mmap_lock);

    // Wake up the GPU if it is not currently safe to mmap.
    if (!nvl->safe_to_mmap)
    {
        NV_STATUS status;

        if (!nvl->gpu_wakeup_callback_needed)
        {
            // GPU wakeup callback already scheduled.
            up(&nvl->mmap_lock);
            up_read(&nv_system_pm_lock);
            return VM_FAULT_NOPAGE;
        }

        /*
         * GPU wakeup cannot be completed directly in the fault handler due to the
         * inability to take the GPU lock while mmap_lock is held.
         */
        status = rm_schedule_gpu_wakeup(nvl->sp[NV_DEV_STACK_GPU_WAKEUP], nv);
        if (status != NV_OK)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: VM: rm_schedule_gpu_wakeup failed: %x\n", status);
            up(&nvl->mmap_lock);
            up_read(&nv_system_pm_lock);
            return VM_FAULT_SIGBUS;
        }

        // Ensure that we do not schedule duplicate GPU wakeup callbacks.
        nvl->gpu_wakeup_callback_needed = NV_FALSE;

        up(&nvl->mmap_lock);
        up_read(&nv_system_pm_lock);
        return VM_FAULT_NOPAGE;
    }

    {
        NvU64 idx;
        NvU64 curOffs = 0;
        NvBool bRevoked = NV_TRUE;
        nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context;

        // Re-insert every PFN covered by the mapping context's ranges.
        for(idx = 0; idx < mmap_context->memArea.numRanges; idx++)
        {
            NvU64 nextOffs = curOffs + mmap_context->memArea.pRanges[idx].size;
            NvU64 pfn = mmap_context->memArea.pRanges[idx].start >> PAGE_SHIFT;
            NvU64 numPages = mmap_context->memArea.pRanges[idx].size >> PAGE_SHIFT;

            while (numPages != 0)
            {
                ret = nv_insert_pfn(vma, curOffs + vma->vm_start, pfn);
                if (ret != VM_FAULT_NOPAGE)
                {
                    goto err;
                }
                // At least one PFN has been reinstated.
                bRevoked = NV_FALSE;
                curOffs += PAGE_SIZE;
                pfn++;
                numPages--;
            }
            curOffs = nextOffs;
        }
err:
        // Mappings stay marked revoked only if nothing was reinstated.
        nvl->all_mappings_revoked &= bRevoked;
    }

    up(&nvl->mmap_lock);
    up_read(&nv_system_pm_lock);

    return ret;
}
// VMA callbacks installed on every mapping created by nvidia_mmap_helper().
static struct vm_operations_struct nv_vm_ops = {
    .open = nvidia_vma_open,
    .close = nvidia_vma_release,
    .fault = nvidia_fault,
    .access = nvidia_vma_access,
};
/*
 * Translate (cache_type, memory_type) into caching bits applied to *prot.
 * A NULL prot only validates that the combination is supported.
 *
 * Returns 0 on success, 1 when the requested caching cannot be provided for
 * the given memory type (callers may retry with a weaker cache type).
 */
int nv_encode_caching(
    pgprot_t *prot,
    NvU32 cache_type,
    nv_memory_type_t memory_type
)
{
    pgprot_t tmp;

    // With no output pgprot, run the validation logic against scratch storage.
    if (prot == NULL)
    {
        tmp = __pgprot(0);
        prot = &tmp;
    }

    switch (cache_type)
    {
        case NV_MEMORY_UNCACHED_WEAK:
#if defined(NV_PGPROT_UNCACHED_WEAK)
            *prot = NV_PGPROT_UNCACHED_WEAK(*prot);
            break;
#endif
            /* fallthrough: without UC- support, treat as plain uncached */
        case NV_MEMORY_UNCACHED:
            *prot = (memory_type == NV_MEMORY_TYPE_SYSTEM) ?
                    NV_PGPROT_UNCACHED(*prot) :
                    NV_PGPROT_UNCACHED_DEVICE(*prot);
            break;
#if defined(NV_PGPROT_WRITE_COMBINED) && \
    defined(NV_PGPROT_WRITE_COMBINED_DEVICE)
        case NV_MEMORY_DEFAULT:
        case NV_MEMORY_WRITECOMBINED:
            if (NV_ALLOW_WRITE_COMBINING(memory_type))
            {
                *prot = (memory_type == NV_MEMORY_TYPE_FRAMEBUFFER) ?
                        NV_PGPROT_WRITE_COMBINED_DEVICE(*prot) :
                        NV_PGPROT_WRITE_COMBINED(*prot);
                break;
            }

            /*
             * If WC support is unavailable, we need to return an error
             * code to the caller, but need not print a warning.
             *
             * For frame buffer memory, callers are expected to use the
             * UC- memory type if we report WC as unsupported, which
             * translates to the effective memory type WC if a WC MTRR
             * exists or else UC.
             */
            return 1;
#endif
        case NV_MEMORY_CACHED:
            if (!NV_ALLOW_CACHING(memory_type))
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: VM: memory type %d does not allow caching!\n",
                    memory_type);
                return 1;
            }
            break;
        default:
            nv_printf(NV_DBG_ERRORS,
                "NVRM: VM: cache type %d not supported for memory type %d!\n",
                cache_type, memory_type);
            return 1;
    }

    return 0;
}
/*
 * Map a physically contiguous peer-I/O allocation into user space, starting
 * at page_index within the allocation.  Returns the nv_io_remap_page_range()
 * result (0 on success).
 */
static int nvidia_mmap_peer_io(
    struct vm_area_struct *vma,
    nv_alloc_t *at,
    NvU64 page_index,
    NvU64 pages
)
{
    NvU64 phys_start;
    NvU64 mmap_length;

    // Peer I/O allocations are required to be physically contiguous.
    BUG_ON(!at->flags.contig);

    phys_start = at->page_table[page_index].phys_addr;
    mmap_length = pages * PAGE_SIZE;

    return nv_io_remap_page_range(vma, phys_start, mmap_length, vma->vm_start);
}
/*
 * Map a system-memory allocation into user space page by page, starting at
 * page_index within the allocation.  Takes a usage-count reference on the
 * allocation; the reference is dropped here on failure, otherwise when the
 * VMA is released.  Returns 0 on success, -EAGAIN on mapping failure.
 */
static int nvidia_mmap_sysmem(
    struct vm_area_struct *vma,
    nv_alloc_t *at,
    NvU64 page_index,
    NvU64 pages
)
{
    NvU64 j;
    int ret = 0;
    unsigned long start = 0;

    NV_ATOMIC_INC(at->usage_count);

    start = vma->vm_start;
    for (j = page_index; j < (page_index + pages); j++)
    {
        // Clamp the index against speculative out-of-bounds access.
        j = nv_array_index_no_speculate(j, (page_index + pages));

        //
        // nv_remap_page_range() map a contiguous physical address space
        // into the user virtual space.
        // Use PFN based mapping api to create the mapping for
        // reserved carveout (OS invisible memory, not managed by OS) too.
        // Basically nv_remap_page_range() works for all kind of memory regions.
        // Imported buffer can be either from OS or Non OS managed regions (reserved carveout).
        // nv_remap_page_range() works well for all type of import buffers.
        //
        if (
#if defined(NV_VGPU_KVM_BUILD)
            at->flags.guest ||
#endif
            at->flags.carveout || at->import_sgt)
        {
            ret = nv_remap_page_range(vma, start, at->page_table[j].phys_addr,
                                      PAGE_SIZE, vma->vm_page_prot);
        }
        else
        {
            if (at->flags.unencrypted)
                vma->vm_page_prot = nv_adjust_pgprot(vma->vm_page_prot);

            ret = vm_insert_page(vma, start,
                                 NV_GET_PAGE_STRUCT(at->page_table[j].phys_addr));
        }

        if (ret)
        {
            // Drop the reference taken above before failing.
            NV_ATOMIC_DEC(at->usage_count);
            nv_printf(NV_DBG_ERRORS,
                "NVRM: Userspace mapping creation failed [%d]!\n", ret);
            return -EAGAIN;
        }

        start += PAGE_SIZE;
    }

    return ret;
}
/*
 * Map NUMA-onlined pages from the mmap context's page array into user space
 * with vm_insert_page().
 *
 * Returns 0 on success, -EINVAL if the context does not hold enough pages to
 * cover the VMA, or -EAGAIN if a page insertion fails.
 *
 * Fix: the previous loop advanced with "addr = page_array[++i]" in the
 * for-update expression, which read page_array[pages] — one element past the
 * data actually used — on the final update whenever num_pages == pages.
 * Index inside the loop body instead, so only elements [0, pages) are read.
 */
static int nvidia_mmap_numa(
    struct vm_area_struct *vma,
    const nv_alloc_mapping_context_t *mmap_context)
{
    NvU64 start;
    NvU64 pages;
    NvU64 i;

    pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT;
    start = vma->vm_start;

    // The context must supply at least one page per VMA page.
    if (mmap_context->num_pages < pages)
    {
        return -EINVAL;
    }

    // Needed for the linux kernel for mapping compound pages
    nv_vm_flags_set(vma, VM_MIXEDMAP);

    for (i = 0; i < pages; i++, start += PAGE_SIZE)
    {
        if (vm_insert_page(vma, start,
                           NV_GET_PAGE_STRUCT(mmap_context->page_array[i])) != 0)
        {
            return -EAGAIN;
        }
    }

    return 0;
}
/*
 * Create the user mapping described by nvlfp->mmap_context, which must have
 * been validated with the RM beforehand.  Device nodes (nvidia#) map BAR
 * memory (registers/FB); the control node (nvidiactl) maps system memory.
 *
 * Returns 0 on success, a negative errno on failure.
 *
 * NOTE(review): nvlfp is dereferenced (for mmap_context) before the NULL
 * check below, and that check returns NV_ERR_INVALID_ARGUMENT (a positive
 * NV_STATUS) where every other failure path returns -errno -- confirm
 * callers never pass NULL, and consider unifying the error convention.
 */
int nvidia_mmap_helper(
    nv_state_t *nv,
    nv_linux_file_private_t *nvlfp,
    nvidia_stack_t *sp,
    struct vm_area_struct *vma,
    void *vm_priv
)
{
    NvU32 prot = 0;
    int ret;
    const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status;

    if (nvlfp == NULL)
        return NV_ERR_INVALID_ARGUMENT;

    /*
     * If mmap context is not valid on this file descriptor, this mapping wasn't
     * previously validated with the RM so it must be rejected.
     */
    if (!mmap_context->valid)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap\n");
        return -EINVAL;
    }

    // Only zero-offset mappings are accepted.
    if (vma->vm_pgoff != 0)
    {
        return -EINVAL;
    }

    NV_PRINT_VMA(NV_DBG_MEMINFO, vma);

    status = nv_check_gpu_state(nv);
    if (status != NV_OK)
    {
        NV_DEV_PRINTF(NV_DBG_INFO, nv,
            "GPU is lost, skipping nvidia_mmap_helper\n");
        return status;
    }

    NV_VMA_PRIVATE(vma) = vm_priv;

    prot = mmap_context->prot;

    /*
     * Nvidia device node(nvidia#) maps device's BAR memory,
     * Nvidia control node(nvidiactrl) maps system memory.
     */
    if (!NV_IS_CTL_DEVICE(nv))
    {
        NvU64 access_start = mmap_context->access_start;
        NvU64 access_len = mmap_context->access_size;

        // Ensure size is correct.
        if (NV_VMA_SIZE(vma) != memareaSize(mmap_context->memArea))
        {
            return -ENXIO;
        }

        // Select page protections by BAR region: registers are always
        // uncached; FB honors the requested caching with a UC- fallback.
        if (IS_REG_OFFSET(nv, access_start, access_len))
        {
            if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED,
                                  NV_MEMORY_TYPE_REGISTERS))
            {
                return -ENXIO;
            }
        }
        else if (IS_FB_OFFSET(nv, access_start, access_len))
        {
            if (IS_UD_OFFSET(nv, access_start, access_len))
            {
                if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED,
                                      NV_MEMORY_TYPE_FRAMEBUFFER))
                {
                    return -ENXIO;
                }
            }
            else
            {
                if (nv_encode_caching(&vma->vm_page_prot,
                        rm_disable_iomap_wc() ? NV_MEMORY_UNCACHED : mmap_context->caching,
                        NV_MEMORY_TYPE_FRAMEBUFFER))
                {
                    // Requested caching unsupported: retry with UC-.
                    if (nv_encode_caching(&vma->vm_page_prot,
                            NV_MEMORY_UNCACHED_WEAK, NV_MEMORY_TYPE_FRAMEBUFFER))
                    {
                        return -ENXIO;
                    }
                }
            }
        }

        down(&nvl->mmap_lock);
        if (nvl->safe_to_mmap)
        {
            nvl->all_mappings_revoked = NV_FALSE;

            //
            // This path is similar to the sysmem mapping code.
            // TODO: Refactor is needed as part of bug#2001704.
            //
            if ((nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE) &&
                !IS_REG_OFFSET(nv, access_start, access_len) &&
                (mmap_context->num_pages != 0))
            {
                ret = nvidia_mmap_numa(vma, mmap_context);
                if (ret)
                {
                    up(&nvl->mmap_lock);
                    return ret;
                }
            }
            else
            {
                // Remap each physical range at its offset within the VMA.
                NvU64 idx = 0;
                NvU64 curOffs = 0;
                for(; idx < mmap_context->memArea.numRanges; idx++)
                {
                    NvU64 nextOffs = curOffs + mmap_context->memArea.pRanges[idx].size;
                    if (nv_io_remap_page_range(vma,
                            mmap_context->memArea.pRanges[idx].start,
                            mmap_context->memArea.pRanges[idx].size,
                            vma->vm_start + curOffs) != 0)
                    {
                        up(&nvl->mmap_lock);
                        return -EAGAIN;
                    }
                    curOffs = nextOffs;
                }
            }
        }
        // When !safe_to_mmap the PTEs are left unpopulated here;
        // nvidia_fault() reinstates them later.
        up(&nvl->mmap_lock);

        nv_vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND);
    }
    else
    {
        nv_alloc_t *at;
        NvU64 page_index;
        NvU64 pages;
        NvU64 mmap_size;

        at = (nv_alloc_t *)mmap_context->alloc;
        page_index = mmap_context->page_index;
        mmap_size = NV_VMA_SIZE(vma);
        pages = mmap_size >> PAGE_SHIFT;

        // The requested window must fit within the allocation.
        if ((page_index + pages) > at->num_pages)
        {
            return -ERANGE;
        }

        /*
         * Callers that pass in non-NULL VMA private data must never reach this
         * code. They should be mapping on a non-control node.
         */
        BUG_ON(NV_VMA_PRIVATE(vma));

        if (at->flags.peer_io)
        {
            if (nv_encode_caching(&vma->vm_page_prot,
                                  at->cache_type,
                                  NV_MEMORY_TYPE_DEVICE_MMIO))
            {
                return -ENXIO;
            }

            /*
             * There is no need to keep 'peer IO at' alive till vma_release like
             * 'sysmem at' because there are no security concerns where a client
             * could free RM allocated sysmem before unmapping it. Hence, vm_ops
             * are NOP, and at->usage_count is never being used.
             */
            NV_VMA_PRIVATE(vma) = NULL;

            ret = nvidia_mmap_peer_io(vma, at, page_index, pages);

            BUG_ON(NV_VMA_PRIVATE(vma));

            if (ret)
            {
                return ret;
            }

            NV_PRINT_AT(NV_DBG_MEMINFO, at);

            nv_vm_flags_set(vma, VM_IO);
            nv_vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
        }
        else
        {
            if (nv_encode_caching(&vma->vm_page_prot,
                                  at->cache_type,
                                  NV_MEMORY_TYPE_SYSTEM))
            {
                return -ENXIO;
            }

            NV_VMA_PRIVATE(vma) = at;

            ret = nvidia_mmap_sysmem(vma, at, page_index, pages);
            if (ret)
            {
                return ret;
            }

            NV_PRINT_AT(NV_DBG_MEMINFO, at);

            //
            // VM_MIXEDMAP will be set by vm_insert_page() in nvidia_mmap_sysmem().
            // VM_SHARED is added to avoid any undesired copy-on-write effects.
            //
            nv_vm_flags_set(vma, VM_SHARED);
            nv_vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
        }
    }

    // Drop write permissions for read-only mappings.
    if ((prot & NV_PROTECT_WRITEABLE) == 0)
    {
        vma->vm_page_prot = NV_PGPROT_READ_ONLY(vma->vm_page_prot);
        nv_vm_flags_clear(vma, VM_WRITE);
        nv_vm_flags_clear(vma, VM_MAYWRITE);
    }

    vma->vm_ops = &nv_vm_ops;

    return 0;
}
int nvidia_mmap(
    struct file *file,
    struct vm_area_struct *vma
)
{
    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file);
    nv_linux_state_t *nvl;
    nv_state_t *nv;
    nvidia_stack_t *sp = NULL;
    int rc;

    /*
     * An fd that has been used to export RM objects must never be
     * mmap()ed; reject it outright.
     */
    if (nvlfp->nvfp.handles != NULL)
        return -EINVAL;

    /* Per-GPU nodes: wait for the deferred open to finish first. */
    if (!nv_is_control_device(NV_FILE_INODE(file)))
    {
        rc = nv_wait_open_complete_interruptible(nvlfp);
        if (rc != 0)
            return rc;
    }

    nvl = nvlfp->nvptr;
    if (nvl == NULL)
        return -EIO;

    nv = NV_STATE_PTR(nvl);

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Unable to allocate altstack for mmap\n");
        return rc;
    }

    rc = nvidia_mmap_helper(nv, nvlfp, sp, vma, NULL);

    nv_kmem_cache_free_stack(sp);

    return rc;
}
void
nv_revoke_gpu_mappings_locked(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    nv_linux_file_private_t *filp;

    /*
     * Tear down the CPU mappings established through every open file of
     * this device, then record that revocation happened so code that
     * checks all_mappings_revoked under nvl->mmap_lock can see it.
     */
    list_for_each_entry (filp, &nvl->open_files, entry)
    {
        unmap_mapping_range(&filp->mapping, 0, ~0, 1);
    }

    nvl->all_mappings_revoked = NV_TRUE;
}
NV_STATUS NV_API_CALL nv_revoke_gpu_mappings(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status = NV_OK;

    if (NV_IS_CTL_DEVICE(nv))
    {
        /* Revocation only applies to per-GPU mappings, not the control node. */
        status = NV_ERR_NOT_SUPPORTED;
    }
    else
    {
        down(&nvl->mmap_lock);
        nv_revoke_gpu_mappings_locked(nv);
        up(&nvl->mmap_lock);
    }

    return status;
}
void NV_API_CALL nv_acquire_mmap_lock(
    nv_state_t *nv
)
{
    /* Blocks until the per-device mmap semaphore is held. */
    down(&NV_GET_NVL_FROM_NV_STATE(nv)->mmap_lock);
}
void NV_API_CALL nv_release_mmap_lock(
    nv_state_t *nv
)
{
    /* Counterpart of nv_acquire_mmap_lock(). */
    up(&NV_GET_NVL_FROM_NV_STATE(nv)->mmap_lock);
}
NvBool NV_API_CALL nv_get_all_mappings_revoked_locked(
    nv_state_t *nv
)
{
    /*
     * Reports whether a full mapping revocation has occurred. The caller
     * must hold nvl->mmap_lock for any decision based on this value.
     */
    return NV_GET_NVL_FROM_NV_STATE(nv)->all_mappings_revoked;
}
void NV_API_CALL nv_set_safe_to_mmap_locked(
    nv_state_t *nv,
    NvBool safe_to_mmap
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvBool was_safe = nvl->safe_to_mmap;

    /* Caller must hold nvl->mmap_lock. */

    /*
     * A TRUE -> FALSE transition means we expect to need a GPU wakeup
     * callback the next time a fault arrives. nvidia_fault() clears
     * gpu_wakeup_callback_needed after scheduling the callback, which
     * keeps duplicates from being scheduled.
     */
    if (was_safe && !safe_to_mmap)
    {
        nvl->gpu_wakeup_callback_needed = NV_TRUE;
    }

    nvl->safe_to_mmap = safe_to_mmap;
}

View File

@@ -0,0 +1,170 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-modeset-interface.h"
#include "os-interface.h"
#include "nv-linux.h"
#include "nvstatus.h"
#include "nv.h"
static const nvidia_modeset_callbacks_t *nv_modeset_callbacks;
/* Allocate an altstack on behalf of nvidia-modeset (thin wrapper). */
static int nvidia_modeset_rm_ops_alloc_stack(nvidia_stack_t **sp)
{
    return nv_kmem_cache_alloc_stack(sp);
}
/* Free an altstack previously allocated via the alloc_stack rm_ops entry.
 * NULL is tolerated and ignored. */
static void nvidia_modeset_rm_ops_free_stack(nvidia_stack_t *sp)
{
    if (sp != NULL)
    {
        nv_kmem_cache_free_stack(sp);
    }
}
static int nvidia_modeset_set_callbacks(const nvidia_modeset_callbacks_t *cb)
{
    /*
     * Registration and unregistration must strictly alternate: installing
     * a table while one is present, or clearing when none is present, is
     * an error. That is exactly the case where 'cb' and the current
     * pointer have the same NULL-ness.
     */
    if ((cb == NULL) == (nv_modeset_callbacks == NULL))
    {
        return -EINVAL;
    }

    nv_modeset_callbacks = cb;
    return 0;
}
void nvidia_modeset_suspend(NvU32 gpuId)
{
    /* No-op unless nvidia-modeset has registered its callback table. */
    if (nv_modeset_callbacks == NULL)
    {
        return;
    }
    nv_modeset_callbacks->suspend(gpuId);
}
void nvidia_modeset_resume(NvU32 gpuId)
{
    /* No-op unless nvidia-modeset has registered its callback table. */
    if (nv_modeset_callbacks == NULL)
    {
        return;
    }
    nv_modeset_callbacks->resume(gpuId);
}
void nvidia_modeset_remove(NvU32 gpuId)
{
    /* The 'remove' hook is optional even when callbacks are registered. */
    if (nv_modeset_callbacks == NULL || nv_modeset_callbacks->remove == NULL)
    {
        return;
    }
    nv_modeset_callbacks->remove(gpuId);
}
/* Fill an nv_gpu_info_t from the per-GPU linux state for nvidia-modeset. */
static void nvidia_modeset_get_gpu_info(nv_gpu_info_t *gpu_info,
                                        const nv_linux_state_t *nvl)
{
    nv_state_t *nv = NV_STATE_PTR(nvl);

    gpu_info->gpu_id        = nv->gpu_id;
    gpu_info->os_device_ptr = nvl->dev;

    gpu_info->pci_info.domain   = nv->pci_info.domain;
    gpu_info->pci_info.bus      = nv->pci_info.bus;
    gpu_info->pci_info.slot     = nv->pci_info.slot;
    gpu_info->pci_info.function = nv->pci_info.function;
}
void nvidia_modeset_probe(const nv_linux_state_t *nvl)
{
if (nv_modeset_callbacks && nv_modeset_callbacks->probe)
{
nv_gpu_info_t gpu_info;
nvidia_modeset_get_gpu_info(&gpu_info, nvl);
nv_modeset_callbacks->probe(&gpu_info);
}
}
/*
 * Enumerate all known GPUs into the caller's gpu_info[] array.
 *
 * Returns the number of entries filled, or 0 if more than NV_MAX_GPUS
 * devices exist (in which case nothing useful is reported).
 */
static NvU32 nvidia_modeset_enumerate_gpus(nv_gpu_info_t *gpu_info)
{
    nv_linux_state_t *nvl;
    unsigned int count;

    LOCK_NV_LINUX_DEVICES();

    count = 0;
    for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next)
    {
        /*
         * The gpu_info[] array has NV_MAX_GPUS elements. Fail if there
         * are more GPUs than that.
         */
        if (count >= NV_MAX_GPUS) {
            /* Fix: terminate the warning with '\n', consistent with the
             * other NVRM log messages in this driver. */
            nv_printf(NV_DBG_WARNINGS, "NVRM: More than %d GPUs found.\n",
                      NV_MAX_GPUS);
            count = 0;
            break;
        }

        nvidia_modeset_get_gpu_info(&gpu_info[count], nvl);
        count++;
    }

    UNLOCK_NV_LINUX_DEVICES();

    return count;
}
NV_STATUS nvidia_get_rm_ops(nvidia_modeset_rm_ops_t *rm_ops)
{
    /*
     * Function table handed to nvidia-modeset. allow_write_combining is
     * conservatively FALSE here and upgraded below when the platform
     * supports WC mappings of framebuffer memory.
     */
    const nvidia_modeset_rm_ops_t local_rm_ops = {
        .version_string = NV_VERSION_STRING,
        .system_info = {
            .allow_write_combining = NV_FALSE,
        },
        .alloc_stack     = nvidia_modeset_rm_ops_alloc_stack,
        .free_stack      = nvidia_modeset_rm_ops_free_stack,
        .enumerate_gpus  = nvidia_modeset_enumerate_gpus,
        .open_gpu        = nvidia_dev_get,
        .close_gpu       = nvidia_dev_put,
        .op              = rm_kernel_rmapi_op, /* provided by nv-kernel.o */
        .set_callbacks   = nvidia_modeset_set_callbacks,
    };

    /* Reject mismatched builds, handing back this driver's version string. */
    if (strcmp(rm_ops->version_string, NV_VERSION_STRING) != 0)
    {
        rm_ops->version_string = NV_VERSION_STRING;
        return NV_ERR_GENERIC;
    }

    *rm_ops = local_rm_ops;

    if (NV_ALLOW_WRITE_COMBINING(NV_MEMORY_TYPE_FRAMEBUFFER))
    {
        rm_ops->system_info.allow_write_combining = NV_TRUE;
    }

    return NV_OK;
}
NV_EXPORT_SYMBOL(nvidia_get_rm_ops);

167
kernel-open/nvidia/nv-msi.c Normal file
View File

@@ -0,0 +1,167 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-msi.h"
#include "nv-proto.h"
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
/*
 * Enable MSI for this device and allocate the per-IRQ counter table.
 *
 * On success: the MSI vector becomes nv->interrupt_line, NV_FLAG_USES_MSI
 * is set, and one nv_irq_count_info_t counter is allocated.
 *
 * On either failure path NV_FLAG_USES_MSI is cleared so the caller falls
 * back to PCIe virtual-wire interrupts.
 * NOTE(review): when the counter allocation fails after pci_enable_msi()
 * succeeded, pci_disable_msi() is not called here — presumably handled by
 * a later teardown path; confirm.
 */
void NV_API_CALL nv_init_msi(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int rc = 0;

    rc = pci_enable_msi(nvl->pci_dev);
    if (rc == 0)
    {
        /* pci_dev->irq now holds the MSI vector assigned by the kernel. */
        nv->interrupt_line = nvl->pci_dev->irq;
        nv->flags |= NV_FLAG_USES_MSI;
        nvl->num_intr = 1;
        NV_KZALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * nvl->num_intr);

        if (nvl->irq_count == NULL)
        {
            nv->flags &= ~NV_FLAG_USES_MSI;
            NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                          "Failed to allocate counter for MSI entry; "
                          "falling back to PCIe virtual-wire interrupts.\n");
        }
        else
        {
            nvl->current_num_irq_tracked = 0;
        }
    }
    else
    {
        nv->flags &= ~NV_FLAG_USES_MSI;

        /* Only log when the device actually has a wire interrupt to fall
         * back to (irq != 0). */
        if (nvl->pci_dev->irq != 0)
        {
            NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                          "Failed to enable MSI; "
                          "falling back to PCIe virtual-wire interrupts.\n");
        }
    }

    return;
}
/*
 * Enable MSI-X for this device.
 *
 * Allocates the msix_entry table and the per-IRQ counter table, numbers
 * one entry per interrupt (clamped to NV_RM_MAX_MSIX_LINES), then enables
 * MSI-X via nv_pci_enable_msix(). On any failure, every partial allocation
 * is released and NV_FLAG_USES_MSIX is cleared.
 */
void NV_API_CALL nv_init_msix(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int num_intr = 0;
    struct msix_entry *msix_entries;
    int rc = 0;
    int i;

    NV_SPIN_LOCK_INIT(&nvl->msix_isr_lock);

    rc = os_alloc_mutex(&nvl->msix_bh_mutex);
    if (rc != 0)
        goto failed;

    /* Hardware-advertised vector count, clamped to what RM supports. */
    num_intr = nv_get_max_irq(nvl->pci_dev);

    if (num_intr > NV_RM_MAX_MSIX_LINES)
    {
        NV_DEV_PRINTF(NV_DBG_INFO, nv, "Reducing MSI-X count from %d to the "
                      "driver-supported maximum %d.\n", num_intr, NV_RM_MAX_MSIX_LINES);
        num_intr = NV_RM_MAX_MSIX_LINES;
    }

    NV_KMALLOC(nvl->msix_entries, sizeof(struct msix_entry) * num_intr);
    if (nvl->msix_entries == NULL)
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to allocate MSI-X entries.\n");
        goto failed;
    }

    /* Number the entries 0..num_intr-1; vectors are filled in when MSI-X
     * is enabled below. */
    for (i = 0, msix_entries = nvl->msix_entries; i < num_intr; i++, msix_entries++)
    {
        msix_entries->entry = i;
    }

    NV_KZALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr);
    if (nvl->irq_count == NULL)
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to allocate counter for MSI-X entries.\n");
        goto failed;
    }
    else
    {
        nvl->current_num_irq_tracked = 0;
    }

    rc = nv_pci_enable_msix(nvl, num_intr);
    if (rc != NV_OK)
        goto failed;

    nv->flags |= NV_FLAG_USES_MSIX;
    return;

failed:
    /* Unwind: release whatever was allocated before the failure point. */
    nv->flags &= ~NV_FLAG_USES_MSIX;

    if (nvl->msix_entries)
    {
        NV_KFREE(nvl->msix_entries, sizeof(struct msix_entry) * num_intr);
    }

    if (nvl->irq_count)
    {
        NV_KFREE(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr);
    }

    if (nvl->msix_bh_mutex)
    {
        os_free_mutex(nvl->msix_bh_mutex);
        nvl->msix_bh_mutex = NULL;
    }

    NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to enable MSI-X.\n");
}
/*
 * Request a threaded IRQ handler for every MSI-X vector of the device.
 *
 * Returns 0 on success. On failure of any request, all previously
 * requested vectors are freed before returning the failing code.
 * NOTE(review): rc is initialized to NV_ERR_INVALID_ARGUMENT (an NV_STATUS)
 * but the loop body assigns Linux errno values from request_threaded_irq();
 * the initial value is only returned when num_intr == 0 — confirm callers
 * treat nonzero uniformly as failure.
 */
NvS32 NV_API_CALL nv_request_msix_irq(nv_linux_state_t *nvl)
{
    int i;
    int j;
    struct msix_entry *msix_entries;
    int rc = NV_ERR_INVALID_ARGUMENT;
    nv_state_t *nv = NV_STATE_PTR(nvl);

    for (i = 0, msix_entries = nvl->msix_entries; i < nvl->num_intr;
         i++, msix_entries++)
    {
        rc = request_threaded_irq(msix_entries->vector, nvidia_isr_msix,
                                  nvidia_isr_msix_kthread_bh, nv_default_irq_flags(nv),
                                  nv_device_name, (void *)nvl);
        if (rc)
        {
            /* Roll back every vector requested so far. */
            for( j = 0; j < i; j++)
            {
                free_irq(nvl->msix_entries[j].vector, (void *)nvl);
            }
            break;
        }
    }

    return rc;
}
#endif

View File

@@ -0,0 +1,215 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include <linux/kernel.h> // For container_of
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/timer.h>
#include "os-interface.h"
#include "nv-linux.h"
#define NV_NANO_TIMER_USE_HRTIMER 1
/* Nanosecond-resolution one-shot timer wrapper. */
struct nv_nano_timer
{
#if NV_NANO_TIMER_USE_HRTIMER
    struct hrtimer hr_timer; // This parameter holds linux high resolution timer object
                             // can get replaced with platform specific timer object
#else
    struct timer_list jiffy_timer; // jiffy-resolution fallback timer
#endif
    nv_linux_state_t *nv_linux_state;  // per-GPU linux state the timer belongs to
    // Callback invoked on expiry (set to nvidia_nano_timer_callback at creation).
    void (*nv_nano_timer_callback)(struct nv_nano_timer *nv_nstimer);
    void *pTmrEvent;  // opaque TMR_EVENT pointer passed through to RM
};
/*!
* @brief runs nano second resolution timer callback
*
* @param[in] nv_nstimer Pointer to nv_nano_timer_t object
*/
static void
nvidia_nano_timer_callback(
    nv_nano_timer_t *nv_nstimer)
{
    nv_linux_state_t *nvl = nv_nstimer->nv_linux_state;
    nvidia_stack_t *sp = NULL;
    nv_state_t *nv = NULL;

    /* Timer context: the altstack must come from the atomic allocator. */
    if (nv_kmem_cache_alloc_stack_atomic(&sp) != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: no cache memory \n");
        return;
    }

    nv = NV_STATE_PTR(nvl);

    /* Hand the stored TMR_EVENT back to RM for servicing. */
    if (rm_run_nano_timer_callback(sp, nv, nv_nstimer->pTmrEvent) != NV_OK)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Error in service of callback \n");
    }

    nv_kmem_cache_free_stack(sp);
}
/*!
* @brief Allocates nano second resolution timer object
*
* @returns nv_nano_timer_t allocated pointer
*/
static nv_nano_timer_t *nv_alloc_nano_timer(void)
{
    nv_nano_timer_t *timer_obj;

    /* Allocate and zero-fill; NULL propagates to the caller on failure. */
    NV_KMALLOC(timer_obj, sizeof(*timer_obj));
    if (timer_obj != NULL)
    {
        memset(timer_obj, 0, sizeof(*timer_obj));
    }

    return timer_obj;
}
#if NV_NANO_TIMER_USE_HRTIMER
/* hrtimer expiry entry: recover the wrapping nv_nano_timer via
 * container_of and invoke its stored callback. */
static enum hrtimer_restart nv_nano_timer_callback_typed_data(struct hrtimer *hrtmr)
{
    struct nv_nano_timer *nv_nstimer =
        container_of(hrtmr, struct nv_nano_timer, hr_timer);

    nv_nstimer->nv_nano_timer_callback(nv_nstimer);

    /* One-shot: the timer is never rearmed from the expiry path. */
    return HRTIMER_NORESTART;
}
#else
/* timer_list expiry entry for the jiffy-resolution fallback. */
static inline void nv_jiffy_timer_callback_typed_data(struct timer_list *timer)
{
    struct nv_nano_timer *nv_nstimer =
        container_of(timer, struct nv_nano_timer, jiffy_timer);

    nv_nstimer->nv_nano_timer_callback(nv_nstimer);
}
#endif
/*!
* @brief Creates & initializes nano second resolution timer object
*
* @param[in] nv Per gpu linux state
* @param[in] tmrEvent pointer to TMR_EVENT
* @param[in] nv_nstimer Pointer to nv_nano_timer_t object
*/
void NV_API_CALL nv_create_nano_timer(
    nv_state_t *nv,
    void *pTmrEvent,
    nv_nano_timer_t **pnv_nstimer)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    nv_nano_timer_t *nv_nstimer = nv_alloc_nano_timer();

    /* Allocation failure is reported to the caller via a NULL out-pointer. */
    if (nv_nstimer == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Not able to create timer object \n");
        *pnv_nstimer = NULL;
        return;
    }

    nv_nstimer->nv_linux_state = nvl;
    nv_nstimer->pTmrEvent = pTmrEvent;
    nv_nstimer->nv_nano_timer_callback = nvidia_nano_timer_callback;

#if NV_NANO_TIMER_USE_HRTIMER
#if NV_IS_EXPORT_SYMBOL_PRESENT_hrtimer_setup
    /* Newer kernels: hrtimer_setup() installs the handler at init time. */
    hrtimer_setup(&nv_nstimer->hr_timer, &nv_nano_timer_callback_typed_data,
                  CLOCK_MONOTONIC, HRTIMER_MODE_REL);
#else
    /* Older kernels: initialize, then assign the expiry function directly. */
    hrtimer_init(&nv_nstimer->hr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    nv_nstimer->hr_timer.function = nv_nano_timer_callback_typed_data;
#endif // NV_IS_EXPORT_SYMBOL_PRESENT_hrtimer_setup
#else
    timer_setup(&nv_nstimer->jiffy_timer, nv_jiffy_timer_callback_typed_data, 0);
#endif // NV_NANO_TIMER_USE_HRTIMER

    *pnv_nstimer = nv_nstimer;
}
/*!
* @brief Starts nano second resolution timer
*
* @param[in] nv Per gpu linux state
* @param[in] nv_nstimer Pointer to nv_nano_timer_t object
* @param[in] time_ns Relative time in nano seconds
*/
void NV_API_CALL nv_start_nano_timer(
    nv_state_t *nv,
    nv_nano_timer_t *nv_nstimer,
    NvU64 time_ns)
{
#if NV_NANO_TIMER_USE_HRTIMER
    /* Relative expiry with full nanosecond resolution. */
    ktime_t ktime = ktime_set(0, time_ns);
    hrtimer_start(&nv_nstimer->hr_timer, ktime, HRTIMER_MODE_REL);
#else
    unsigned long time_jiffies;
    NvU32 time_us;

    /* Jiffy fallback: ns -> us -> jiffies, losing sub-microsecond precision. */
    time_us = (NvU32)(time_ns / 1000);
    time_jiffies = usecs_to_jiffies(time_us);
    mod_timer(&nv_nstimer->jiffy_timer, jiffies + time_jiffies);
#endif
}
/*!
* @brief Cancels nano second resolution timer
*
* @param[in] nv Per gpu linux state
* @param[in] nv_nstimer Pointer to nv_nano_timer_t object
*/
void NV_API_CALL nv_cancel_nano_timer(
    nv_state_t *nv,
    nv_nano_timer_t *nv_nstimer)
{
#if NV_NANO_TIMER_USE_HRTIMER
    /* Synchronous cancel of the high-resolution timer. */
    hrtimer_cancel(&nv_nstimer->hr_timer);
#else
    /* Synchronous cancel of the jiffy-resolution fallback timer. */
    nv_timer_delete_sync(&nv_nstimer->jiffy_timer);
#endif
}
/*!
* @brief Cancels & deletes nano second resolution timer object
*
* @param[in] nv Per gpu linux state
* @param[in] nv_nstimer Pointer to nv_nano_timer_t object
*/
void NV_API_CALL nv_destroy_nano_timer(
    nv_state_t *nv,
    nv_nano_timer_t *nv_nstimer)
{
    /* Stop any pending expiry before releasing the timer object. */
    nv_cancel_nano_timer(nv, nv_nstimer);
    NV_KFREE(nv_nstimer, sizeof(nv_nano_timer_t));
}

1027
kernel-open/nvidia/nv-p2p.c Normal file
View File

File diff suppressed because it is too large Load Diff

478
kernel-open/nvidia/nv-p2p.h Normal file
View File

@@ -0,0 +1,478 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_P2P_H_
#define _NV_P2P_H_
/*
* NVIDIA P2P Structure Versioning
*
* For the nvidia_p2p_*_t structures allocated by the NVIDIA driver, it will
* set the version field of the structure according to the definition used by
* the NVIDIA driver. The "major" field of the version is defined as the upper
* 16 bits, and the "minor" field of the version is defined as the lower 16
* bits. The version field will always be the first 4 bytes of the structure,
* and third-party drivers should check the value of this field in structures
* allocated by the NVIDIA driver to ensure runtime compatibility.
*
* In general, version numbers will be incremented as follows:
* - When a backwards-compatible change is made to the structure layout, the
* minor version for that structure will be incremented. Third-party drivers
* built against an older minor version will continue to work with the newer
* minor version used by the NVIDIA driver, without recompilation.
* - When a breaking change is made to the structure layout, the major version
* will be incremented. Third-party drivers built against an older major
* version require at least recompilation and potentially additional updates
* to use the new API.
*/
#define NVIDIA_P2P_MAJOR_VERSION_MASK 0xffff0000
#define NVIDIA_P2P_MINOR_VERSION_MASK 0x0000ffff
#define NVIDIA_P2P_MAJOR_VERSION(v) \
(((v) & NVIDIA_P2P_MAJOR_VERSION_MASK) >> 16)
#define NVIDIA_P2P_MINOR_VERSION(v) \
(((v) & NVIDIA_P2P_MINOR_VERSION_MASK))
#define NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) \
(NVIDIA_P2P_MAJOR_VERSION((p)->version) == NVIDIA_P2P_MAJOR_VERSION(v))
#define NVIDIA_P2P_VERSION_COMPATIBLE(p, v) \
(NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) && \
(NVIDIA_P2P_MINOR_VERSION((p)->version) >= (NVIDIA_P2P_MINOR_VERSION(v))))
enum {
NVIDIA_P2P_ARCHITECTURE_TESLA = 0,
NVIDIA_P2P_ARCHITECTURE_FERMI,
NVIDIA_P2P_ARCHITECTURE_CURRENT = NVIDIA_P2P_ARCHITECTURE_FERMI
};
#define NVIDIA_P2P_PARAMS_VERSION 0x00010001
enum {
NVIDIA_P2P_PARAMS_ADDRESS_INDEX_GPU = 0,
NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE,
NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX = \
NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE
};
#define NVIDIA_P2P_GPU_UUID_LEN 16
typedef
struct nvidia_p2p_params {
uint32_t version;
uint32_t architecture;
union nvidia_p2p_mailbox_addresses {
struct {
uint64_t wmb_addr;
uint64_t wmb_data;
uint64_t rreq_addr;
uint64_t rcomp_addr;
uint64_t reserved[2];
} fermi;
} addresses[NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX+1];
} nvidia_p2p_params_t;
/*
* Macro for users to detect
* driver support for persistent pages.
*/
#define NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API
/*
* This API is not supported.
*/
int nvidia_p2p_init_mapping(uint64_t p2p_token,
struct nvidia_p2p_params *params,
void (*destroy_callback)(void *data),
void *data);
/*
* This API is not supported.
*/
int nvidia_p2p_destroy_mapping(uint64_t p2p_token);
enum nvidia_p2p_page_size_type {
NVIDIA_P2P_PAGE_SIZE_4KB = 0,
NVIDIA_P2P_PAGE_SIZE_64KB,
NVIDIA_P2P_PAGE_SIZE_128KB,
NVIDIA_P2P_PAGE_SIZE_COUNT
};
typedef
struct nvidia_p2p_page {
uint64_t physical_address;
union nvidia_p2p_request_registers {
struct {
uint32_t wreqmb_h;
uint32_t rreqmb_h;
uint32_t rreqmb_0;
uint32_t reserved[3];
} fermi;
} registers;
} nvidia_p2p_page_t;
#define NVIDIA_P2P_PAGE_TABLE_VERSION 0x00020000
#define NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(p) \
NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_PAGE_TABLE_VERSION)
/*
* Page Table Flags
*/
#define NVIDIA_P2P_PAGE_TABLE_FLAGS_CPU_CACHEABLE 0x1
typedef
struct nvidia_p2p_page_table {
uint32_t version;
uint32_t page_size; /* enum nvidia_p2p_page_size_type */
struct nvidia_p2p_page **pages;
uint32_t entries;
uint8_t *gpu_uuid;
uint32_t flags;
} nvidia_p2p_page_table_t;
/*
* @brief
* Make the pages underlying a range of GPU virtual memory
* accessible to a third-party device.
*
* This API only supports pinned, GPU-resident memory, such as that provided
* by cudaMalloc().
*
* This API may sleep.
*
* @param[in] p2p_token
* A token that uniquely identifies the P2P mapping.
* @param[in] va_space
* A GPU virtual address space qualifier.
* @param[in] virtual_address
* The start address in the specified virtual address space.
* Address must be aligned to the 64KB boundary.
* @param[in] length
* The length of the requested P2P mapping.
* Length must be a multiple of 64KB.
* @param[out] page_table
* A pointer to an array of structures with P2P PTEs.
* @param[in] free_callback
* A pointer to the function to be invoked when the pages
* underlying the virtual address range are freed
* implicitly.
* @param[in] data
* A non-NULL opaque pointer to private data to be passed to the
* callback function.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -ENOTSUPP if the requested operation is not supported.
* -ENOMEM if the driver failed to allocate memory or if
* insufficient resources were available to complete the operation.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_get_pages( uint64_t p2p_token, uint32_t va_space,
uint64_t virtual_address, uint64_t length,
struct nvidia_p2p_page_table **page_table,
void (*free_callback)(void *data), void *data);
/*
* Flags to be used with persistent APIs
*/
#define NVIDIA_P2P_FLAGS_DEFAULT 0
#define NVIDIA_P2P_FLAGS_FORCE_BAR1_MAPPING 1
/*
* @brief
* Pin and make the pages underlying a range of GPU virtual memory
* accessible to a third-party device. The pages will persist until
* explicitly freed by nvidia_p2p_put_pages_persistent().
*
* Persistent GPU memory mappings are not supported on
* MIG-enabled devices and vGPU.
*
* This API only supports pinned, GPU-resident memory, such as that provided
* by cudaMalloc().
*
* This API may sleep.
*
* @param[in] virtual_address
* The start address in the specified virtual address space.
* Address must be aligned to the 64KB boundary.
* @param[in] length
* The length of the requested P2P mapping.
* Length must be a multiple of 64KB.
* @param[out] page_table
* A pointer to an array of structures with P2P PTEs.
* @param[in] flags
* NVIDIA_P2P_FLAGS_DEFAULT:
* Default value to be used if no specific behavior is expected.
* NVIDIA_P2P_FLAGS_FORCE_BAR1_MAPPING:
* Force BAR1 mappings on certain coherent platforms,
* subject to capability and supported topology.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -ENOTSUPP if the requested operation is not supported.
* -ENOMEM if the driver failed to allocate memory or if
* insufficient resources were available to complete the operation.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_get_pages_persistent(uint64_t virtual_address,
uint64_t length,
struct nvidia_p2p_page_table **page_table,
uint32_t flags);
#define NVIDIA_P2P_DMA_MAPPING_VERSION 0x00020003
#define NVIDIA_P2P_DMA_MAPPING_VERSION_COMPATIBLE(p) \
NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_DMA_MAPPING_VERSION)
struct pci_dev;
typedef
struct nvidia_p2p_dma_mapping {
uint32_t version;
enum nvidia_p2p_page_size_type page_size_type;
uint32_t entries;
uint64_t *dma_addresses;
void *private;
struct pci_dev *pci_dev;
} nvidia_p2p_dma_mapping_t;
/*
* @brief
* Make the physical pages retrieved using nvidia_p2p_get_pages accessible to
* a third-party device.
*
* @param[in] peer
* The struct pci_dev * of the peer device that needs to DMA to/from the
* mapping.
* @param[in] page_table
* The page table outlining the physical pages underlying the mapping, as
* retrieved with nvidia_p2p_get_pages().
* @param[out] dma_mapping
* The DMA mapping containing the DMA addresses to use on the third-party
* device.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -ENOTSUPP if the requested operation is not supported.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_dma_map_pages(struct pci_dev *peer,
struct nvidia_p2p_page_table *page_table,
struct nvidia_p2p_dma_mapping **dma_mapping);
/*
* @brief
* Unmap the physical pages previously mapped to the third-party device by
* nvidia_p2p_dma_map_pages().
*
* @param[in] peer
* The struct pci_dev * of the peer device that the DMA mapping belongs to.
* @param[in] page_table
* The page table backing the DMA mapping to be unmapped.
* @param[in] dma_mapping
* The DMA mapping containing the DMA addresses used by the third-party
* device, as retrieved with nvidia_p2p_dma_map_pages(). After this call
* returns, neither this struct nor the addresses contained within will be
* valid for use by the third-party device.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer,
struct nvidia_p2p_page_table *page_table,
struct nvidia_p2p_dma_mapping *dma_mapping);
/*
* @brief
* Release a set of pages previously made accessible to
* a third-party device.
*
* This API may sleep.
*
* @param[in] p2p_token
* A token that uniquely identifies the P2P mapping.
* @param[in] va_space
* A GPU virtual address space qualifier.
* @param[in] virtual_address
* The start address in the specified virtual address space.
* @param[in] page_table
* A pointer to the array of structures with P2P PTEs.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_put_pages(uint64_t p2p_token,
uint32_t va_space, uint64_t virtual_address,
struct nvidia_p2p_page_table *page_table);
/*
* @brief
* Release a set of persistent pages previously made accessible to
* a third-party device.
*
* This API may sleep.
*
* @param[in] virtual_address
* The start address in the specified virtual address space.
* @param[in] page_table
* A pointer to the array of structures with P2P PTEs.
* @param[in] flags
* Must be set to zero for now.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_put_pages_persistent(uint64_t virtual_address,
struct nvidia_p2p_page_table *page_table,
uint32_t flags);
/*
* @brief
* Free a third-party P2P page table. (This function is a no-op.)
*
* @param[in] page_table
* A pointer to the array of structures with P2P PTEs.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
*/
int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table);
/*
* @brief
* Free a third-party P2P DMA mapping. (This function is a no-op.)
*
* @param[in] dma_mapping
* A pointer to the DMA mapping structure.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
*/
int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping);
#define NVIDIA_P2P_RSYNC_DRIVER_VERSION 0x00010001
#define NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(p) \
NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_DRIVER_VERSION)
typedef
struct nvidia_p2p_rsync_driver {
uint32_t version;
int (*get_relaxed_ordering_mode)(int *mode, void *data);
void (*put_relaxed_ordering_mode)(int mode, void *data);
void (*wait_for_rsync)(struct pci_dev *gpu, void *data);
} nvidia_p2p_rsync_driver_t;
/*
* @brief
* Registers the rsync driver.
*
* @param[in] driver
* A pointer to the rsync driver structure. The NVIDIA driver would use,
*
* get_relaxed_ordering_mode to obtain a reference to the current relaxed
* ordering mode (treated as a boolean) from the rsync driver.
*
* put_relaxed_ordering_mode to release a reference to the current relaxed
* ordering mode back to the rsync driver. The NVIDIA driver will call this
* function once for each successful call to get_relaxed_ordering_mode, and
* the relaxed ordering mode must not change until the last reference is
* released.
*
* wait_for_rsync to call into the rsync module to issue RSYNC. This callback
* can't sleep or re-schedule as it may arrive under spinlocks.
* @param[in] data
* A pointer to the rsync driver's private data.
*
* @Returns
* 0 upon successful completion.
* -EINVAL parameters are incorrect.
* -EBUSY if a module is already registered or GPU devices are in use.
*/
int nvidia_p2p_register_rsync_driver(nvidia_p2p_rsync_driver_t *driver,
void *data);
/*
* @brief
* Unregisters the rsync driver.
*
* @param[in] driver
* A pointer to the rsync driver structure.
* @param[in] data
* A pointer to the rsync driver's private data.
*/
void nvidia_p2p_unregister_rsync_driver(nvidia_p2p_rsync_driver_t *driver,
void *data);
#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION 0x00020001
#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION_COMPATIBLE(p) \
NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_REG_INFO_VERSION)
typedef struct nvidia_p2p_rsync_reg {
void *ptr;
size_t size;
struct pci_dev *ibmnpu;
struct pci_dev *gpu;
uint32_t cluster_id;
uint32_t socket_id;
} nvidia_p2p_rsync_reg_t;
typedef struct nvidia_p2p_rsync_reg_info {
uint32_t version;
nvidia_p2p_rsync_reg_t *regs;
size_t entries;
} nvidia_p2p_rsync_reg_info_t;
/*
* @brief
* This interface is no longer supported and will always return an error. It
* is left in place (for now) to allow third-party callers to build without
* any errors.
*
* @Returns
* -ENODEV
*/
int nvidia_p2p_get_rsync_registers(nvidia_p2p_rsync_reg_info_t **reg_info);
/*
* @brief
* This interface is no longer supported. It is left in place (for now) to
* allow third-party callers to build without any errors.
*/
void nvidia_p2p_put_rsync_registers(nvidia_p2p_rsync_reg_info_t *reg_info);
#endif /* _NV_P2P_H_ */

405
kernel-open/nvidia/nv-pat.c Normal file
View File

@@ -0,0 +1,405 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-reg.h"
#include "nv-pat.h"
int nv_pat_mode = NV_PAT_MODE_DISABLED;
#if defined(NV_ENABLE_PAT_SUPPORT)
/*
* Private PAT support for use by the NVIDIA driver. This is used on
* kernels that do not modify the PAT to include a write-combining
* entry.
*
* On kernels that have CONFIG_X86_PAT, the NVIDIA driver still checks that the
* WC entry is as expected before using PAT.
*/
#if defined(CONFIG_X86_PAT)
#define NV_ENABLE_BUILTIN_PAT_SUPPORT 0
#else
#define NV_ENABLE_BUILTIN_PAT_SUPPORT 1
#endif
#define NV_READ_PAT_ENTRIES(pat1, pat2) rdmsr(0x277, (pat1), (pat2))
#define NV_WRITE_PAT_ENTRIES(pat1, pat2) wrmsr(0x277, (pat1), (pat2))
#define NV_PAT_ENTRY(pat, index) \
(((pat) & (0xff << ((index)*8))) >> ((index)*8))
#if NV_ENABLE_BUILTIN_PAT_SUPPORT
static unsigned long orig_pat1, orig_pat2;
/*
 * Disable CPU caching in preparation for rewriting the PAT MSR:
 * set CR0.CD (bit 30), clear CR0.NW (bit 29), flush the caches, and
 * temporarily clear CR4.PGE (bit 7) so the TLB flush also drops global
 * pages. The prior CR4 value is returned through *cr4 so that
 * nv_enable_caches() can restore it. The statement order here follows
 * the architectural PAT-update procedure and must not be changed.
 */
static inline void nv_disable_caches(unsigned long *cr4)
{
unsigned long cr0 = read_cr0();
/* CR0 = (CR0 & ~NW) | CD */
write_cr0(((cr0 & (0xdfffffff)) | 0x40000000));
wbinvd();
*cr4 = __read_cr4();
/* Drop CR4.PGE so the flush below also evicts global TLB entries. */
if (*cr4 & 0x80) __write_cr4(*cr4 & ~0x80);
__flush_tlb();
}
/*
 * Re-enable CPU caching after a PAT MSR update: flush caches and TLB,
 * clear CR0.CD (bit 30) and CR0.NW (bit 29), and restore the CR4 value
 * saved by nv_disable_caches() (re-enabling global pages if CR4.PGE was
 * set before).
 */
static inline void nv_enable_caches(unsigned long cr4)
{
unsigned long cr0 = read_cr0();
wbinvd();
__flush_tlb();
/* Clear CD and NW to turn caching back on. */
write_cr0((cr0 & 0x9fffffff));
if (cr4 & 0x80) __write_cr4(cr4);
}
/*
 * Per-CPU worker: program PAT entry 1 to write-combining (type 0x01).
 *
 * Runs with interrupts disabled and caches off, per the architectural
 * PAT update procedure. When CPU hotplug support is compiled in, 'info'
 * carries a cpu number: 0 means "run on every CPU this is invoked on"
 * (used by on_each_cpu() with a NULL argument), any other value makes
 * all CPUs except the named one return without touching their PAT.
 */
static void nv_setup_pat_entries(void *info)
{
unsigned long pat1, pat2, cr4;
unsigned long eflags;
#if defined(NV_ENABLE_HOTPLUG_CPU)
int cpu = (NvUPtr)info;
if ((cpu != 0) && (cpu != (int)smp_processor_id()))
return;
#endif
NV_SAVE_FLAGS(eflags);
NV_CLI();
nv_disable_caches(&cr4);
NV_READ_PAT_ENTRIES(pat1, pat2);
/* Replace PAT entry 1 (byte 1 of the low MSR dword) with WC (0x01). */
pat1 &= 0xffff00ff;
pat1 |= 0x00000100;
NV_WRITE_PAT_ENTRIES(pat1, pat2);
nv_enable_caches(cr4);
NV_RESTORE_FLAGS(eflags);
}
/*
 * Per-CPU worker: write back the boot-time PAT values saved in
 * orig_pat1/orig_pat2 by nv_enable_builtin_pat_support().
 *
 * Same execution constraints and cpu-number filtering as
 * nv_setup_pat_entries(): interrupts off, caches off, and when 'info'
 * carries a non-zero cpu number only that CPU performs the restore.
 */
static void nv_restore_pat_entries(void *info)
{
unsigned long cr4;
unsigned long eflags;
#if defined(NV_ENABLE_HOTPLUG_CPU)
int cpu = (NvUPtr)info;
if ((cpu != 0) && (cpu != (int)smp_processor_id()))
return;
#endif
NV_SAVE_FLAGS(eflags);
NV_CLI();
nv_disable_caches(&cr4);
NV_WRITE_PAT_ENTRIES(orig_pat1, orig_pat2);
nv_enable_caches(cr4);
NV_RESTORE_FLAGS(eflags);
}
/*
 * cpuhp teardown callback: restore the original PAT entries on the CPU
 * that is going offline. If we are already running on that CPU, restore
 * directly; otherwise broadcast via smp_call_function() and let the
 * worker's cpu-number filter pick out the target CPU.
 */
static int
nvidia_cpu_teardown(unsigned int cpu)
{
#if defined(NV_ENABLE_HOTPLUG_CPU)
    unsigned int this_cpu = get_cpu();

    if (this_cpu == cpu)
        nv_restore_pat_entries(NULL);
    else
        /*
         * Bug fix: pass the cpu number by value, not its address. The
         * worker decodes the argument with (NvUPtr)info, so passing
         * &cpu made it compare against a stack address and the target
         * CPU never restored its PAT entries.
         */
        smp_call_function(nv_restore_pat_entries, (void *)(NvUPtr)cpu, 1);

    put_cpu();
#endif
    return 0;
}
/*
 * cpuhp online callback: program the driver's write-combining PAT entry
 * on a newly onlined CPU. If we are already running on that CPU, set it
 * up directly; otherwise broadcast via smp_call_function() and let the
 * worker's cpu-number filter pick out the target CPU.
 */
static int
nvidia_cpu_online(unsigned int cpu)
{
#if defined(NV_ENABLE_HOTPLUG_CPU)
    unsigned int this_cpu = get_cpu();

    if (this_cpu == cpu)
        nv_setup_pat_entries(NULL);
    else
        /*
         * Bug fix: pass the cpu number by value, not its address. The
         * worker decodes the argument with (NvUPtr)info, so passing
         * &cpu made it compare against a stack address and the newly
         * onlined CPU was never programmed.
         */
        smp_call_function(nv_setup_pat_entries, (void *)(NvUPtr)cpu, 1);

    put_cpu();
#endif
    return 0;
}
/*
 * Program the driver's private write-combining PAT entry on every
 * online CPU. The boot-time PAT MSR contents are saved to
 * orig_pat1/orig_pat2 first so nv_disable_builtin_pat_support() can
 * restore them later. Always returns 1 (success).
 */
static int nv_enable_builtin_pat_support(void)
{
    unsigned long new_pat1, new_pat2;

    /* Remember the original layout for later restoration. */
    NV_READ_PAT_ENTRIES(orig_pat1, orig_pat2);
    nv_printf(NV_DBG_SETUP, "saved orig pats as 0x%lx 0x%lx\n", orig_pat1, orig_pat2);

    /* Rewrite PAT entry 1 to write-combining on all online CPUs. */
    on_each_cpu(nv_setup_pat_entries, NULL, 1);

    NV_READ_PAT_ENTRIES(new_pat1, new_pat2);
    nv_printf(NV_DBG_SETUP, "changed pats to 0x%lx 0x%lx\n", new_pat1, new_pat2);

    return 1;
}
/*
 * Undo nv_enable_builtin_pat_support(): restore the saved boot-time PAT
 * entries on every online CPU and mark PAT support disabled.
 */
static void nv_disable_builtin_pat_support(void)
{
    unsigned long cur_pat1, cur_pat2;

    on_each_cpu(nv_restore_pat_entries, NULL, 1);
    nv_pat_mode = NV_PAT_MODE_DISABLED;

    NV_READ_PAT_ENTRIES(cur_pat1, cur_pat2);
    nv_printf(NV_DBG_SETUP, "restored orig pats as 0x%lx 0x%lx\n", cur_pat1, cur_pat2);
}
/*
 * Legacy (pre-cpuhp) CPU hotplug notifier: dispatch the online/teardown
 * handlers for the CPU identified by hcpu. Always returns NOTIFY_OK;
 * PAT reprogramming failures are not propagated to the hotplug core.
 */
static int
nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
/* CPU_DOWN_FAILED was added by the following commit
* 2004 Oct 18: 71da3667be80d30121df3972caa0bf5684228379
*
* CPU_DOWN_PREPARE was added by the following commit
* 2004 Oct 18: d13d28de21d913aacd3c91e76e307fa2eb7835d8
*
* We use one ifdef for both macros since they were added on the same day.
*/
#if defined(CPU_DOWN_FAILED)
switch (action)
{
/* A failed offline is treated like a fresh online: re-apply PAT setup. */
case CPU_DOWN_FAILED:
case CPU_ONLINE:
nvidia_cpu_online((NvUPtr)hcpu);
break;
case CPU_DOWN_PREPARE:
nvidia_cpu_teardown((NvUPtr)hcpu);
break;
}
#endif
return NOTIFY_OK;
}
static enum cpuhp_state nvidia_pat_online;
/*
 * Install a dynamic cpuhp state so that CPUs coming online (or failing
 * to go offline) get the driver's PAT entry programmed, and CPUs going
 * offline get it restored.
 *
 * Returns 0 on success. On failure, builtin PAT support is torn down
 * and -EIO is returned.
 */
static int
nvidia_register_cpu_hotplug_notifier(void)
{
    int ret;

    /*
     * cpuhp_setup_state() returns a positive dynamic state number on
     * success when the requested state is CPUHP_AP_ONLINE_DYN, and a
     * negative errno on failure.
     */
    ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                            "nvidia/pat:online",
                            nvidia_cpu_online,
                            nvidia_cpu_teardown);
    if (ret < 0)
    {
        /*
         * If cpuhp_setup_state() fails, cpuhp_remove_state() must never
         * be called, as it could remove some other subsystem's state.
         * Explicitly zero nvidia_pat_online so that any stray call to
         * cpuhp_remove_state() triggers a BUG() instead.
         */
        nvidia_pat_online = 0;

        nv_disable_pat_support();
        nv_printf(NV_DBG_ERRORS,
            "NVRM: CPU hotplug notifier registration failed!\n");
        return -EIO;
    }

    nvidia_pat_online = ret;
    return 0;
}
/*
 * Remove the dynamic cpuhp state installed by
 * nvidia_register_cpu_hotplug_notifier(). Must only be called after a
 * successful registration: on failure nvidia_pat_online is explicitly
 * zeroed, which makes cpuhp_remove_state() BUG() by design.
 */
static void
nvidia_unregister_cpu_hotplug_notifier(void)
{
cpuhp_remove_state(nvidia_pat_online);
}
#else /* NV_ENABLE_BUILTIN_PAT_SUPPORT */
/*
 * Stubs used when the builtin PAT rewriting code is compiled out
 * (i.e. the kernel has CONFIG_X86_PAT; see NV_ENABLE_BUILTIN_PAT_SUPPORT).
 */
static int nv_enable_builtin_pat_support(void)
{
/* 0: no builtin write-combining entry can be set up. */
return 0;
}
static void nv_disable_builtin_pat_support(void)
{
}
static int nvidia_register_cpu_hotplug_notifier(void)
{
/* Nothing to keep in sync across CPUs; report failure. */
return -EIO;
}
static void nvidia_unregister_cpu_hotplug_notifier(void)
{
}
#endif /* NV_ENABLE_BUILTIN_PAT_SUPPORT */
/*
 * Decide how the driver can use the PAT:
 *   NV_PAT_MODE_KERNEL   - the kernel already exposes write-combining at
 *                          PAT entry 1; inherit its layout.
 *   NV_PAT_MODE_BUILTIN  - no WC entry exists anywhere; the builtin code
 *                          may install one (only when compiled in).
 *   NV_PAT_MODE_DISABLED - PAT unusable: no CPU support, or WC present
 *                          at an index other than 1.
 */
static int nv_determine_pat_mode(void)
{
unsigned int pat1, pat2, i;
NvU8 PAT_WC_index;
/*
 * The cached CPU feature bit is authoritative, but fall back to a raw
 * CPUID check (leaf 1, EDX bit 16 = PAT) for Intel Family-6 parts with
 * model < 15, which support the PAT even when the kernel's feature bit
 * is not set.
 */
if (!test_bit(X86_FEATURE_PAT,
(volatile unsigned long *)&boot_cpu_data.x86_capability))
{
if ((boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) ||
(boot_cpu_data.cpuid_level < 1) ||
((cpuid_edx(1) & (1 << 16)) == 0) ||
(boot_cpu_data.x86 != 6) || (boot_cpu_data.x86_model >= 15))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: CPU does not support the PAT.\n");
return NV_PAT_MODE_DISABLED;
}
}
NV_READ_PAT_ENTRIES(pat1, pat2);
/* 0xf is a sentinel meaning "no WC entry found". */
PAT_WC_index = 0xf;
/* Scan all eight PAT entries for a write-combining (0x01) type. */
for (i = 0; i < 4; i++)
{
if (NV_PAT_ENTRY(pat1, i) == 0x01)
{
PAT_WC_index = i;
break;
}
if (NV_PAT_ENTRY(pat2, i) == 0x01)
{
PAT_WC_index = (i + 4);
break;
}
}
if (PAT_WC_index == 1)
{
/* WC is exactly where the driver expects it. */
return NV_PAT_MODE_KERNEL;
}
else if (PAT_WC_index != 0xf)
{
/* WC exists but at an index the driver does not support. */
nv_printf(NV_DBG_ERRORS,
"NVRM: PAT configuration unsupported.\n");
return NV_PAT_MODE_DISABLED;
}
else
{
#if NV_ENABLE_BUILTIN_PAT_SUPPORT
return NV_PAT_MODE_BUILTIN;
#else
return NV_PAT_MODE_DISABLED;
#endif /* NV_ENABLE_BUILTIN_PAT_SUPPORT */
}
}
/*
 * Enable PAT-based write-combining support.
 *
 * Returns 1 when a usable WC PAT entry is available (already enabled,
 * inherited from the kernel, or set up by the builtin code) and 0 when
 * the PAT cannot be used.
 */
int nv_enable_pat_support(void)
{
    if (nv_pat_mode != NV_PAT_MODE_DISABLED)
        return 1;

    nv_pat_mode = nv_determine_pat_mode();

    if (nv_pat_mode == NV_PAT_MODE_DISABLED)
    {
        /* avoid the PAT if unavailable/unusable */
        return 0;
    }

    if (nv_pat_mode == NV_PAT_MODE_KERNEL)
    {
        /* inherit the kernel's PAT layout */
        return 1;
    }

    /* NV_PAT_MODE_BUILTIN: use builtin code to modify the PAT layout */
    return nv_enable_builtin_pat_support();
}
/*
 * Disable PAT support. Only the builtin mode has anything to undo;
 * a kernel-provided layout is left untouched.
 */
void nv_disable_pat_support(void)
{
    if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
        nv_disable_builtin_pat_support();
}
/*
 * Driver-load PAT initialization.
 *
 * Consults the NV_USE_PAGE_ATTRIBUTE_TABLE registry key: an explicit 0
 * disables PAT use entirely (~0 means "unset"). Otherwise PAT support
 * is enabled and, when the builtin path is active, a CPU hotplug
 * notifier is registered to keep hotplugged CPUs in sync.
 *
 * Returns 0 on success (or when PAT is disabled), or the notifier
 * registration error.
 */
int nv_init_pat_support(nvidia_stack_t *sp)
{
    NV_STATUS status;
    NvU32 data;

    status = rm_read_registry_dword(sp, NULL,
                                    NV_USE_PAGE_ATTRIBUTE_TABLE, &data);

    /* Registry value 0 explicitly disables PAT; ~0 means "not set". */
    if ((status == NV_OK) && ((int)data != ~0) && (data == 0))
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: builtin PAT support disabled.\n");
        return 0;
    }

    nv_enable_pat_support();

    if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
    {
        /* Keep hotplugged CPUs' PAT entries in sync with ours. */
        return nvidia_register_cpu_hotplug_notifier();
    }

    return 0;
}
/*
 * Driver-unload counterpart of nv_init_pat_support(): restore the
 * original PAT layout and drop the hotplug notifier, but only when the
 * builtin mode was in use.
 */
void nv_teardown_pat_support(void)
{
    if (nv_pat_mode != NV_PAT_MODE_BUILTIN)
        return;

    nv_disable_pat_support();
    nvidia_unregister_cpu_hotplug_notifier();
}
#endif /* defined(NV_ENABLE_PAT_SUPPORT) */

View File

@@ -0,0 +1,59 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PAT_H_
#define _NV_PAT_H_
#include "nv-linux.h"
#if defined(NV_ENABLE_PAT_SUPPORT)
/* Implemented in nv-pat.c: driver-private x86 PAT management. */
extern int nv_init_pat_support(nvidia_stack_t *sp);
extern void nv_teardown_pat_support(void);
extern int nv_enable_pat_support(void);
extern void nv_disable_pat_support(void);
#else
/*
 * PAT support compiled out: init/teardown/disable are no-ops and
 * nv_enable_pat_support() unconditionally reports success (returns 1).
 */
static inline int nv_init_pat_support(nvidia_stack_t *sp)
{
(void)sp;
return 0;
}
static inline void nv_teardown_pat_support(void)
{
return;
}
static inline int nv_enable_pat_support(void)
{
return 1;
}
static inline void nv_disable_pat_support(void)
{
return;
}
#endif
#endif /* _NV_PAT_H_ */

View File

@@ -0,0 +1,90 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "nv-pci-table.h"
#include "cpuopsys.h"
#if defined(NV_BSD)
/* Define PCI classes that FreeBSD's linuxkpi is missing */
#define PCI_VENDOR_ID_NVIDIA 0x10de
#define PCI_CLASS_DISPLAY_VGA 0x0300
#define PCI_CLASS_DISPLAY_3D 0x0302
#define PCI_CLASS_BRIDGE_OTHER 0x0680
#endif
/*
 * Devices supported by RM: any NVIDIA device whose PCI class is exactly
 * VGA display controller (0x0300) or 3D controller (0x0302). The
 * class_mask of ~0 requires the full 24-bit class (including prog-if)
 * to match. Terminated by a zeroed sentinel entry.
 */
struct pci_device_id nv_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_DISPLAY_VGA << 8),
.class_mask = ~0
},
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_DISPLAY_3D << 8),
.class_mask = ~0
},
{ }
};
/*
 * Devices supported by all drivers in nvidia.ko: the RM classes above
 * plus PCI_CLASS_BRIDGE_OTHER. Explicitly sized to 4 (three matches
 * plus the zeroed sentinel) to agree with the extern declaration in
 * nv-pci-table.h; keep the two in sync if entries are added.
 */
struct pci_device_id nv_module_device_table[4] = {
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_DISPLAY_VGA << 8),
.class_mask = ~0
},
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_DISPLAY_3D << 8),
.class_mask = ~0
},
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_BRIDGE_OTHER << 8),
.class_mask = ~0
},
{ }
};
#if defined(NV_LINUX)
MODULE_DEVICE_TABLE(pci, nv_module_device_table);
#endif

View File

@@ -0,0 +1,32 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PCI_TABLE_H_
#define _NV_PCI_TABLE_H_
#include <linux/pci.h>
/* PCI IDs claimed by RM (VGA and 3D display classes); see nv-pci-table.c. */
extern struct pci_device_id nv_pci_table[];
/*
 * Superset table (adds PCI_CLASS_BRIDGE_OTHER) exported via
 * MODULE_DEVICE_TABLE; the explicit size must match the definition.
 */
extern struct pci_device_id nv_module_device_table[4];
#endif /* _NV_PCI_TABLE_H_ */

2681
kernel-open/nvidia/nv-pci.c Normal file
View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,130 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
/*!
* @brief Unpowergate the display.
*
* Increment the device's usage counter, run pm_request_resume(dev)
* and return its result.
*
* For more details on runtime pm functions, please check the below
* files in the Linux kernel:
*
* include/linux/pm_runtime.h
* include/linux/pm.h
* or
* https://www.kernel.org/doc/Documentation/power/runtime_pm.txt
*
* pm_request_resume() submits a request to execute the subsystem-level
* resume callback for the device (the request is represented by a work
* item in pm_wq); returns 0 on success, 1 if the device's runtime PM
* status was already 'active', or error code if the request hasn't
* been queued up.
*
* @param[in] nv Per gpu linux state
*
* @returns NV_STATUS
*/
/*
 * Unpowergate the display device via runtime PM: take a usage-count
 * reference with pm_runtime_get() and translate its result. Returns
 * NV_OK when the device is (or is becoming) active, NV_ERR_GENERIC on
 * a runtime-PM error, or NV_ERR_NOT_SUPPORTED when platform display
 * devices are not supported.
 */
NV_STATUS NV_API_CALL nv_soc_pm_unpowergate(
    nv_state_t *nv)
{
#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvS32 rc = pm_runtime_get(nvl->dev);

    if (rc == 1)
    {
        /* Runtime PM status was already 'active'. */
        nv_printf(NV_DBG_INFO, "NVRM: device was already unpowergated\n");
        return NV_OK;
    }

    if (rc == -EINPROGRESS)
    {
        /*
         * pm_runtime_get() internally calls __pm_runtime_resume(...RPM_ASYNC)
         * which internally calls rpm_resume() and this function will throw
         * "-EINPROGRESS" if it is being called when device state is
         * RPM_RESUMING and RPM_ASYNC or RPM_NOWAIT is set.
         */
        nv_printf(NV_DBG_INFO, "NVRM: device is already unpowergating\n");
        return NV_OK;
    }

    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: unpowergate unsuccessful. ret: %d\n", rc);
        return NV_ERR_GENERIC;
    }

    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
/*!
* @brief Powergate the display.
*
* Decrement the device's usage counter; if the result is 0 then run
* pm_request_idle(dev) and return its result.
*
* For more details on runtime pm functions, please check the below
* files in the Linux kernel:
*
* include/linux/pm_runtime.h
* include/linux/pm.h
* or
* https://www.kernel.org/doc/Documentation/power/runtime_pm.txt
*
* @param[in] nv Per gpu linux state
*
* @returns NV_STATUS
*/
/*
 * Powergate the display device via runtime PM: drop the usage-count
 * reference with pm_runtime_put(). Returns NV_OK only when the call
 * reports success (0), NV_ERR_GENERIC otherwise, or
 * NV_ERR_NOT_SUPPORTED when platform display devices are not supported.
 */
NV_STATUS NV_API_CALL nv_soc_pm_powergate(
    nv_state_t *nv)
{
#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvS32 rc = pm_runtime_put(nvl->dev);

    if (rc != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: powergate unsuccessful. ret: %d\n", rc);
        return NV_ERR_GENERIC;
    }

    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

1093
kernel-open/nvidia/nv-reg.h Normal file
View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,80 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "nv-linux.h"
#include "os-interface.h"
#include "nv-report-err.h"
#define CREATE_TRACE_POINTS
#include "nv-tracepoint.h"
nv_report_error_cb_t nv_error_cb_handle = NULL;
/*
 * Register the single XID error-report callback. Returns 0 on success,
 * -EINVAL for a NULL handle, or -EBUSY if a callback is already
 * registered (an existing handle is never displaced).
 */
int nvidia_register_error_cb(nv_report_error_cb_t report_error_cb)
{
    int rc = -EBUSY;

    if (report_error_cb == NULL)
    {
        rc = -EINVAL;
    }
    else if (nv_error_cb_handle == NULL)
    {
        nv_error_cb_handle = report_error_cb;
        rc = 0;
    }

    return rc;
}
EXPORT_SYMBOL(nvidia_register_error_cb);
/*
 * Clear the registered XID error-report callback. Returns 0 on success
 * or -EPERM when no callback is currently registered.
 */
int nvidia_unregister_error_cb(void)
{
    int rc = -EPERM;

    /* Only a previously registered handle may be cleared. */
    if (nv_error_cb_handle != NULL)
    {
        nv_error_cb_handle = NULL;
        rc = 0;
    }

    return rc;
}
EXPORT_SYMBOL(nvidia_unregister_error_cb);
/*
 * Format and publish an XID error report: emit the nvidia_dev_xid
 * tracepoint and, when one is registered, forward the message to the
 * error callback. Usable in atomic context (falls back to an atomic
 * allocation when sleeping is not allowed); the report is silently
 * dropped if the buffer allocation fails.
 */
void nv_report_error(
struct pci_dev *dev,
NvU32 error_number,
const char *format,
va_list ap
)
{
char *buffer;
/* Pick an allocation mode that matches the current context. */
gfp_t gfp = NV_MAY_SLEEP() ? NV_GFP_NO_OOM : NV_GFP_ATOMIC;
buffer = kvasprintf(gfp, format, ap);
if (buffer == NULL)
return;
trace_nvidia_dev_xid(dev, error_number, buffer);
if (nv_error_cb_handle != NULL)
/* Length passed to the callback includes the terminating NUL. */
nv_error_cb_handle(dev, error_number, buffer, strlen(buffer) + 1);
kfree(buffer);
}

View File

@@ -0,0 +1,66 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_REPORT_ERR_H_
#define _NV_REPORT_ERR_H_
/*
* @brief
* Callback definition for obtaining XID error string and data.
*
* @param[in] pci_dev *
* Structure describing the GPU PCI device.
* @param[in] uint32_t
* XID number
* @param[in] char *
* Error string with HWERR info.
* @param[in] size_t
* Length of error string, including the terminating NUL.
*/
typedef void (*nv_report_error_cb_t)(struct pci_dev *, uint32_t, char *, size_t);
/*
* @brief
* Register callback function to obtain XID error string and data.
*
* @param[in] report_error_cb
* A function pointer to receive the callback.
*
* @return
* 0 upon successful completion.
* -EINVAL callback handle is NULL.
* -EBUSY callback handle is already registered.
*/
int nvidia_register_error_cb(nv_report_error_cb_t report_error_cb);
/*
* @brief
* Unregisters callback function handle.
*
* @return
* 0 upon successful completion.
* -EPERM unregister not permitted on NULL callback handle.
*/
int nvidia_unregister_error_cb(void);
#endif /* _NV_REPORT_ERR_H_ */

View File

@@ -0,0 +1,161 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-linux.h"
#include "nv-rsync.h"
nv_rsync_info_t g_rsync_info;
/*
 * Initialize the global rsync bookkeeping: no registered driver, no
 * outstanding references, relaxed ordering off, and a fresh mutex.
 */
void nv_init_rsync_info(
    void
)
{
    g_rsync_info.usage_count = 0;
    g_rsync_info.relaxed_ordering_mode = NV_FALSE;
    g_rsync_info.data = NULL;

    NV_INIT_MUTEX(&g_rsync_info.lock);
}
/*
 * Sanity-check the rsync state at teardown: by now the driver must be
 * unregistered, no references outstanding, and relaxed ordering off.
 */
void nv_destroy_rsync_info(
    void
)
{
    WARN_ON(g_rsync_info.relaxed_ordering_mode);
    WARN_ON(g_rsync_info.usage_count);
    WARN_ON(g_rsync_info.data);
}
/*
 * Take a reference on the relaxed ordering mode. The first reference
 * queries the registered rsync driver (if any) for the current mode,
 * which is then latched until the last reference is released via
 * nv_put_rsync_info(). Returns 0 on success, or the driver's error
 * code, in which case no reference is taken.
 */
int nv_get_rsync_info(
void
)
{
int mode;
int rc = 0;
down(&g_rsync_info.lock);
if (g_rsync_info.usage_count == 0)
{
if (g_rsync_info.get_relaxed_ordering_mode)
{
rc = g_rsync_info.get_relaxed_ordering_mode(&mode,
g_rsync_info.data);
if (rc != 0)
{
/* Query failed: leave the usage count untouched. */
goto done;
}
/* Normalize to a boolean and latch for subsequent references. */
g_rsync_info.relaxed_ordering_mode = !!mode;
}
}
g_rsync_info.usage_count++;
done:
up(&g_rsync_info.lock);
return rc;
}
void nv_put_rsync_info(
void
)
{
int mode;
down(&g_rsync_info.lock);
g_rsync_info.usage_count--;
if (g_rsync_info.usage_count == 0)
{
if (g_rsync_info.put_relaxed_ordering_mode)
{
mode = g_rsync_info.relaxed_ordering_mode;
g_rsync_info.put_relaxed_ordering_mode(mode, g_rsync_info.data);
g_rsync_info.relaxed_ordering_mode = NV_FALSE;
}
}
up(&g_rsync_info.lock);
}
/*
 * Install the rsync driver callbacks. Fails with -EBUSY if a driver is
 * already registered or any client still holds a reference to the
 * current relaxed ordering mode; returns 0 on success.
 */
int nv_register_rsync_driver(
    int (*get_relaxed_ordering_mode)(int *mode, void *data),
    void (*put_relaxed_ordering_mode)(int mode, void *data),
    void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
    void *data
)
{
    int status = 0;

    down(&g_rsync_info.lock);

    if ((g_rsync_info.get_relaxed_ordering_mode != NULL) ||
        (g_rsync_info.usage_count != 0))
    {
        status = -EBUSY;
    }
    else
    {
        g_rsync_info.get_relaxed_ordering_mode = get_relaxed_ordering_mode;
        g_rsync_info.put_relaxed_ordering_mode = put_relaxed_ordering_mode;
        g_rsync_info.wait_for_rsync = wait_for_rsync;
        g_rsync_info.data = data;
    }

    up(&g_rsync_info.lock);

    return status;
}
/*
 * Detach the currently registered rsync driver. The callbacks and data
 * must match the registered ones and no references may be outstanding;
 * violations are flagged with WARN_ON, but the state is cleared
 * unconditionally.
 */
void nv_unregister_rsync_driver(
int (*get_relaxed_ordering_mode)(int *mode, void *data),
void (*put_relaxed_ordering_mode)(int mode, void *data),
void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
void *data
)
{
down(&g_rsync_info.lock);
WARN_ON(g_rsync_info.usage_count != 0);
WARN_ON(g_rsync_info.get_relaxed_ordering_mode !=
get_relaxed_ordering_mode);
WARN_ON(g_rsync_info.put_relaxed_ordering_mode !=
put_relaxed_ordering_mode);
WARN_ON(g_rsync_info.wait_for_rsync != wait_for_rsync);
WARN_ON(g_rsync_info.data != data);
g_rsync_info.get_relaxed_ordering_mode = NULL;
g_rsync_info.put_relaxed_ordering_mode = NULL;
g_rsync_info.wait_for_rsync = NULL;
g_rsync_info.data = NULL;
up(&g_rsync_info.lock);
}

View File

@@ -0,0 +1,55 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_RSYNC_H_
#define _NV_RSYNC_H_
#include "nv-linux.h"
/*
 * Global state tracking the externally registered rsync driver and the
 * reference-counted relaxed ordering mode (see nv-rsync.c).
 */
typedef struct nv_rsync_info
{
struct semaphore lock;        /* protects every field below */
uint32_t usage_count;         /* outstanding nv_get_rsync_info() references */
NvBool relaxed_ordering_mode; /* mode latched on the first reference */
/* Callbacks supplied by the registered rsync driver (NULL when none). */
int (*get_relaxed_ordering_mode)(int *mode, void *data);
void (*put_relaxed_ordering_mode)(int mode, void *data);
void (*wait_for_rsync)(struct pci_dev *gpu, void *data);
void *data;                   /* rsync driver's private data */
} nv_rsync_info_t;
void nv_init_rsync_info(void);
void nv_destroy_rsync_info(void);
int nv_get_rsync_info(void);
void nv_put_rsync_info(void);
int nv_register_rsync_driver(
int (*get_relaxed_ordering_mode)(int *mode, void *data),
void (*put_relaxed_ordering_mode)(int mode, void *data),
void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
void *data);
void nv_unregister_rsync_driver(
int (*get_relaxed_ordering_mode)(int *mode, void *data),
void (*put_relaxed_ordering_mode)(int mode, void *data),
void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
void *data);
#endif

View File

@@ -0,0 +1,66 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Tracepoint definitions for the nvidia subsystem.
 *
 * Follows the kernel's define_trace.h pattern: the header may be read
 * multiple times (TRACE_HEADER_MULTI_READ) and the trailing include of
 * <trace/define_trace.h> generates the tracepoint bodies (enabled by
 * CREATE_TRACE_POINTS in nv-report-err.c).
 */
#include "conftest.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM nvidia
#if !defined(_TRACE_NV_REPORT_ERR_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_NV_REPORT_ERR_H
#include <linux/tracepoint.h>
/*
 * nvidia_dev_xid: emitted by nv_report_error() with the PCI device
 * name, the XID error code, and the formatted message.
 * NV_ASSIGN_STR_ARGUMENT_COUNT (from conftest.h) selects the
 * __assign_str() call signature expected by the running kernel.
 */
TRACE_EVENT(nvidia_dev_xid,
TP_PROTO(const struct pci_dev *pdev, uint32_t error_code, const char *msg),
TP_ARGS(pdev, error_code, msg),
TP_STRUCT__entry(
__string(dev, pci_name(pdev))
__field (u32, error_code)
__string(msg, msg)
),
TP_fast_assign(
#if NV_ASSIGN_STR_ARGUMENT_COUNT == 1
__assign_str(dev);
__assign_str(msg);
#else
__assign_str(dev, pci_name(pdev));
__assign_str(msg, msg);
#endif
__entry->error_code = error_code;
),
TP_printk("Xid (PCI:%s): %u, %s", __get_str(dev), __entry->error_code, __get_str(msg))
);
#endif // !defined(_TRACE_NV_REPORT_ERR_H) || defined(TRACE_HEADER_MULTI_READ)
/* Required by define_trace.h so it can re-include this file. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE nv-tracepoint
#include <trace/define_trace.h>

View File

@@ -0,0 +1,155 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
/*
 * nv_add_mapping_context_to_file - stash mapping parameters in the per-fd
 * private data so a later mmap() on 'fd' can complete the mapping.
 *
 * nv:         device (or control-device) state the mapping belongs to
 * nvuap:      user mapping access parameters (ranges, caching, etc.)
 * prot:       protection flags to apply to the eventual mapping
 * pAllocPriv: allocation private data (used only for the control device)
 * pageIndex:  page index into the allocation (control device only)
 * fd:         file descriptor whose private data receives the context
 *
 * Returns NV_OK on success, NV_ERR_INVALID_ARGUMENT for a bad fd or a
 * device mismatch, NV_ERR_STATE_IN_USE if the fd already holds a valid
 * (unconsumed) mapping context, or an allocation failure status.
 */
NV_STATUS NV_API_CALL nv_add_mapping_context_to_file(
nv_state_t *nv,
nv_usermap_access_params_t *nvuap,
NvU32 prot,
void *pAllocPriv,
NvU64 pageIndex,
NvU32 fd
)
{
NV_STATUS status = NV_OK;
nv_alloc_mapping_context_t *nvamc = NULL;
nv_file_private_t *nvfp = NULL;
nv_linux_file_private_t *nvlfp = NULL;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
void *priv = NULL;
/* Look up (and take a reference on) the private data behind 'fd'. */
nvfp = nv_get_file_private(fd, NV_IS_CTL_DEVICE(nv), &priv);
if (nvfp == NULL)
return NV_ERR_INVALID_ARGUMENT;
nvlfp = nv_get_nvlfp_from_nvfp(nvfp);
nvamc = &nvlfp->mmap_context;
/* Only one pending mapping context is allowed per fd. */
if (nvamc->valid)
{
status = NV_ERR_STATE_IN_USE;
goto done;
}
os_mem_set((void*) nvamc, 0, sizeof(nv_alloc_mapping_context_t));
if (NV_IS_CTL_DEVICE(nv))
{
/* Control device: record the allocation and page to be mapped. */
nvamc->alloc = pAllocPriv;
nvamc->page_index = pageIndex;
}
else
{
/* The fd must have been opened against the same GPU as 'nv'. */
if (NV_STATE_PTR(nvlfp->nvptr) != nv)
{
status = NV_ERR_INVALID_ARGUMENT;
goto done;
}
/* Deep-copy the caller's memory ranges into the context. */
status = os_alloc_mem((void**) &nvamc->memArea.pRanges,
sizeof(MemoryRange) * nvuap->memArea.numRanges);
if (status != NV_OK)
{
nvamc->memArea.pRanges = NULL;
goto done;
}
nvamc->memArea.numRanges = nvuap->memArea.numRanges;
os_mem_copy(nvamc->memArea.pRanges, nvuap->memArea.pRanges,
sizeof(MemoryRange) * nvuap->memArea.numRanges);
/* NUMA-online GPUs carry page metadata along with the ranges. */
if (nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE)
{
nvamc->page_array = nvuap->page_array;
nvamc->num_pages = nvuap->num_pages;
}
nvamc->access_start = nvuap->access_start;
nvamc->access_size = nvuap->access_size;
}
nvamc->prot = prot;
/* Mark the context valid last, after all fields are populated. */
nvamc->valid = NV_TRUE;
nvamc->caching = nvuap->caching;
done:
/* Drop the reference taken by nv_get_file_private(). */
nv_put_file_private(priv);
return status;
}
/*
 * nv_alloc_user_mapping - report the physical address backing the
 * requested page of an allocation; the mapping itself is created
 * elsewhere.  'size', 'protect' and 'ppPrivate' are unused here.
 * Always returns NV_OK.
 */
NV_STATUS NV_API_CALL nv_alloc_user_mapping(
    nv_state_t *nv,
    void       *pAllocPrivate,
    NvU64       pageIndex,
    NvU32       pageOffset,
    NvU64       size,
    NvU32       protect,
    NvU64      *pUserAddress,
    void      **ppPrivate
)
{
    nv_alloc_t *at = pAllocPrivate;
    NvU64 base;

    if (at->flags.contig)
    {
        /* Contiguous allocation: one base address, index by byte offset. */
        base = at->page_table[0].phys_addr + (pageIndex * PAGE_SIZE);
    }
    else
    {
        /* Discontiguous: each page carries its own physical address. */
        base = at->page_table[pageIndex].phys_addr;
    }

    *pUserAddress = base + pageOffset;

    return NV_OK;
}
/*
 * nv_free_user_mapping - counterpart to nv_alloc_user_mapping().
 * Intentionally a no-op on this platform: nv_alloc_user_mapping() only
 * computes an address and allocates no resources, so there is nothing
 * to release here.
 */
void NV_API_CALL nv_free_user_mapping(
nv_state_t *nv,
void *pAllocPrivate,
NvU64 userAddress,
void *pPrivate
)
{
}
/*
 * nv_check_usermap_access_params - decide whether a user mapping may be
 * created given the GPU's 4K page isolation requirements.
 *
 * Returns NV_OK when the mapping is allowed, or NV_ERR_OPERATING_SYSTEM
 * when the GPU requires 4K page isolation for the requested range (the
 * OS cannot provide it here).
 */
NV_STATUS NV_API_CALL nv_check_usermap_access_params(
    nv_state_t *nv,
    const nv_usermap_access_params_t *nvuap
)
{
    const NvU64 reqAddr = nvuap->addr;
    const NvU64 reqSize = nvuap->size;

    /* Allow the mapping unless isolation is both required and needed. */
    if (!rm_gpu_need_4k_page_isolation(nv) ||
        !NV_4K_PAGE_ISOLATION_REQUIRED(reqAddr, reqSize))
    {
        return NV_OK;
    }

    NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "4K page isolation required but not available!\n");
    return NV_ERR_OPERATING_SYSTEM;
}

1104
kernel-open/nvidia/nv-vm.c Normal file
View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,39 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
/*
 * nv_get_kern_phys_address - translate a direct-mapped kernel virtual
 * address to its physical address.  Logs an error and returns 0 when
 * the address is not a valid direct-mapped kernel address.
 */
NvU64 NV_API_CALL nv_get_kern_phys_address(NvU64 address)
{
    if (!virt_addr_valid((void *)address))
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: can't translate address in %s()!\n", __FUNCTION__);
        return 0;
    }

    /* direct-mapped kernel address */
    return __pa(address);
}

6295
kernel-open/nvidia/nv.c Normal file
View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,348 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* nv_gpu_ops.h
*
* This file defines the interface between the common RM layer
* and the OS specific platform layers. (Currently supported
* are Linux and KMD)
*
*/
#ifndef _NV_GPU_OPS_H_
#define _NV_GPU_OPS_H_
#include "nvgputypes.h"
#include "nv_uvm_types.h"
#include "nv_uvm_user_types.h"

// Opaque handles for the objects managed through this interface.
typedef struct gpuSession *gpuSessionHandle;
typedef struct gpuDevice *gpuDeviceHandle;
typedef struct gpuAddressSpace *gpuAddressSpaceHandle;
typedef struct gpuTsg *gpuTsgHandle;
typedef struct gpuChannel *gpuChannelHandle;
typedef struct gpuObject *gpuObjectHandle;
typedef struct gpuRetainedChannel_struct gpuRetainedChannel;

NV_STATUS calculatePCIELinkRateMBps(NvU32 lanes,
NvU32 pciLinkMaxSpeed,
NvU32 *pcieLinkRate);

// Session, device and address-space lifecycle.
NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session);
NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session);
NV_STATUS nvGpuOpsDeviceCreate(struct gpuSession *session,
const gpuInfo *pGpuInfo,
const NvProcessorUuid *gpuGuid,
struct gpuDevice **device,
NvBool bCreateSmcPartition);
NV_STATUS nvGpuOpsDeviceDestroy(struct gpuDevice *device);
NV_STATUS nvGpuOpsAddressSpaceCreate(struct gpuDevice *device,
NvU64 vaBase,
NvU64 vaSize,
NvBool enableAts,
gpuAddressSpaceHandle *vaSpace,
UvmGpuAddressSpaceInfo *vaSpaceInfo);
NV_STATUS nvGpuOpsGetP2PCaps(gpuDeviceHandle device1,
gpuDeviceHandle device2,
getP2PCapsParams *p2pCaps);
void nvGpuOpsAddressSpaceDestroy(gpuAddressSpaceHandle vaSpace);

// Memory allocation (FB/sysmem) and PMA page management.
NV_STATUS nvGpuOpsMemoryAllocFb (gpuAddressSpaceHandle vaSpace,
NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);
NV_STATUS nvGpuOpsMemoryAllocSys (gpuAddressSpaceHandle vaSpace,
NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);
NV_STATUS nvGpuOpsPmaAllocPages(void *pPma,
NvLength pageCount,
NvU64 pageSize,
gpuPmaAllocationOptions *pPmaAllocOptions,
NvU64 *pPages);
void nvGpuOpsPmaFreePages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU64 pageSize,
NvU32 flags);
NV_STATUS nvGpuOpsPmaPinPages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU64 pageSize,
NvU32 flags);

// TSG and channel management.
NV_STATUS nvGpuOpsTsgAllocate(gpuAddressSpaceHandle vaSpace,
const gpuTsgAllocParams *params,
gpuTsgHandle *tsgHandle);
NV_STATUS nvGpuOpsChannelAllocate(const gpuTsgHandle tsgHandle,
const gpuChannelAllocParams *params,
gpuChannelHandle *channelHandle,
gpuChannelInfo *channelInfo);
NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace,
NvHandle hSrcClient, NvHandle hSrcAllocation, NvLength length, NvU64 *gpuOffset);
void nvGpuOpsTsgDestroy(struct gpuTsg *tsg);
void nvGpuOpsChannelDestroy(struct gpuChannel *channel);
void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace,
NvU64 pointer);
NV_STATUS nvGpuOpsMemoryCpuMap(gpuAddressSpaceHandle vaSpace,
NvU64 memory, NvLength length,
void **cpuPtr, NvU64 pageSize);
void nvGpuOpsMemoryCpuUnMap(gpuAddressSpaceHandle vaSpace,
void* cpuPtr);

// Capability queries and handle duplication.
NV_STATUS nvGpuOpsQueryCaps(struct gpuDevice *device,
gpuCaps *caps);
NV_STATUS nvGpuOpsQueryCesCaps(struct gpuDevice *device,
gpuCesCaps *caps);
NV_STATUS nvGpuOpsDupAllocation(struct gpuAddressSpace *srcVaSpace,
NvU64 srcAddress,
struct gpuAddressSpace *dstVaSpace,
NvU64 dstVaAlignment,
NvU64 *dstAddress);
NV_STATUS nvGpuOpsDupMemory(struct gpuDevice *device,
NvHandle hClient,
NvHandle hPhysMemory,
NvHandle *hDupMemory,
gpuMemoryInfo *pGpuMemoryInfo);
NV_STATUS nvGpuOpsGetGuid(NvHandle hClient, NvHandle hDevice,
NvHandle hSubDevice, NvU8 *gpuGuid,
unsigned guidLength);
NV_STATUS nvGpuOpsGetClientInfoFromPid(unsigned pid,
const NvU8 *gpuUuid,
NvHandle *hClient,
NvHandle *hDevice,
NvHandle *hSubDevice);
NV_STATUS nvGpuOpsFreeDupedHandle(struct gpuDevice *device,
NvHandle hPhysHandle);
NV_STATUS nvGpuOpsGetAttachedGpus(NvU8 *guidList, unsigned *numGpus);
NV_STATUS nvGpuOpsGetGpuInfo(const NvProcessorUuid *gpuUuid,
const gpuClientInfo *pGpuClientInfo,
gpuInfo *pGpuInfo);
NV_STATUS nvGpuOpsGetGpuIds(const NvU8 *pUuid, unsigned uuidLength, NvU32 *pDeviceId,
NvU32 *pSubdeviceId);

// Interrupt, fault and page-table services.
NV_STATUS nvGpuOpsOwnPageFaultIntr(struct gpuDevice *device, NvBool bOwnInterrupts);
NV_STATUS nvGpuOpsServiceDeviceInterruptsRM(struct gpuDevice *device);
NV_STATUS nvGpuOpsCheckEccErrorSlowpath(struct gpuChannel * channel, NvBool *bEccDbeSet);
NV_STATUS nvGpuOpsSetPageDirectory(struct gpuAddressSpace * vaSpace,
NvU64 physAddress, unsigned numEntries,
NvBool bVidMemAperture, NvU32 pasid,
NvU64 *dmaAddress);
NV_STATUS nvGpuOpsUnsetPageDirectory(struct gpuAddressSpace * vaSpace);
NV_STATUS nvGpuOpsGetGmmuFmt(struct gpuAddressSpace * vaSpace, void ** pFmt);
NV_STATUS nvGpuOpsInvalidateTlb(struct gpuAddressSpace * vaSpace);
NV_STATUS nvGpuOpsGetFbInfo(struct gpuDevice *device, gpuFbInfo * fbInfo);
NV_STATUS nvGpuOpsGetEccInfo(struct gpuDevice *device, gpuEccInfo * eccInfo);
NV_STATUS nvGpuOpsGetNvlinkInfo(struct gpuDevice *device, gpuNvlinkInfo * nvlinkInfo);
NV_STATUS nvGpuOpsInitFaultInfo(struct gpuDevice *device, gpuFaultInfo *pFaultInfo);
NV_STATUS nvGpuOpsDestroyFaultInfo(struct gpuDevice *device,
gpuFaultInfo *pFaultInfo);
NV_STATUS nvGpuOpsHasPendingNonReplayableFaults(gpuFaultInfo *pFaultInfo, NvBool *hasPendingFaults);
NV_STATUS nvGpuOpsGetNonReplayableFaults(gpuFaultInfo *pFaultInfo, void *faultBuffer, NvU32 *numFaults);
NV_STATUS nvGpuOpsDupAddressSpace(struct gpuDevice *device,
NvHandle hUserClient,
NvHandle hUserVASpace,
struct gpuAddressSpace **vaSpace,
UvmGpuAddressSpaceInfo *vaSpaceInfo);
NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device,
void **pPma,
const UvmPmaStatistics **pPmaPubStats);

// Access counter management.
NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo, NvU32 accessCntrIndex);
NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device,
gpuAccessCntrInfo *pAccessCntrInfo);
NV_STATUS nvGpuOpsOwnAccessCntrIntr(struct gpuSession *session,
gpuAccessCntrInfo *pAccessCntrInfo,
NvBool bOwnInterrupts);
NV_STATUS nvGpuOpsEnableAccessCntr(struct gpuDevice *device,
gpuAccessCntrInfo *pAccessCntrInfo,
const gpuAccessCntrConfig *pAccessCntrConfig);
NV_STATUS nvGpuOpsDisableAccessCntr(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo);

// Peer-to-peer objects and external allocation queries.
NV_STATUS nvGpuOpsP2pObjectCreate(struct gpuDevice *device1,
struct gpuDevice *device2,
NvHandle *hP2pObject);
NV_STATUS nvGpuOpsP2pObjectDestroy(struct gpuSession *session,
NvHandle hP2pObject);
NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace,
NvHandle hDupedMemory,
NvU64 offset,
NvU64 size,
gpuExternalMappingInfo *pGpuExternalMappingInfo);
NV_STATUS nvGpuOpsGetExternalAllocPhysAddrs(struct gpuAddressSpace *vaSpace,
NvHandle hDupedMemory,
NvU64 offset,
NvU64 size,
gpuExternalPhysAddrInfo *pGpuExternalPhysAddrInfo);

// Retained channel inspection and control.
NV_STATUS nvGpuOpsRetainChannel(struct gpuAddressSpace *vaSpace,
NvHandle hClient,
NvHandle hChannel,
gpuRetainedChannel **retainedChannel,
gpuChannelInstanceInfo *channelInstanceInfo);
void nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel);
NV_STATUS nvGpuOpsBindChannelResources(gpuRetainedChannel *retainedChannel,
gpuChannelResourceBindParams *channelResourceBindParams);
void nvGpuOpsStopChannel(gpuRetainedChannel *retainedChannel, NvBool bImmediate);
NV_STATUS nvGpuOpsGetChannelResourcePtes(struct gpuAddressSpace *vaSpace,
NvP64 resourceDescriptor,
NvU64 offset,
NvU64 size,
gpuExternalMappingInfo *pGpuExternalMappingInfo);
NV_STATUS nvGpuOpsReportNonReplayableFault(struct gpuDevice *device,
const void *pFaultPacket);
// Private interface used for windows only
#if defined(NV_WINDOWS)
NV_STATUS nvGpuOpsGetRmHandleForSession(gpuSessionHandle hSession, NvHandle *hRmClient);
NV_STATUS nvGpuOpsGetRmHandleForChannel(gpuChannelHandle hChannel, NvHandle *hRmChannel);
#endif // defined(NV_WINDOWS)
// Interface used for SR-IOV heavy
NV_STATUS nvGpuOpsPagingChannelAllocate(struct gpuDevice *device,
const gpuPagingChannelAllocParams *params,
gpuPagingChannelHandle *channelHandle,
gpuPagingChannelInfo *channelinfo);
void nvGpuOpsPagingChannelDestroy(UvmGpuPagingChannel *channel);
NV_STATUS nvGpuOpsPagingChannelsMap(struct gpuAddressSpace *srcVaSpace,
NvU64 srcAddress,
struct gpuDevice *device,
NvU64 *dstAddress);
void nvGpuOpsPagingChannelsUnmap(struct gpuAddressSpace *srcVaSpace,
NvU64 srcAddress,
struct gpuDevice *device);
NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel,
char *methodStream,
NvU32 methodStreamSize);
NV_STATUS nvGpuOpsFlushReplayableFaultBuffer(gpuFaultInfo *pFaultInfo,
NvBool bCopyAndFlush);
NV_STATUS nvGpuOpsTogglePrefetchFaults(gpuFaultInfo *pFaultInfo,
NvBool bEnable);
void nvGpuOpsReportFatalError(NV_STATUS error);
// Interface used for CCSL
NV_STATUS nvGpuOpsCcslContextInit(struct ccslContext_t **ctx,
gpuChannelHandle channel);
NV_STATUS nvGpuOpsCcslContextClear(struct ccslContext_t *ctx);
NV_STATUS nvGpuOpsCcslRotateKey(UvmCslContext *contextList[],
NvU32 contextListCount);
NV_STATUS nvGpuOpsCcslRotateIv(struct ccslContext_t *ctx,
NvU8 direction);
NV_STATUS nvGpuOpsCcslEncrypt(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *outputBuffer,
NvU8 *authTagBuffer);
NV_STATUS nvGpuOpsCcslEncryptWithIv(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *encryptIv,
NvU8 *outputBuffer,
NvU8 *authTagBuffer);
NV_STATUS nvGpuOpsCcslDecrypt(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 const *decryptIv,
NvU32 keyRotationId,
NvU8 *outputBuffer,
NvU8 const *addAuthData,
NvU32 addAuthDataSize,
NvU8 const *authTagBuffer);
NV_STATUS nvGpuOpsCcslSign(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *authTagBuffer);
NV_STATUS nvGpuOpsQueryMessagePool(struct ccslContext_t *ctx,
NvU8 direction,
NvU64 *messageNum);
NV_STATUS nvGpuOpsIncrementIv(struct ccslContext_t *ctx,
NvU8 direction,
NvU64 increment,
NvU8 *iv);
NV_STATUS nvGpuOpsLogEncryption(struct ccslContext_t *ctx,
NvU8 direction,
NvU32 bufferSize);
#endif /* _NV_GPU_OPS_H_*/

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,57 @@
###########################################################################
# List of kernel interface layer C sources compiled into nvidia.ko.
# NVIDIA_SOURCES is consumed by nvidia.Kbuild, which derives the object
# list (NVIDIA_OBJECTS) from it via patsubst.
###########################################################################
NVIDIA_SOURCES ?=
NVIDIA_SOURCES_CXX ?=
NVIDIA_SOURCES += nvidia/nv-platform.c
NVIDIA_SOURCES += nvidia/nv-dsi-parse-panel-props.c
NVIDIA_SOURCES += nvidia/nv-bpmp.c
NVIDIA_SOURCES += nvidia/nv-gpio.c
NVIDIA_SOURCES += nvidia/nv-backlight.c
NVIDIA_SOURCES += nvidia/nv-imp.c
NVIDIA_SOURCES += nvidia/nv-platform-pm.c
NVIDIA_SOURCES += nvidia/nv-ipc-soc.c
NVIDIA_SOURCES += nvidia/nv.c
NVIDIA_SOURCES += nvidia/nv-pci.c
NVIDIA_SOURCES += nvidia/nv-dmabuf.c
NVIDIA_SOURCES += nvidia/nv-nano-timer.c
NVIDIA_SOURCES += nvidia/nv-acpi.c
NVIDIA_SOURCES += nvidia/nv-cray.c
NVIDIA_SOURCES += nvidia/nv-dma.c
NVIDIA_SOURCES += nvidia/nv-i2c.c
NVIDIA_SOURCES += nvidia/nv-mmap.c
NVIDIA_SOURCES += nvidia/nv-p2p.c
NVIDIA_SOURCES += nvidia/nv-pat.c
NVIDIA_SOURCES += nvidia/nv-procfs.c
NVIDIA_SOURCES += nvidia/nv-usermap.c
NVIDIA_SOURCES += nvidia/nv-vm.c
NVIDIA_SOURCES += nvidia/nv-vtophys.c
NVIDIA_SOURCES += nvidia/os-interface.c
NVIDIA_SOURCES += nvidia/os-mlock.c
NVIDIA_SOURCES += nvidia/os-pci.c
NVIDIA_SOURCES += nvidia/os-registry.c
NVIDIA_SOURCES += nvidia/os-usermap.c
NVIDIA_SOURCES += nvidia/nv-modeset-interface.c
NVIDIA_SOURCES += nvidia/nv-pci-table.c
NVIDIA_SOURCES += nvidia/nv-kthread-q.c
NVIDIA_SOURCES += nvidia/nv-memdbg.c
NVIDIA_SOURCES += nvidia/nv-report-err.c
NVIDIA_SOURCES += nvidia/nv-rsync.c
NVIDIA_SOURCES += nvidia/nv-msi.c
NVIDIA_SOURCES += nvidia/nv-caps.c
NVIDIA_SOURCES += nvidia/nv-caps-imex.c
NVIDIA_SOURCES += nvidia/nv-clk.c
NVIDIA_SOURCES += nvidia/nv-host1x.c
NVIDIA_SOURCES += nvidia/nv_uvm_interface.c
# libspdm crypto glue (presumably backing SPDM-based GPU attestation --
# confirm against the libspdm sources).
NVIDIA_SOURCES += nvidia/libspdm_aead.c
NVIDIA_SOURCES += nvidia/libspdm_ecc.c
NVIDIA_SOURCES += nvidia/libspdm_hkdf.c
NVIDIA_SOURCES += nvidia/libspdm_rand.c
NVIDIA_SOURCES += nvidia/libspdm_shash.c
NVIDIA_SOURCES += nvidia/libspdm_rsa.c
NVIDIA_SOURCES += nvidia/libspdm_aead_aes_gcm.c
NVIDIA_SOURCES += nvidia/libspdm_sha.c
NVIDIA_SOURCES += nvidia/libspdm_hmac_sha.c
NVIDIA_SOURCES += nvidia/libspdm_internal_crypt_lib.c
NVIDIA_SOURCES += nvidia/libspdm_hkdf_sha.c
NVIDIA_SOURCES += nvidia/libspdm_ec.c
NVIDIA_SOURCES += nvidia/libspdm_x509.c
NVIDIA_SOURCES += nvidia/libspdm_rsa_ext.c

View File

@@ -0,0 +1,247 @@
###########################################################################
# Kbuild fragment for nvidia.ko
###########################################################################
#
# Define NVIDIA_{SOURCES,OBJECTS}
#
include $(src)/nvidia/nvidia-sources.Kbuild
NVIDIA_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_SOURCES))
obj-m += nvidia.o
nvidia-y := $(NVIDIA_OBJECTS)
NVIDIA_KO = nvidia/nvidia.ko
#
# nv-kernel.o_binary is the core binary component of nvidia.ko, shared
# across all UNIX platforms. Create a symlink, "nv-kernel.o" that
# points to nv-kernel.o_binary, and add nv-kernel.o to the list of
# objects to link into nvidia.ko.
#
# Note that:
# - The kbuild "clean" rule will delete all objects in nvidia-y (which
# is why we use a symlink instead of just adding nv-kernel.o_binary
# to nvidia-y).
# - kbuild normally uses the naming convention of ".o_shipped" for
# binary files. That is not used here, because the kbuild rule to
# create the "normal" object file from ".o_shipped" does a copy, not
# a symlink. This file is quite large, so a symlink is preferred.
# - The file added to nvidia-y should be relative to gmake's cwd.
# But, the target for the symlink rule should be prepended with $(obj).
# - The "symlink" command is called using kbuild's if_changed macro to
# generate an .nv-kernel.o.cmd file which can be used on subsequent
# runs to determine if the command line to create the symlink changed
# and needs to be re-executed.
#
NVIDIA_BINARY_OBJECT := $(src)/nvidia/nv-kernel.o_binary
NVIDIA_BINARY_OBJECT_O := nvidia/nv-kernel.o
targets += $(NVIDIA_BINARY_OBJECT_O)
$(obj)/$(NVIDIA_BINARY_OBJECT_O): $(NVIDIA_BINARY_OBJECT) FORCE
$(call if_changed,symlink)
nvidia-y += $(NVIDIA_BINARY_OBJECT_O)
#
# Define nvidia.ko-specific CFLAGS.
#
NVIDIA_CFLAGS += -I$(src)/nvidia
NVIDIA_CFLAGS += -DNVIDIA_UNDEF_LEGACY_BIT_MACROS
# Per-build-type flags. Non-release builds also define NV_MEM_LOGGER
# (presumably enabling the nv-memdbg allocation tracking -- confirm).
ifeq ($(NV_BUILD_TYPE),release)
NVIDIA_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG
endif
ifeq ($(NV_BUILD_TYPE),develop)
NVIDIA_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_MEM_LOGGER
endif
ifeq ($(NV_BUILD_TYPE),debug)
NVIDIA_CFLAGS += -DDEBUG -D_DEBUG -UNDEBUG -DNV_MEM_LOGGER
endif
$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_OBJECTS), $(NVIDIA_CFLAGS))
#
# nv-procfs.c requires nv-compiler.h
#
# Generate a header capturing the compiler version string at build time.
NV_COMPILER_VERSION_HEADER = $(obj)/nv_compiler.h
$(NV_COMPILER_VERSION_HEADER):
@echo \#define NV_COMPILER \"`$(CC) -v 2>&1 | tail -n 1`\" > $@
$(obj)/nvidia/nv-procfs.o: $(NV_COMPILER_VERSION_HEADER)
clean-files += $(NV_COMPILER_VERSION_HEADER)
#
# Build nv-interface.o from the kernel interface layer objects, suitable
# for further processing by the top-level makefile to produce a precompiled
# kernel interface file.
#
NVIDIA_INTERFACE := nvidia/nv-interface.o
# Linux kernel v5.12 and later looks at "always-y", Linux kernel versions
# before v5.6 looks at "always"; kernel versions between v5.12 and v5.6
# look at both.
always += $(NVIDIA_INTERFACE)
always-y += $(NVIDIA_INTERFACE)
$(obj)/$(NVIDIA_INTERFACE): $(addprefix $(obj)/,$(NVIDIA_OBJECTS))
$(LD) -r -o $@ $^
#
# Register the conftests needed by nvidia.ko
#
# Each name below corresponds to a compile test in kernel-open/conftest.sh;
# the results are emitted as NV_* defines consumed by the sources.
NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_OBJECTS)
# Function-presence tests.
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_array_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_array_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_cache
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_wc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_driver_hardened
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_driver_hardened_wc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_cache_shared
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data
NV_CONFTEST_FUNCTION_COMPILE_TESTS += xen_ioemu_inject_msi
NV_CONFTEST_FUNCTION_COMPILE_TESTS += phys_to_dma
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_attr_macros
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_page_attrs
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_rebar_get_possible_sizes
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_backlight_device_by_name
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_direct_map_resource
NV_CONFTEST_FUNCTION_COMPILE_TESTS += flush_cache_all
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += jiffies_to_timespec
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_atomic_ops_to_root
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vga_tryget
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_platform_has
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_attr_guest_sev_snp
NV_CONFTEST_FUNCTION_COMPILE_TESTS += hv_get_isolation_type
NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter
NV_CONFTEST_FUNCTION_COMPILE_TESTS += follow_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ptep_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += add_memory_driver_managed
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_dma_configure
NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_new_client_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_of_icc_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_put
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_set_bw
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map_atomic
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_attachment_has_peer2peer
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_clk_bulk_get_all
NV_CONFTEST_FUNCTION_COMPILE_TESTS += thermal_zone_for_each_trip
NV_CONFTEST_FUNCTION_COMPILE_TESTS += thermal_bind_cdev_to_trip
NV_CONFTEST_FUNCTION_COMPILE_TESTS += thermal_unbind_cdev_from_trip
NV_CONFTEST_FUNCTION_COMPILE_TESTS += update_devfreq
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_task_ioprio
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mdev_set_iommu_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += offline_and_remove_memory
NV_CONFTEST_FUNCTION_COMPILE_TESTS += stack_trace
NV_CONFTEST_FUNCTION_COMPILE_TESTS += crypto_tfm_ctx_aligned
NV_CONFTEST_FUNCTION_COMPILE_TESTS += assign_str
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioasid_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mm_pasid_drop
NV_CONFTEST_FUNCTION_COMPILE_TESTS += iommu_sva_bind_device_has_drvdata_arg
NV_CONFTEST_FUNCTION_COMPILE_TESTS += shrinker_alloc
# Exported-symbol presence tests.
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_sme_active
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_map_sg_attrs
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_dma_ops
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present___close_fd
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_close_fd
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pxm_to_node
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_screen_info
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_screen_info
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_i2c_bus_status
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_fuse_control_read
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pci_find_host_bridge
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_send_cmd
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_set_init_cb
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_clear_init_cb
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_alloc_mem_from_gscco
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_free_gscco_mem
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_memory_block_size_bytes
NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto
NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto_akcipher_verify
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pte
NV_CONFTEST_SYMBOL_COMPILE_TESTS += follow_pte_arg_vma
NV_CONFTEST_SYMBOL_COMPILE_TESTS += dma_buf_ops_attach_has_arg_dev
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pfnmap_start
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_pci_ats_supported
NV_CONFTEST_SYMBOL_COMPILE_TESTS += ecc_digits_from_bytes
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_set_memory_encrypted
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_set_memory_decrypted
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl___platform_driver_register
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present___platform_driver_register
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_mutex_destroy
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_hrtimer_setup
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync
# Type/signature tests.
NV_CONFTEST_TYPE_COMPILE_TESTS += vmf_insert_pfn_prot
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += sysfs_slab_unlink
NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += vmalloc_has_pgprot_t_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_channel_state
NV_CONFTEST_TYPE_COMPILE_TESTS += remove_memory_has_nid_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += add_memory_driver_managed_has_mhp_flags_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += num_registered_fb
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_driver_has_driver_managed_dma
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += memory_failure_queue_has_trapno_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += foll_longterm_present
NV_CONFTEST_TYPE_COMPILE_TESTS += bus_type_has_iommu_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += of_property_for_each_u32_has_internal_args
NV_CONFTEST_TYPE_COMPILE_TESTS += platform_driver_struct_remove_returns_void
NV_CONFTEST_TYPE_COMPILE_TESTS += class_create_has_no_owner_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += class_devnode_has_const_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += devfreq_dev_profile_has_is_cooling_device
NV_CONFTEST_TYPE_COMPILE_TESTS += devfreq_has_freq_table
NV_CONFTEST_TYPE_COMPILE_TESTS += devfreq_has_suspend_freq
NV_CONFTEST_TYPE_COMPILE_TESTS += has_enum_pidtype_tgid
NV_CONFTEST_TYPE_COMPILE_TESTS += bpmp_mrq_has_strap_set
NV_CONFTEST_TYPE_COMPILE_TESTS += register_shrinker_has_format_arg
# Generic build-environment tests.
NV_CONFTEST_GENERIC_COMPILE_TESTS += dom0_kernel_present
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_vgpu_kvm_build
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_build
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_csp_build
NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages
NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages_remote
NV_CONFTEST_GENERIC_COMPILE_TESTS += pin_user_pages
NV_CONFTEST_GENERIC_COMPILE_TESTS += pin_user_pages_remote
NV_CONFTEST_GENERIC_COMPILE_TESTS += pm_runtime_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += pm_domain_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += vm_fault_t
NV_CONFTEST_GENERIC_COMPILE_TESTS += pci_class_multimedia_hd_audio
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += vfio_pci_core_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += cmd_uphy_display_port_init
NV_CONFTEST_GENERIC_COMPILE_TESTS += cmd_uphy_display_port_off
NV_CONFTEST_GENERIC_COMPILE_TESTS += memory_failure_mf_sw_simulated_defined
NV_CONFTEST_GENERIC_COMPILE_TESTS += device_vm_build
NV_CONFTEST_GENERIC_COMPILE_TESTS += pcie_reset_flr
NV_CONFTEST_GENERIC_COMPILE_TESTS += module_import_ns_takes_constant

View File

@@ -0,0 +1,46 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/* Pre-allocate an AEAD cipher context for later encrypt/decrypt calls.
 * Returns true on success. */
bool libspdm_aead_gcm_prealloc(void **context);
/* Release a context obtained from libspdm_aead_gcm_prealloc(). */
void libspdm_aead_free(void *context);
/* AES-GCM encrypt using a pre-allocated context: produces ciphertext in
 * data_out (length in *data_out_size) and the authentication tag in
 * tag_out.  Returns true on success. */
bool libspdm_aead_aes_gcm_encrypt_prealloc(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag_out, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
/* AES-GCM decrypt using a pre-allocated context: verifies 'tag' and, on
 * success, writes plaintext to data_out (length in *data_out_size). */
bool libspdm_aead_aes_gcm_decrypt_prealloc(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
/* Verify the required kernel crypto backend support is available. */
bool libspdm_check_crypto_backend(void);
/* Base64 encode/decode helpers; *p_dstlen receives the output length. */
bool libspdm_encode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen);
bool libspdm_decode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen);
/* Certificate format conversion between PEM and DER encodings. */
bool libspdm_pem_to_der(const uint8_t *pem_cert, uint8_t *der_cert, size_t pem_size, size_t *p_der_size);
bool libspdm_der_to_pem(const uint8_t *der_cert, uint8_t *pem_cert, size_t der_size, size_t *p_pem_size);

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,331 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#if defined(NVCPU_FAMILY_X86) && defined(NV_FOLL_LONGTERM_PRESENT) && \
(defined(NV_PIN_USER_PAGES_HAS_ARGS_VMAS) || \
defined(NV_GET_USER_PAGES_HAS_VMAS_ARG))
#define NV_NUM_PIN_PAGES_PER_ITERATION 0x80000
#endif
/*!
 * @brief Resolves the PFN backing a virtual address in an IO/PFN-mapped VMA,
 * using whichever interface the running kernel exports: the newer
 * follow_pfnmap_start()/follow_pfnmap_end() pair when present, otherwise
 * follow_pte(). If neither symbol is exported this always fails.
 *
 * @param[in]  vma     VMA containing the address to translate.
 * @param[in]  address Virtual address whose PFN is wanted.
 * @param[out] pfn     On success, receives the resolved PFN. Only valid
 *                     when 0 is returned.
 *
 * @return 0 on success, negative error code otherwise.
 */
static inline int nv_follow_flavors(struct vm_area_struct *vma,
                                    unsigned long address,
                                    unsigned long *pfn)
{
#if NV_IS_EXPORT_SYMBOL_PRESENT_follow_pfnmap_start
    struct follow_pfnmap_args args = {};
    int rc;

    args.address = address;
    args.vma = vma;

    rc = follow_pfnmap_start(&args);
    if (rc)
        return rc;

    *pfn = args.pfn;
    follow_pfnmap_end(&args);
    return 0;
#elif NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
    int status = 0;
    spinlock_t *ptl;
    pte_t *ptep;

    //
    // Only IO/PFN-mapped VMAs can be translated here. Fail explicitly:
    // returning 0 without writing *pfn would let callers (which only
    // check for a negative return) consume an uninitialized PFN.
    //
    if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
        return -EINVAL;

    //
    // The first argument of follow_pte() was changed from
    // mm_struct to vm_area_struct in kernel 6.10.
    //
#if defined(NV_FOLLOW_PTE_ARG1_VMA)
    status = follow_pte(vma, address, &ptep, &ptl);
#else
    status = follow_pte(vma->vm_mm, address, &ptep, &ptl);
#endif
    if (status)
        return status;

#if defined(NV_PTEP_GET_PRESENT)
    *pfn = pte_pfn(ptep_get(ptep));
#else
    *pfn = pte_pfn(READ_ONCE(*ptep));
#endif

    // The lock is acquired inside follow_pte()
    pte_unmap_unlock(ptep, ptl);
    return 0;
#else
    return -1;
#endif // NV_IS_EXPORT_SYMBOL_PRESENT_follow_pfnmap_start
}
/*
 * Resolve the PFN for an address within a VMA. Prefers the kernel's
 * follow_pfn() when it is available; otherwise dispatches to the
 * kernel-version-specific helpers in nv_follow_flavors().
 * Returns 0 on success, negative on failure.
 */
static inline int nv_follow_pfn(struct vm_area_struct *vma,
                                unsigned long address,
                                unsigned long *pfn)
{
#if !defined(NV_FOLLOW_PFN_PRESENT)
    return nv_follow_flavors(vma, address, pfn);
#else
    return follow_pfn(vma, address, pfn);
#endif
}
/*!
 * @brief Resolves the physical addresses backing a user IO address range.
 *
 * The range must be physically contiguous: each resolved page address must
 * directly follow the previous one. This matches the contiguous,
 * uncacheable descriptor that osCreateOsDescriptorFromIoMemory() builds
 * from the user-provided flags, so the actual mapping is verified against
 * the user's declaration.
 *
 * @param[in]     vma        VMA covering the virtual range
 *                           [start, start + page_count pages).
 * @param[in]     start      Base virtual address of the IO range.
 * @param[in]     page_count Number of pages in the range.
 * @param[in,out] pte_array  Receives one physical address per page; must
 *                           hold at least page_count entries.
 *
 * @return NV_OK if all PFNs resolved and the range is contiguous,
 *         NV_ERR_INVALID_ADDRESS otherwise.
 */
static NV_STATUS get_io_ptes(struct vm_area_struct *vma,
                             NvUPtr start,
                             NvU64 page_count,
                             NvU64 **pte_array)
{
    NvU64 i;
    NvU64 prev_addr = 0;
    unsigned long pfn;

    for (i = 0; i < page_count; i++)
    {
        if (nv_follow_pfn(vma, start + (i * PAGE_SIZE), &pfn) < 0)
            return NV_ERR_INVALID_ADDRESS;

        pte_array[i] = (NvU64 *)(pfn << PAGE_SHIFT);

        // Reject any discontinuity relative to the previous page.
        if ((i != 0) && ((NvU64)pte_array[i] != (prev_addr + PAGE_SIZE)))
            return NV_ERR_INVALID_ADDRESS;

        prev_addr = (NvU64)pte_array[i];
    }

    return NV_OK;
}
/*!
 * @brief Builds an array of physical addresses for a user-space IO memory
 * range (a VM_IO/VM_PFNMAP mapping).
 *
 * The whole range must lie within a single VMA; contiguity of the backing
 * pages is enforced by get_io_ptes().
 *
 * @param[in]  address    User virtual base address of the IO range.
 * @param[in]  page_count Number of pages in the range.
 * @param[out] pte_array  On success, receives a newly allocated array of
 *                        page_count physical addresses. The caller owns the
 *                        array and must release it with os_free_mem().
 *
 * @return NV_OK on success, an NV_STATUS error code otherwise.
 */
NV_STATUS NV_API_CALL os_lookup_user_io_memory(
    void *address,
    NvU64 page_count,
    NvU64 **pte_array
)
{
    NV_STATUS rmStatus;
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    unsigned long pfn;
    NvUPtr start = (NvUPtr)address;
    void **result_array;

    // The allocation and mmap-lock acquisition below may sleep, so refuse
    // to run in atomic context.
    if (!NV_MAY_SLEEP())
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: %s(): invalid context!\n", __FUNCTION__);
        return NV_ERR_NOT_SUPPORTED;
    }

    rmStatus = os_alloc_mem((void **)&result_array, (page_count * sizeof(NvP64)));
    if (rmStatus != NV_OK)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: failed to allocate page table!\n");
        return rmStatus;
    }

    nv_mmap_read_lock(mm);

    // find the first VMA which intersects the interval start_addr..end_addr-1,
    vma = find_vma_intersection(mm, start, start+1);

    // Verify that the given address range is contained in a single vma
    // and that the VMA is an IO/PFN mapping large enough for page_count pages.
    if ((vma == NULL) || ((vma->vm_flags & (VM_IO | VM_PFNMAP)) == 0) ||
            !((vma->vm_start <= start) &&
              ((vma->vm_end - start) >> PAGE_SHIFT >= page_count)))
    {
        nv_printf(NV_DBG_ERRORS,
                "Cannot map memory with base addr 0x%llx and size of 0x%llx pages\n",
                start ,page_count);
        rmStatus = NV_ERR_INVALID_ADDRESS;
        goto done;
    }

    // Probe the first page up front; get_io_ptes() below re-walks the
    // whole range.
    if (nv_follow_pfn(vma, start, &pfn) < 0)
    {
        rmStatus = NV_ERR_INVALID_ADDRESS;
        goto done;
    }

    rmStatus = get_io_ptes(vma, start, page_count, (NvU64 **)result_array);
    if (rmStatus == NV_OK)
        *pte_array = (NvU64 *)result_array;

done:
    nv_mmap_read_unlock(mm);

    // On any failure the result array is owned here and must be released.
    if (rmStatus != NV_OK)
    {
        os_free_mem(result_array);
    }

    return rmStatus;
}
/*!
 * @brief Pins a range of user pages in memory so they can be accessed
 * safely outside the faulting context.
 *
 * @param[in]  address    User virtual base address of the range.
 * @param[in]  page_count Number of pages to pin.
 * @param[out] page_array On success, receives a newly allocated array of
 *                        struct page pointers (one per pinned page). The
 *                        array and the pins are released by
 *                        os_unlock_user_pages().
 * @param[in]  flags      _LOCK_USER_PAGES flags; _WRITE requests writable
 *                        (FOLL_WRITE) pinning.
 *
 * @return NV_OK on success, an NV_STATUS error code otherwise. On failure
 *         no pages remain pinned and no memory remains allocated.
 */
NV_STATUS NV_API_CALL os_lock_user_pages(
    void *address,
    NvU64 page_count,
    void **page_array,
    NvU32 flags
)
{
    NV_STATUS rmStatus;
    struct mm_struct *mm = current->mm;
    struct page **user_pages;
    NvU64 i;
    NvU64 npages = page_count;
    NvU64 pinned = 0;
    unsigned int gup_flags = DRF_VAL(_LOCK_USER_PAGES, _FLAGS, _WRITE, flags) ? FOLL_WRITE : 0;
    long ret;

#if defined(NVCPU_FAMILY_X86) && defined(NV_FOLL_LONGTERM_PRESENT)
    gup_flags |= FOLL_LONGTERM;
#endif

    // Pinning pages and taking the mmap lock may sleep.
    if (!NV_MAY_SLEEP())
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: %s(): invalid context!\n", __FUNCTION__);
        return NV_ERR_NOT_SUPPORTED;
    }

    rmStatus = os_alloc_mem((void **)&user_pages,
            (page_count * sizeof(*user_pages)));
    if (rmStatus != NV_OK)
    {
        nv_printf(NV_DBG_ERRORS,
                "NVRM: failed to allocate page table!\n");
        return rmStatus;
    }

    nv_mmap_read_lock(mm);

    ret = NV_PIN_USER_PAGES((unsigned long)address,
                            npages, gup_flags, user_pages);
    if (ret > 0)
    {
        pinned = ret;
    }
#if defined(NVCPU_FAMILY_X86) && defined(NV_FOLL_LONGTERM_PRESENT) && \
    (defined(NV_PIN_USER_PAGES_HAS_ARGS_VMAS) || \
     defined(NV_GET_USER_PAGES_HAS_VMAS_ARG))
    //
    // NV_PIN_USER_PAGES() passes in NULL for the vmas parameter (if required)
    // in pin_user_pages() (or get_user_pages() if pin_user_pages() does not
    // exist). For kernels which do not contain the commit 52650c8b466b
    // (mm/gup: remove the vma allocation from gup_longterm_locked()), if
    // FOLL_LONGTERM is passed in, this results in the kernel trying to kcalloc
    // the vmas array, and since the limit for kcalloc is 4 MB, it results in
    // NV_PIN_USER_PAGES() failing with ENOMEM if more than
    // NV_NUM_PIN_PAGES_PER_ITERATION pages are requested on 64-bit systems.
    //
    // As a workaround, if we requested more than
    // NV_NUM_PIN_PAGES_PER_ITERATION pages and failed with ENOMEM, try again
    // with multiple calls of NV_NUM_PIN_PAGES_PER_ITERATION pages at a time.
    //
    else if ((ret == -ENOMEM) &&
             (page_count > NV_NUM_PIN_PAGES_PER_ITERATION))
    {
        // Re-pin the whole range in bounded chunks; 'pinned' tracks overall
        // progress and 'ret' the size of the last successful chunk.
        for (pinned = 0; pinned < page_count; pinned += ret)
        {
            npages = page_count - pinned;
            if (npages > NV_NUM_PIN_PAGES_PER_ITERATION)
            {
                npages = NV_NUM_PIN_PAGES_PER_ITERATION;
            }

            ret = NV_PIN_USER_PAGES(((unsigned long) address) + (pinned * PAGE_SIZE),
                                    npages, gup_flags, &user_pages[pinned]);
            if (ret <= 0)
            {
                break;
            }
        }
    }
#endif

    nv_mmap_read_unlock(mm);

    // Partial success is treated as failure: release whatever was pinned.
    if (pinned < page_count)
    {
        for (i = 0; i < pinned; i++)
            NV_UNPIN_USER_PAGE(user_pages[i]);
        os_free_mem(user_pages);

        return NV_ERR_INVALID_ADDRESS;
    }

    *page_array = user_pages;

    return NV_OK;
}
/*!
 * @brief Releases pages previously pinned by os_lock_user_pages().
 *
 * @param[in] page_count Number of entries in page_array.
 * @param[in] page_array Array of struct page pointers returned by
 *                       os_lock_user_pages(); freed here.
 * @param[in] flags      The _LOCK_USER_PAGES flags used when pinning; if
 *                       _WRITE was set, each page is marked dirty before
 *                       being unpinned.
 *
 * @return NV_OK always.
 */
NV_STATUS NV_API_CALL os_unlock_user_pages(
    NvU64 page_count,
    void *page_array,
    NvU32 flags
)
{
    NvBool write = FLD_TEST_DRF(_LOCK_USER_PAGES, _FLAGS, _WRITE, _YES, flags);
    struct page **user_pages = page_array;
    //
    // Use a 64-bit index to match page_count: an NvU32 counter would wrap
    // and never terminate for counts >= 2^32.
    //
    NvU64 i;

    for (i = 0; i < page_count; i++)
    {
        if (write)
            set_page_dirty_lock(user_pages[i]);
        NV_UNPIN_USER_PAGE(user_pages[i]);
    }

    os_free_mem(user_pages);

    return NV_OK;
}

225
kernel-open/nvidia/os-pci.c Normal file
View File

@@ -0,0 +1,225 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
/*
 * Look up the PCI device at domain:bus:slot.function and return it as an
 * opaque handle for the os_pci_* accessors; NULL if not found or if called
 * from a context that cannot sleep. Optionally reports the device's vendor
 * and device IDs through the out parameters.
 */
void* NV_API_CALL os_pci_init_handle(
    NvU32 domain,
    NvU8 bus,
    NvU8 slot,
    NvU8 function,
    NvU16 *vendor,
    NvU16 *device
)
{
    struct pci_dev *dev = NULL;

    if (NV_MAY_SLEEP())
    {
        dev = NV_GET_DOMAIN_BUS_AND_SLOT(domain, bus,
                                         PCI_DEVFN(slot, function));
        if (dev != NULL)
        {
            if (vendor != NULL)
                *vendor = dev->vendor;
            if (device != NULL)
                *device = dev->device;
            pci_dev_put(dev); /* TODO: Fix me! (hotplug) */
        }
    }

    return (void *) dev;
}
/*
 * Read one byte from the device's PCI configuration space.
 * Offsets at or beyond NV_PCIE_CFG_MAX_OFFSET are rejected.
 */
NV_STATUS NV_API_CALL os_pci_read_byte(
    void *handle,
    NvU32 offset,
    NvU8 *pReturnValue
)
{
    if (offset < NV_PCIE_CFG_MAX_OFFSET)
    {
        pci_read_config_byte((struct pci_dev *) handle, offset, pReturnValue);
        return NV_OK;
    }

    // Out-of-range reads report all 1s, like a failed config access.
    *pReturnValue = 0xff;
    return NV_ERR_NOT_SUPPORTED;
}
/*
 * Read one 16-bit word from the device's PCI configuration space.
 * Offsets at or beyond NV_PCIE_CFG_MAX_OFFSET are rejected.
 */
NV_STATUS NV_API_CALL os_pci_read_word(
    void *handle,
    NvU32 offset,
    NvU16 *pReturnValue
)
{
    if (offset < NV_PCIE_CFG_MAX_OFFSET)
    {
        pci_read_config_word((struct pci_dev *) handle, offset, pReturnValue);
        return NV_OK;
    }

    // Out-of-range reads report all 1s, like a failed config access.
    *pReturnValue = 0xffff;
    return NV_ERR_NOT_SUPPORTED;
}
/*
 * Read one 32-bit dword from the device's PCI configuration space.
 * Offsets at or beyond NV_PCIE_CFG_MAX_OFFSET are rejected.
 */
NV_STATUS NV_API_CALL os_pci_read_dword(
    void *handle,
    NvU32 offset,
    NvU32 *pReturnValue
)
{
    if (offset < NV_PCIE_CFG_MAX_OFFSET)
    {
        pci_read_config_dword((struct pci_dev *) handle, offset, pReturnValue);
        return NV_OK;
    }

    // Out-of-range reads report all 1s, like a failed config access.
    *pReturnValue = 0xffffffff;
    return NV_ERR_NOT_SUPPORTED;
}
/*
 * Write one byte into the device's PCI configuration space.
 * Offsets at or beyond NV_PCIE_CFG_MAX_OFFSET are rejected.
 */
NV_STATUS NV_API_CALL os_pci_write_byte(
    void *handle,
    NvU32 offset,
    NvU8 value
)
{
    if (offset < NV_PCIE_CFG_MAX_OFFSET)
    {
        pci_write_config_byte((struct pci_dev *) handle, offset, value);
        return NV_OK;
    }

    return NV_ERR_NOT_SUPPORTED;
}
/*
 * Write one 16-bit word into the device's PCI configuration space.
 * Offsets at or beyond NV_PCIE_CFG_MAX_OFFSET are rejected.
 */
NV_STATUS NV_API_CALL os_pci_write_word(
    void *handle,
    NvU32 offset,
    NvU16 value
)
{
    if (offset < NV_PCIE_CFG_MAX_OFFSET)
    {
        pci_write_config_word((struct pci_dev *) handle, offset, value);
        return NV_OK;
    }

    return NV_ERR_NOT_SUPPORTED;
}
/*
 * Write one 32-bit dword into the device's PCI configuration space.
 * Offsets at or beyond NV_PCIE_CFG_MAX_OFFSET are rejected.
 */
NV_STATUS NV_API_CALL os_pci_write_dword(
    void *handle,
    NvU32 offset,
    NvU32 value
)
{
    if (offset < NV_PCIE_CFG_MAX_OFFSET)
    {
        pci_write_config_dword((struct pci_dev *) handle, offset, value);
        return NV_OK;
    }

    return NV_ERR_NOT_SUPPORTED;
}
// Reports whether this platform supports removing a PCI device via
// os_pci_remove(); always true on Linux.
NvBool NV_API_CALL os_pci_remove_supported(void)
{
    return NV_TRUE;
}
// Stops and removes the given PCI device (handle is a struct pci_dev *)
// from the bus, as if it had been hot-unplugged.
void NV_API_CALL os_pci_remove(
    void *handle
)
{
    pci_stop_and_remove_bus_device(handle);
}
/*!
 * @brief Enables PCIe requester atomics of the given width (32/64/128-bit
 * completer support) on the path from the device to the root port.
 *
 * @param[in] handle struct pci_dev * of the device.
 * @param[in] type   Requested atomic operation width.
 *
 * @return NV_OK if atomics were enabled and the device reflects
 *         PCI_EXP_DEVCTL2_ATOMIC_REQ as set; NV_ERR_NOT_SUPPORTED
 *         otherwise (including kernels without
 *         pci_enable_atomic_ops_to_root()).
 */
NV_STATUS NV_API_CALL
os_enable_pci_req_atomics(
    void *handle,
    enum os_pci_req_atomics_type type
)
{
#ifdef NV_PCI_ENABLE_ATOMIC_OPS_TO_ROOT_PRESENT
    int ret;
    u16 val;

    switch (type)
    {
        case OS_INTF_PCIE_REQ_ATOMICS_32BIT:
            ret = pci_enable_atomic_ops_to_root(handle,
                                                PCI_EXP_DEVCAP2_ATOMIC_COMP32);
            break;
        case OS_INTF_PCIE_REQ_ATOMICS_64BIT:
            ret = pci_enable_atomic_ops_to_root(handle,
                                                PCI_EXP_DEVCAP2_ATOMIC_COMP64);
            break;
        case OS_INTF_PCIE_REQ_ATOMICS_128BIT:
            ret = pci_enable_atomic_ops_to_root(handle,
                                                PCI_EXP_DEVCAP2_ATOMIC_COMP128);
            break;
        default:
            ret = -1;
            break;
    }

    if (ret == 0)
    {
        /*
         * GPUs that don't support Requester Atomics have its
         * PCI_EXP_DEVCTL2_ATOMIC_REQ always set to 0 even after SW enables it.
         * Read the control register back to confirm the bit actually stuck.
         */
        if ((pcie_capability_read_word(handle, PCI_EXP_DEVCTL2, &val) == 0) &&
            (val & PCI_EXP_DEVCTL2_ATOMIC_REQ))
        {
            return NV_OK;
        }
    }
#endif
    return NV_ERR_NOT_SUPPORTED;
}
/*!
 * @brief Triggers a PCIe Function Level Reset (FLR) on the given device and
 * restores its saved config state afterwards.
 *
 * If the config state cannot be saved, or the kernel does not provide
 * pcie_reset_flr(), the reset is skipped and only an error is logged.
 *
 * @param[in] handle struct pci_dev * of the device to reset.
 */
void NV_API_CALL os_pci_trigger_flr(void *handle)
{
    struct pci_dev *pdev = (struct pci_dev *) handle;
    int ret;

    // FLR clears device config; save state first so it can be restored.
    ret = pci_save_state(pdev);
    if (ret)
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: %s() PCI save state failed, Skip FLR\n", __FUNCTION__);
        return;
    }

#if defined(NV_PCIE_RESET_FLR_PRESENT)
    // If PCI_RESET_DO_RESET is not defined in a particular kernel version
    // define it as 0. Boolean value 0 will trigger a reset of the device.
#ifndef PCI_RESET_DO_RESET
#define PCI_RESET_DO_RESET 0
#endif
    ret = pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
    if (ret)
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: %s() PCI FLR might have failed\n", __FUNCTION__);
    }
#else
    nv_printf(NV_DBG_ERRORS,
              "NVRM: %s() PCI FLR not supported\n", __FUNCTION__);
#endif

    pci_restore_state(pdev);
    return;
}

View File

@@ -0,0 +1,356 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2000-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#define NV_DEFINE_REGISTRY_KEY_TABLE
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-reg.h"
#include "nv-gpu-info.h"
/*!
 * @brief This function parses the PCI BDF identifier string and returns the
 * Domain, Bus, Device and function components from the PCI BDF string.
 *
 * This parser is highly adaptable and hence allows PCI BDF string in following
 * 3 formats.
 *
 * 1)  bus:slot             : Domain and function defaults to 0.
 * 2)  domain:bus:slot      : Function defaults to 0.
 * 3)  domain:bus:slot.func : Complete PCI dev id string.
 *
 * All numeric components are parsed as hexadecimal.
 *
 * @param[in]   pci_dev_str     String containing the BDF to be parsed.
 * @param[out]  pci_domain      Pointer where pci_domain is to be returned.
 * @param[out]  pci_bus         Pointer where pci_bus is to be returned.
 * @param[out]  pci_slot        Pointer where pci_slot is to be returned.
 * @param[out]  pci_func        Pointer where pci_func is to be returned.
 *
 * @return  NV_OK if parsing succeeds, or an NV_STATUS error code otherwise.
 */
static NV_STATUS pci_str_to_bdf(char *pci_dev_str, NvU32 *pci_domain,
    NvU32 *pci_bus, NvU32 *pci_slot, NvU32 *pci_func)
{
    char *option_string = NULL;
    char *token, *string;
    NvU32 domain, bus, slot;
    NV_STATUS status = NV_OK;

    //
    // remove_spaces() allocates memory, hence we need to keep a pointer
    // to the original string for freeing at end of function.
    //
    if ((option_string = rm_remove_spaces(pci_dev_str)) == NULL)
    {
        // memory allocation failed, returning
        return NV_ERR_GENERIC;
    }

    string = option_string;

    if (!strlen(string) || !pci_domain || !pci_bus || !pci_slot || !pci_func)
    {
        status = NV_ERR_INVALID_ARGUMENT;
        goto done;
    }

    // Split on '.': token is everything before it, string the function digit.
    if ((token = strsep(&string, ".")) != NULL)
    {
        // PCI device can have maximum 8 functions only.
        if ((string != NULL) && (!(*string >= '0' && *string <= '7') ||
            (strlen(string) > 1)))
        {
            nv_printf(NV_DBG_ERRORS,
                      "NVRM: Invalid PCI function in token %s\n",
                      pci_dev_str);
            status = NV_ERR_INVALID_ARGUMENT;
            goto done;
        }
        else if (string == NULL)
        {
            // No '.' present: function defaults to 0.
            *pci_func = 0;
        }
        else
        {
            *pci_func = (NvU32)(*string - '0');
        }

        // First numeric field: domain for 3-part strings, bus for 2-part.
        domain = simple_strtoul(token, &string, 16);

        if ((string == NULL) || (*string != ':') || (*(string + 1) == '\0'))
        {
            nv_printf(NV_DBG_ERRORS,
                      "NVRM: Invalid PCI domain/bus in token %s\n",
                      pci_dev_str);
            status = NV_ERR_INVALID_ARGUMENT;
            goto done;
        }

        token = string;
        // Second numeric field: bus for 3-part strings, slot for 2-part.
        bus = simple_strtoul((token + 1), &string, 16);

        if (string == NULL)
        {
            nv_printf(NV_DBG_ERRORS,
                      "NVRM: Invalid PCI bus/slot in token %s\n",
                      pci_dev_str);
            status = NV_ERR_INVALID_ARGUMENT;
            goto done;
        }

        if (*string != '\0')
        {
            // Three-part form "domain:bus:slot": a second ':' is present.
            if ((*string != ':') || (*(string + 1) == '\0'))
            {
                nv_printf(NV_DBG_ERRORS,
                          "NVRM: Invalid PCI slot in token %s\n",
                          pci_dev_str);
                status = NV_ERR_INVALID_ARGUMENT;
                goto done;
            }

            token = string;
            slot = (NvU32)simple_strtoul(token + 1, &string, 16);
            // No digits consumed means the slot field was not a number.
            if ((slot == 0) && ((token + 1) == string))
            {
                nv_printf(NV_DBG_ERRORS,
                          "NVRM: Invalid PCI slot in token %s\n",
                          pci_dev_str);
                status = NV_ERR_INVALID_ARGUMENT;
                goto done;
            }
            *pci_domain = domain;
            *pci_bus = bus;
            *pci_slot = slot;
        }
        else
        {
            // Two-part form "bus:slot": shift fields and default domain to 0.
            *pci_slot = bus;
            *pci_bus = domain;
            *pci_domain = 0;
        }
        status = NV_OK;
    }
    else
    {
        status = NV_ERR_INVALID_ARGUMENT;
    }

done:
    // Freeing the memory allocated by remove_spaces().
    os_free_mem(option_string);
    return status;
}
/*!
 * @brief This function parses the registry keys per GPU device. It accepts a
 * semicolon separated list of key=value pairs. The first key value pair MUST be
 * "pci=DDDD:BB:DD.F;" where DDDD is Domain, BB is Bus Id, DD is device slot
 * number and F is the Function. This PCI BDF is used to identify which GPU to
 * assign the registry keys that follows next.
 * If a GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is NOT
 * found, then all the registry keys that follows are skipped, until we find the
 * next valid pci identifier "pci=DDDD:BB:DD.F;". Following are the valid
 * formats for the value of the "pci" string:
 * 1)  bus:slot             : Domain and function defaults to 0.
 * 2)  domain:bus:slot      : Function defaults to 0.
 * 3)  domain:bus:slot.func : Complete PCI dev id string.
 *
 *
 * @param[in]  sp  pointer to nvidia_stack_t struct.
 *
 * @return NV_OK if succeeds, or NV_STATUS error code otherwise.
 */
NV_STATUS nv_parse_per_device_option_string(nvidia_stack_t *sp)
{
    NV_STATUS status = NV_OK;
    char *option_string = NULL;
    char *ptr, *token;
    char *name, *value;
    NvU32 data, domain, bus, slot, func;
    nv_linux_state_t *nvl = NULL;
    nv_state_t *nv = NULL;

    if (NVreg_RegistryDwordsPerDevice != NULL)
    {
        // rm_remove_spaces() allocates a stripped copy; freed at the end.
        if ((option_string = rm_remove_spaces(NVreg_RegistryDwordsPerDevice)) == NULL)
        {
            return NV_ERR_GENERIC;
        }

        ptr = option_string;

        // Each ';'-separated token must be exactly one "name=value" pair;
        // malformed tokens (missing name/value, extra '=') are skipped.
        while ((token = strsep(&ptr, ";")) != NULL)
        {
            if (!(name = strsep(&token, "=")) || !strlen(name))
            {
                continue;
            }
            if (!(value = strsep(&token, "=")) || !strlen(value))
            {
                continue;
            }
            if (strsep(&token, "=") != NULL)
            {
                continue;
            }

            // If this key is "pci", then value is pci_dev id string
            // which needs special parsing as it is NOT a dword.
            if (strcmp(name, NV_REG_PCI_DEVICE_BDF) == 0)
            {
                status = pci_str_to_bdf(value, &domain, &bus, &slot, &func);

                // Check if PCI_DEV id string was in a valid format or NOT.
                if (NV_OK != status)
                {
                    // lets reset cached pci dev
                    nv = NULL;
                }
                else
                {
                    nvl = find_pci(domain, bus, slot, func);

                    //
                    // If NO GPU found corresponding to this GPU, then reset
                    // cached state. This helps ignore the following registry
                    // keys until valid PCI BDF is found in the commandline.
                    //
                    if (!nvl)
                    {
                        nv = NULL;
                    }
                    else
                    {
                        nv = NV_STATE_PTR(nvl);
                    }
                }
                continue;
            }

            //
            // Check if cached pci_dev string in the commandline is in valid
            // format, else we will skip all the successive registry entries
            // (<key, value> pairs) until a valid PCI_DEV string is encountered
            // in the commandline.
            //
            if (!nv)
                continue;

            data = (NvU32)simple_strtoul(value, NULL, 0);
            rm_write_registry_dword(sp, nv, name, data);
        }
        os_free_mem(option_string);
    }
    return status;
}
/*
* Compare given string UUID with the GpuBlacklist or ExcludedGpus registry
* parameter string and return whether the UUID is in the GPU exclusion list
*/
/*
 * Report whether the given UUID string appears in the GPU exclusion list
 * supplied via the NVreg_ExcludedGpus or legacy NVreg_GpuBlacklist module
 * parameter; when both are set, NVreg_ExcludedGpus takes precedence.
 * Entries are comma-separated and compared by exact string match after
 * spaces are stripped.
 */
NvBool nv_is_uuid_in_gpu_exclusion_list(const char *uuid)
{
    const char *param;
    char *copy;
    char *cursor;
    char *entry;
    NvBool found = NV_FALSE;

    if (NVreg_ExcludedGpus != NULL)
        param = NVreg_ExcludedGpus;
    else if (NVreg_GpuBlacklist != NULL)
        param = NVreg_GpuBlacklist;
    else
        return NV_FALSE;

    // rm_remove_spaces() allocates a stripped copy that we must free.
    copy = rm_remove_spaces(param);
    if (copy == NULL)
        return NV_FALSE;

    cursor = copy;
    while (!found && ((entry = strsep(&cursor, ",")) != NULL))
    {
        if (strcmp(entry, uuid) == 0)
            found = NV_TRUE;
    }

    os_free_mem(copy);
    return found;
}
/*!
 * @brief Seeds the RM registry from the driver's module parameters: the
 * string keys (RmNvlinkBandwidth, RmMsg), the generic NVreg_RegistryDwords
 * option string, and finally each entry of the nv_parms table.
 *
 * @return NV_OK on success, NV_ERR_NO_MEMORY if a stack cannot be allocated.
 */
NV_STATUS NV_API_CALL os_registry_init(void)
{
    nv_parm_t *entry;
    unsigned int i;
    nvidia_stack_t *sp = NULL;

    if (nv_kmem_cache_alloc_stack(&sp) != 0)
    {
        return NV_ERR_NO_MEMORY;
    }

    if (NVreg_RmNvlinkBandwidth != NULL)
    {
        rm_write_registry_string(sp, NULL,
                                 "RmNvlinkBandwidth",
                                 NVreg_RmNvlinkBandwidth,
                                 strlen(NVreg_RmNvlinkBandwidth));
    }

    if (NVreg_RmMsg != NULL)
    {
        rm_write_registry_string(sp, NULL,
                                 "RmMsg", NVreg_RmMsg, strlen(NVreg_RmMsg));
    }

    //
    // CoherentGPUMemoryMode=driver just implies the older
    // EnableUserNUMAManagement=0 option
    //
    if (NVreg_CoherentGPUMemoryMode != NULL)
    {
        if (strcmp(NVreg_CoherentGPUMemoryMode, "driver") == 0)
        {
            NVreg_EnableUserNUMAManagement = 0;
        }
    }

    rm_parse_option_string(sp, NVreg_RegistryDwords);

    // Write every static nv_parms entry (NULL-name-terminated table) as a
    // dword registry key.
    for (i = 0; (entry = &nv_parms[i])->name != NULL; i++)
    {
        rm_write_registry_dword(sp, NULL, entry->name, *entry->data);
    }

    nv_kmem_cache_free_stack(sp);

    return NV_OK;
}

View File

@@ -0,0 +1,59 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
/*!
 * @brief Translates a physical-address mmap offset back to the index of the
 * matching page within an allocation's page table.
 *
 * @param[in]  pAllocPrivate Allocation private data (nv_alloc_t *).
 * @param[in]  offset        Physical address being mapped.
 * @param[out] pPageIndex    On success, receives the matching page index.
 *
 * @return NV_OK if a page of the allocation matches the offset,
 *         NV_ERR_OBJECT_NOT_FOUND otherwise.
 */
NV_STATUS NV_API_CALL os_match_mmap_offset(
    void *pAllocPrivate,
    NvU64 offset,
    NvU64 *pPageIndex
)
{
    nv_alloc_t *at = pAllocPrivate;
    NvU64 i;

    //
    // The contig flag is invariant across the scan, so branch once instead
    // of re-testing it for every page.
    //
    if (at->flags.contig)
    {
        // Contiguous allocation: page i lives at page_table[0] + i pages.
        const NvU64 base = at->page_table[0].phys_addr;

        for (i = 0; i < at->num_pages; i++)
        {
            if (offset == (base + (i * PAGE_SIZE)))
            {
                *pPageIndex = i;
                return NV_OK;
            }
        }
    }
    else
    {
        // Discontiguous allocation: compare each page's address directly.
        for (i = 0; i < at->num_pages; i++)
        {
            if (offset == at->page_table[i].phys_addr)
            {
                *pPageIndex = i;
                return NV_OK;
            }
        }
    }

    return NV_ERR_OBJECT_NOT_FOUND;
}

View File

@@ -0,0 +1,31 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RMP2PDEFINES_H_
#define _RMP2PDEFINES_H_

// Page sizes shared with the RM P2P interfaces, expressed in bytes
// (value << 10 == value KiB).
#define NVRM_P2P_PAGESIZE_SMALL_4K   (4 << 10)
#define NVRM_P2P_PAGESIZE_BIG_64K    (64 << 10)
#define NVRM_P2P_PAGESIZE_BIG_128K   (128 << 10)

#endif