Updating prebuilts and/or headers
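
Each entry below pairs a 40-hex SHA-1 digest with the path it covers. For spot-checking an entry against a working tree, here is a minimal sketch; it is an assumption, not part of this manifest, that the digests are either plain SHA-1 content hashes (sha1sum-style) or git blob ids, since the list itself does not say how they were computed, so the sketch tries both:

    import hashlib

    def sha1_of_file(path):
        """Plain SHA-1 of the file's raw bytes (sha1sum-style)."""
        with open(path, "rb") as f:
            data = f.read()
        return hashlib.sha1(data).hexdigest()

    def git_blob_sha1(path):
        """Git blob object id: SHA-1 over 'blob <size>\\0' + contents."""
        with open(path, "rb") as f:
            data = f.read()
        header = b"blob %d\x00" % len(data)
        return hashlib.sha1(header + data).hexdigest()

    # Compare one manifest entry (the README.md line below) both ways.
    expected = "1b03ad8c20ddb6d129ade64846377cc86ce4c1de"
    for name, fn in (("sha1", sha1_of_file), ("git-blob", git_blob_sha1)):
        digest = fn("README.md")
        print(name, digest, "MATCH" if digest == expected else "")

Whichever scheme matches for one file should match for the rest, since the whole list appears to be generated the same way.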

af3ee56442f16029cb9b13537477c384226b22fc - CODE_OF_CONDUCT.md
ac7f91dfb6c5c469d2d8196c6baebe46ede5aee0 - CHANGELOG.md
1b03ad8c20ddb6d129ade64846377cc86ce4c1de - README.md
d13779dbbab1c776db15f462cd46b29f2c0f8c7c - Makefile
ec5f1eb408e0b650158e0310fb1ddd8e9b323a6f - CONTRIBUTING.md
5728867ce2e96b63b29367be6aa1c0e47bcafc8f - SECURITY.md
7d577fdb9594ae572ff38fdda682a4796ab832ca - COPYING
6b73bf6a534ddc0f64e8ba88739381c3b7fb4b5c - nv-compiler.sh
7ad4bb8aebd57a9be26329a611b14c5a70ccf2b7 - nouveau/extract-firmware-nouveau.py
36f9753dbbef7dd5610312d5b14bffac1a93cee4 - nouveau/nouveau_firmware_layout.ods
80545889e3c9967fd0ae12a65005be31bac354f2 - src/nvidia-modeset/Makefile
80c2c9a2a05beb0202239db8b0dd7080ff21c194 - src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h
36c20e9c111e66601b025802f840e7b87d09cdde - src/nvidia-modeset/kapi/interface/nvkms-kapi.h
27612b72a77ac67cd468ac7f15948d2ad78defed - src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h
727bd77cfbc9ac4989c2ab7eec171ceb516510aa - src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h
01d943d6edb0c647c2b8dbc44460948665b03e7a - src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c
ce42ceac4c4cf9d249d66ab57ae2f435cd9623fc - src/nvidia-modeset/kapi/src/nvkms-kapi-sync.c
67fe73dc7149daf807194bd9a0f96252cb452179 - src/nvidia-modeset/kapi/src/nvkms-kapi.c
2ea1436104463c5e3d177e8574c3b4298976d37e - src/nvidia-modeset/interface/nvkms-ioctl.h
8e3e74d2b3f45381e7b0012d930cf451cbd1728f - src/nvidia-modeset/interface/nvkms-sync.h
d51449fa2fd19748007f2e98f0233c92b45f9572 - src/nvidia-modeset/interface/nvkms-api-types.h
4da2125966732a80fc154cea4b18b2372b12501e - src/nvidia-modeset/interface/nvkms-api.h
b986bc6591ba17a74ad81ec4c93347564c6d5165 - src/nvidia-modeset/interface/nvkms-format.h
f5f3b11c78a8b0eef40c09e1751615a47f516edb - src/nvidia-modeset/include/nvkms-hal.h
ebafc51b2b274cd1818e471850a5efa9618eb17d - src/nvidia-modeset/include/nvkms-prealloc.h
118d0ea84ff81de16fbdc2c7daf249ee5c82ed6e - src/nvidia-modeset/include/nvkms-modepool.h
6e3681d5caa36312804c91630eaaf510eda897d2 - src/nvidia-modeset/include/nvkms-dma.h
1b75646c99c748f9070208eb58f0082812eabbd9 - src/nvidia-modeset/include/nvkms-private.h
412d8028a548e67e9ef85cb7d3f88385e70c56f9 - src/nvidia-modeset/include/nvkms-console-restore.h
6b21a68e254becdd2641bc456f194f54c23abe51 - src/nvidia-modeset/include/nvkms-framelock.h
4a33d410f090fd4f4dfc9a6de285f8e8fb1c9ced - src/nvidia-modeset/include/nvkms-surface.h
c90e4393f568d96bc98cb52a93bfc3fdea10658d - src/nvidia-modeset/include/nvkms-modeset-workarea.h
8c7e0e15c1038fe518e98d8f86fafb250b10a1d2 - src/nvidia-modeset/include/nvkms-stereo.h
fa829f1cd3b73f194f39879c48962b703f640b65 - src/nvidia-modeset/include/nvkms-vrr.h
c869ccfcda419d80b6691d3667c4e9196493065e - src/nvidia-modeset/include/nvkms-modeset-types.h
ec1374d339746b73bc7c7614695fde68c156074a - src/nvidia-modeset/include/nvkms-rm.h
07ac47b52b1b42c143501c4a95a88a3f86f5be03 - src/nvidia-modeset/include/nvkms-hdmi.h
11bae7c491bbb0ba4cad94b645d47c384191fa5c - src/nvidia-modeset/include/nvkms-flip.h
70d9251f331bbf28f5c5bbdf939ebad94db9362d - src/nvidia-modeset/include/nvkms-softfloat.h
cdf54b0d423f94f04d6f33b672c131125c13d260 - src/nvidia-modeset/include/nvkms-hw-flip.h
377dd4a29b2ea5937a9b8fc3fba0c9e4ef92992e - src/nvidia-modeset/include/nvkms-cursor.h
260b6ef87c755e55a803adad4ce49f2d57315f9a - src/nvidia-modeset/include/nvkms-event.h
8a0ced82697c32b97a80fa3366704014879610e7 - src/nvidia-modeset/include/nvkms-flip-workarea.h
b0d407b0413453ec71481f84cc448d090b90d609 - src/nvidia-modeset/include/nvkms-evo3.h
496b94af536dd912866a05f7b2da53050b50c2f5 - src/nvidia-modeset/include/nvkms-prealloc-types.h
35fa1444c57f7adbbddddc612237f3ad38cdd78f - src/nvidia-modeset/include/nvkms-rmapi.h
15dddd9307fa7ac201bd9ebc1e35e6ac0d2cf6c9 - src/nvidia-modeset/include/nvkms-evo.h
b8854261256a801af52d1201081afa9c17486a96 - src/nvidia-modeset/include/nvkms-3dvision.h
c1c7047929aafc849a924c7fa9f8bc206b8e7524 - src/nvidia-modeset/include/g_nvkms-evo-states.h
49af4a8fa95d0e595deacadbca5360f097722e7f - src/nvidia-modeset/include/nvkms-evo1.h
eb5248c4b0b51e7aecd2de87e496253b3b235c70 - src/nvidia-modeset/include/nvkms-utils-flip.h
4a94381bd8c24b09193577d3f05d6d61f178e1cf - src/nvidia-modeset/include/nvkms-ctxdma.h
be6e0e97c1e7ffc0daa2f14ef7b05b9f9c11dc16 - src/nvidia-modeset/include/nvkms-attributes.h
d05ef9a837f2927fe387e7d157ea76c7ef567807 - src/nvidia-modeset/include/nvkms-lut.h
d57ae79509c667e8d16a4756d85e3564c1b1ac34 - src/nvidia-modeset/include/nvkms-modeset.h
ae03509966df56d98fa72b7528ab43ec2b258381 - src/nvidia-modeset/include/nvkms-utils.h
81fcc817dfb8ae1f98b63d2c1acacc303fedb554 - src/nvidia-modeset/include/nvkms-dpy-override.h
fa8dbffe58d345634ab1ea8743ed29c9ec169f36 - src/nvidia-modeset/include/nvkms-dpy.h
691731826d6daa3bb5a3847a3dd2424d513113c4 - src/nvidia-modeset/include/nvkms-types.h
a79cfb74026085b0aa612c0ae6789083e196bbc2 - src/nvidia-modeset/include/nvkms-evo-states.h
a8fbb7a071c0e7b326f384fed7547e7b6ec81c3e - src/nvidia-modeset/include/dp/nvdp-timer.h
4625828efd425e1b29835ab91fcc3d2d85e92389 - src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h
ae43c46687d16b93189047d9eeed933a67e5571f - src/nvidia-modeset/include/dp/nvdp-connector.h
c386632dbdc0e89019d5618f132dbcb3dff4dafb - src/nvidia-modeset/include/dp/nvdp-device.h
bd2e4a6102432d4ac1faf92b5d3db29e9e3cfafc - src/nvidia-modeset/src/nvkms-utils.c
30ad7839985dea46e6b6d43499210a3056da51ad - src/nvidia-modeset/src/nvkms-utils-flip.c
2b304663f2a005b5ccdecfafb69a3407f2feeb18 - src/nvidia-modeset/src/nvkms-evo2.c
fd64ffbcc1efd446fb3352ceaa8bd4221b23a1d2 - src/nvidia-modeset/src/nvkms-modeset.c
3e723edf2a0a2f4f93032feb4aeaaf7fd0acddfa - src/nvidia-modeset/src/g_nvkms-evo-states.c
2fabe1c14116a2b07f24d01710394ee84a6e3914 - src/nvidia-modeset/src/nvkms-3dvision.c
488724910d9a3bf530303a4fa0889983d11ce5c0 - src/nvidia-modeset/src/nvkms-hdmi.c
761c8540278a1ffb9fe4aa0adb1b4ee95524787a - src/nvidia-modeset/src/nvkms-hal.c
54b41301663dc9fdc45d24c7a43ad4a980821f9d - src/nvidia-modeset/src/nvkms-attributes.c
3261fd9a1eb14f7f3fb0917757b1e2704d4abbd2 - src/nvidia-modeset/src/nvkms-hw-states.c
6d41c9f84cc9ce2d16812e94a3fba055b3fc7308 - src/nvidia-modeset/src/nvkms-conf.c
7d0e38f9d79e0c928bdc67276b8ecb0c18470b88 - src/nvidia-modeset/src/nvkms-hw-flip.c
03fb499633c485e0559da79500d4e66ea50e8d8f - src/nvidia-modeset/src/nvkms-framelock.c
05ca4acdfeb9b99eccc7e222846fc688473322ae - src/nvidia-modeset/src/nvkms-rmapi-dgpu.c
65b02b48caff2a9100b8c5614f91d42fb20da9c0 - src/nvidia-modeset/src/nvkms-dpy-override.c
dff88ceaf95239b51b60af915f92e389bb844425 - src/nvidia-modeset/src/nvkms-cursor.c
f754a27436fd1e1fa103de6110224c21ad7ea9f4 - src/nvidia-modeset/src/nvkms-pow.c
4d81c3052a0793d180642e3367b7870863015ef2 - src/nvidia-modeset/src/nvkms-rm.c
9a8746ee4a4e772b8ac13f06dc0de8a250fdb4c7 - src/nvidia-modeset/src/nvkms-ctxdma.c
403e6dbff0a607c2aecf3204c56633bd7b612ae2 - src/nvidia-modeset/src/nvkms-stereo.c
da726d20eea99a96af4c10aace88f419e8ee2a34 - src/nvidia-modeset/src/nvkms-event.c
b890da1d428f30483d6f69e662218f19c074d011 - src/nvidia-modeset/src/nvkms-evo3.c
c799d52bdc792efc377fb5cd307b0eb445c44d6a - src/nvidia-modeset/src/nvkms-cursor2.c
b7232f4b4b8f0d4c395c241c451fc17b6ab84d7f - src/nvidia-modeset/src/nvkms-evo.c
6f2eb25d57d2dc3c1e5db869cfbdf556878d3332 - src/nvidia-modeset/src/nvkms-console-restore.c
94e9c19b7b6a5e56fd46b0885e7dd6fe698fe2df - src/nvidia-modeset/src/nvkms-prealloc.c
bf1b007fceaa1c38771f9e7d1130f9c0c3eddd80 - src/nvidia-modeset/src/nvkms-lut.c
e13960662d872f84dd77f36f778aee0521b4ff54 - src/nvidia-modeset/src/nvkms-modepool.c
b13bd89b5ac60ceab56e9c2398cf7668375ab7ad - src/nvidia-modeset/src/nvkms-flip.c
9fea40b7b55d6ebf3f73b5d469751c873ffbe7c0 - src/nvidia-modeset/src/nvkms-dma.c
df59641109db4529eed62cf156b1815a3e67ba05 - src/nvidia-modeset/src/nvkms-vrr.c
f4a02d5b6cb1fa5d461514b21e13002ad9cfa1a4 - src/nvidia-modeset/src/nvkms-evo1.c
9e4d3e3505a84d8634a2ef2307628a8fe551a4c3 - src/nvidia-modeset/src/nvkms-surface.c
2fa9d9b3cbeeb9406f2dd51a4f4a5d53844a31c9 - src/nvidia-modeset/src/nvkms-dpy.c
a49319a235d8746b771a7c418277e168a291259f - src/nvidia-modeset/src/nvkms.c
6a35b80a6995777dc9500cac9659e6f0f0c12d23 - src/nvidia-modeset/src/nvkms-cursor3.c
a90b2c295271631b4c3abe6afb8dfd92d6b429c8 - src/nvidia-modeset/src/dp/nvdp-connector.cpp
c19775aebdaaaee3500378d47af6ff0b8eb486b8 - src/nvidia-modeset/src/dp/nvdp-device.cpp
51af3c1ee6b74ee0c9add3fb7d50cbc502980789 - src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp
69fed95ab3954dd5cb26590d02cd8ba09cdff1ac - src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp
f96cd982b4c05351faa31d04ac30d6fa7c866bcb - src/nvidia-modeset/src/dp/nvdp-timer.cpp
535ce9f743903eb83a341eef1be812f4e4b50887 - src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp
a2a4b7063fa903cc434163ebceb7c8d48f703c33 - src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp
6b985fc50b5040ce1a81418bed73a60edb5d3289 - src/nvidia-modeset/src/dp/nvdp-timer.hpp
110ac212ee8832c3fa3c4f45d6d33eed0301e992 - src/nvidia-modeset/src/dp/nvdp-host.cpp
252660f72b80add6f6071dd0b86288dda8dbb168 - src/nvidia-modeset/os-interface/include/nvkms.h
6e4ae13d024a1df676736752df805b6f91511009 - src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h
c3ab6005d7083e90145cac66addf815c4f93d9a0 - src/nvidia-modeset/lib/nvkms-format.c
7e1249c1d187aec5891eabe5bacae2189d33dc55 - src/nvidia-modeset/lib/nvkms-sync.c
b9fd15957f7ae5effeccb5d8adaa7434b43f44e1 - src/common/softfloat/source/s_roundToUI64.c
d0f8f08c225b60d88b6358d344404ba9df3038ec - src/common/softfloat/source/s_normSubnormalF32Sig.c
824383b03952c611154bea0a862da2b9e2a43827 - src/common/softfloat/source/s_subMagsF32.c
729e790328168c64d65a1355e990274c249bbb3a - src/common/softfloat/source/f32_to_i32_r_minMag.c
68843a93e1f46195243ef1164f611b759cf19d17 - src/common/softfloat/source/f32_le_quiet.c
4445b1fbbd507144f038fd939311ff95bc2cf5f1 - src/common/softfloat/source/ui64_to_f64.c
daeb408588738b3eb4c8b092d7f92ac597cf1fc6 - src/common/softfloat/source/f32_rem.c
aaf6ccb77a1a89fa055a0fb63513297b35e2e54b - src/common/softfloat/source/f64_le_quiet.c
0bf499c0e3a54186fa32b38b310cc9d98ccdcfe3 - src/common/softfloat/source/f32_eq.c
6fa7493285fe2f7fdc0ac056a6367e90327905c2 - src/common/softfloat/source/f32_sub.c
54cbeb5872a86e822bda852ec15d3dcdad4511ce - src/common/softfloat/source/f64_add.c
d4b26dc407a891e9ff5324853f1845a99c5d5cd2 - src/common/softfloat/source/f32_to_i32.c
e4930e155580a0f5aa7f3694a6205bc9aebfe7aa - src/common/softfloat/source/f32_to_f64.c
5a5e0d9f1ee7e8c0d1d4f9fbcf6eba330a5f1792 - src/common/softfloat/source/f32_isSignalingNaN.c
ce37cdce572a3b02d42120e81c4969b39d1a67b6 - src/common/softfloat/source/f64_to_i32.c
5e6f9e120a17cc73297a35e4d57e4b9cbce01780 - src/common/softfloat/source/s_mul64To128.c
b22876b0695f58ee56143c9f461f1dde32fefbf3 - src/common/softfloat/source/f64_to_ui64.c
23b76c1d0be64e27a6f7e2ea7b8919f1a45a8e7c - src/common/softfloat/source/f32_to_ui32_r_minMag.c
dde685423af544e5359efdb51b4bf9457c67fa3b - src/common/softfloat/source/f32_sqrt.c
21a6232d93734b01692689258a3fdfbbf4ff089d - src/common/softfloat/source/s_roundToUI32.c
0108fe6f0d394ad72083aff9bb58507f97a0b669 - src/common/softfloat/source/ui32_to_f64.c
871cb1a4037d7b4e73cb20ad18390736eea7ae36 - src/common/softfloat/source/f32_to_ui64_r_minMag.c
84b0a01ba2a667eb28b166d45bd91352ead83e69 - src/common/softfloat/source/i64_to_f32.c
d701741d8d6a92bb890e53deda1b795f5787f465 - src/common/softfloat/source/f64_le.c
1ff879eca2a273293b5cd6048419b2d2d8063b93 - src/common/softfloat/source/f64_mulAdd.c
00c612847b3bd227a006a4a2697df85866b80315 - src/common/softfloat/source/s_mulAddF32.c
da3b3f94a817909a3dc93ca5fa7675805c7979e0 - src/common/softfloat/source/f64_isSignalingNaN.c
bc992c88f3de09e3a82447cf06dbde7c6604f7f8 - src/common/softfloat/source/f64_to_f32.c
c29536f617d71fe30accac44b2f1df61c98a97dc - src/common/softfloat/source/f64_div.c
50b3147f8413f0595a4c3d6e6eeab84c1ffecada - src/common/softfloat/source/s_normRoundPackToF32.c
1484fc96d7731695bda674e99947280a86990997 - src/common/softfloat/source/f32_to_i64.c
b8c5ccc1e511637d8b2ba2657de4937b80c01c07 - src/common/softfloat/source/f32_le.c
6f83fa864007e8227ae09bb36a7fdc18832d4445 - src/common/softfloat/source/f32_mul.c
00ab2120f71117161d4f6daaa9b90a3036a99841 - src/common/softfloat/source/f32_to_ui32.c
86fdc2472526375539216461732d1db6a9f85b55 - src/common/softfloat/source/s_roundPackToF32.c
38bd00e9c4d2f1354c611404cca6209a6c417669 - src/common/softfloat/source/s_countLeadingZeros64.c
2960704c290f29aae36b8fe006884d5c4abcabb4 - src/common/softfloat/source/f32_div.c
fd40a71c7ebf9d632a384fadf9487cfef4f3ea98 - src/common/softfloat/source/s_shiftRightJam128.c
c3ce12c227d25bc0de48fbcf914fc208e2448741 - src/common/softfloat/source/f64_sub.c
29396b7c23941024a59d5ea06698d2fbc7e1a6ca - src/common/softfloat/source/f64_to_i64.c
5c4ee32cc78efc718aaa60ec31d0b00b1bee3c2c - src/common/softfloat/source/f64_to_ui64_r_minMag.c
c951c9dffa123e4f77ed235eca49ef9b67f9f3d2 - src/common/softfloat/source/s_subMagsF64.c
d19ff7dfece53875f2d6c6f7dd9e7772f7b0b7ec - src/common/softfloat/source/f32_to_i64_r_minMag.c
dafa667ee5dd52c97fc0c3b7144f6b619406c225 - src/common/softfloat/source/s_mulAddF64.c
ab19c6b50c40b8089cb915226d4553d1aa902b0e - src/common/softfloat/source/f64_to_i32_r_minMag.c
2e0fec421f4defd293cf55c5f3af7d91f4b7d2cc - src/common/softfloat/source/ui64_to_f32.c
fe06512577e642b09196d46430d038d027491e9f - src/common/softfloat/source/f32_eq_signaling.c
e7890082ce426d88b4ec93893da32e306478c0d1 - src/common/softfloat/source/s_approxRecipSqrt32_1.c
296c40b0589536cb9af3231ad3dcd7f2baaa6887 - src/common/softfloat/source/f64_lt.c
fb062ecbe62a1f5878fd47f0c61490f2bde279dd - src/common/softfloat/source/s_roundToI32.c
ec1a797b11f6e846928a4a49a8756f288bda1dfa - src/common/softfloat/source/i32_to_f64.c
0e9694d551848d88531f5461a9b3b91611652e9a - src/common/softfloat/source/f64_to_ui32_r_minMag.c
a94c8c2bd74633027e52e96f41d24714d8081eb4 - src/common/softfloat/source/s_approxRecipSqrt_1Ks.c
baa7af4eea226140c26ffe6ab02a863d07f729fb - src/common/softfloat/source/f64_eq_signaling.c
f6d98979ab2d1e2b0d664333104130af6abbcad5 - src/common/softfloat/source/f64_to_i64_r_minMag.c
5c1026617c588bcf5f1e59230bd5bb900600b9ac - src/common/softfloat/source/f64_mul.c
0d8e42636a3409a647291fdb388001c2b11bba07 - src/common/softfloat/source/f32_to_f16.c
9f4d355d85fbe998e243fe4c7bbf8ad23062b6e2 - src/common/softfloat/source/i64_to_f64.c
d9a86343e6cc75714f65f690082dd4b0ba724be9 - src/common/softfloat/source/s_roundPackToF16.c
1a86a6948bf6768bd23a19f1f05d40968c1d2b15 - src/common/softfloat/source/f64_rem.c
e0ad81cfb5d2c0e74dc4ece9518ca15ffc77beaf - src/common/softfloat/source/f32_roundToInt.c
50daf9186bc5d0180d1453c957164b136d5ffc89 - src/common/softfloat/source/f64_eq.c
2db07bbb8242bc55a24ef483af6d648db0660de0 - src/common/softfloat/source/f32_add.c
760fd7c257a1f915b61a1089b2acb143c18a082e - src/common/softfloat/source/s_addMagsF64.c
4b37be398b3e73ae59245f03b2ba2394fc902b4d - src/common/softfloat/source/s_normSubnormalF64Sig.c
69dc4cc63b2a9873a6eb636ee7cb704cbd502001 - src/common/softfloat/source/f64_to_ui32.c
9266c83f3e50093cc45d7be6ab993a0e72af1685 - src/common/softfloat/source/s_roundPackToF64.c
7c8e5ab3f9bf6b2764ce5fffe80b2674be566a12 - src/common/softfloat/source/softfloat_state.c
9a5b93459ace2da23964da98617d6b18006fab86 - src/common/softfloat/source/s_countLeadingZeros8.c
108eec2abf1cddb397ce9f652465c2e52f7c143b - src/common/softfloat/source/f64_roundToInt.c
09cb0cdb90eb23b53cd9c1a76ba26021084710d1 - src/common/softfloat/source/s_addMagsF32.c
ae25eea499b3ea5bdd96c905fd0542da11083048 - src/common/softfloat/source/s_normRoundPackToF64.c
2e5c29d842a8ebc5fbf987068dc9394cee609cc7 - src/common/softfloat/source/f32_to_ui64.c
7bc81f5bc894118c08bfd52b59e010bc068ed762 - src/common/softfloat/source/ui32_to_f32.c
513a7d1c3053fc119efcd8ae1bcc9652edc45315 - src/common/softfloat/source/f32_lt.c
bbc70102b30f152a560eb98e7a1a4b11b9ede85e - src/common/softfloat/source/f64_sqrt.c
0adfa7e174cdb488bb22b06642e14e7fc6f49c67 - src/common/softfloat/source/s_roundToI64.c
ebb4f674b6213fec29761fc4e05c1e3ddeda6d17 - src/common/softfloat/source/f32_mulAdd.c
29321080baa7eab86947ac825561fdcff54a0e43 - src/common/softfloat/source/i32_to_f32.c
8e58f0258218475616ff4e6317516d40ad475626 - src/common/softfloat/source/f32_lt_quiet.c
054b23a974fc8d0bab232be433c4e516e6c1250a - src/common/softfloat/source/f64_lt_quiet.c
4cd1d6cfca3936a39aab9bc0eb622f5c7c848be1 - src/common/softfloat/source/include/softfloat_types.h
de09949a0ca5cd2a84b882b5b5c874d01d3ae11a - src/common/softfloat/source/include/primitives.h
f36c896cfa01f1de9f9420189319e4e00c7fc52a - src/common/softfloat/source/include/internals.h
1ded4df85ff5fa904fa54c27d681265425be1658 - src/common/softfloat/source/include/primitiveTypes.h
9645e179cf888bcd0e3836e8126b204b4b42b315 - src/common/softfloat/source/include/softfloat.h
21a11759ed2afd746a47c4d78b67640c2d052165 - src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c
252c816378fddab616b1f2a61e9fedd549224483 - src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
0cbae7a5abc336331d460cbd3640d2cda02af434 - src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
a6d5c83f6a0542b33ac9c23ac65ef69002cfff9d - src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
86cda6550cb02bbf595d1667573e4be83702a95e - src/common/softfloat/source/8086-SSE/specialize.h
d8b0c55a49c4fa0b040541db6d5ff634d7d103e7 - src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
3d0dbc0a672d039a6346e1c21ddf87ffc9181978 - src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
d152bc457b655725185bdff42b36bb96d6e6715e - src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c
1dd1b424087d9c872684df0c1b4063b077992d5f - src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
b882497ae393bf66a728dae395b64ac53602a1a5 - src/common/softfloat/nvidia/nv-softfloat.h
be9407a273620c0ba619b53ed72d59d52620c3e4 - src/common/softfloat/nvidia/platform.h
70b155b0da07a92ede884a9cec715f67e6b5c3e8 - src/common/displayport/src/dp_list.cpp
9f31213ab8037d7bb18c96a67d2630d61546544a - src/common/displayport/src/dp_mst_edid.cpp
818efd113374de206a36ccf2bf594b4e433a0b85 - src/common/displayport/src/dp_evoadapter.cpp
e874ffeaeb6deec57605bf91eaa2af116a9762bd - src/common/displayport/src/dp_bitstream.cpp
de264916d0e3e873a4c624f237ea228469d0a980 - src/common/displayport/src/dp_watermark.cpp
554e6b7dadbb68ac0f3d2e368ca3fd90832ea254 - src/common/displayport/src/dp_discovery.cpp
fea946e5320e7de8e9229bca8d4a6a14b9e8db59 - src/common/displayport/src/dp_crc.cpp
4803cde0fffcf89fed46d6deaeba5c96c669a908 - src/common/displayport/src/dp_messageheader.cpp
ca92fed27d4c5ca5e9495df08e63d5f446e7f24b - src/common/displayport/src/dp_deviceimpl.cpp
d2f8d43d650d9c0b4a8d9b8070087f13efdaac79 - src/common/displayport/src/dp_connectorimpl.cpp
b18924b1d50232b92223355f608fcca1b6d7ff46 - src/common/displayport/src/dp_configcaps.cpp
f4493ab7efc7030b4cd17bf792981a9dca497e29 - src/common/displayport/src/dp_groupimpl.cpp
37eabb1ab51cb38660eb24e294c63c8320750b96 - src/common/displayport/src/dp_sst_edid.cpp
fa4f4869d3d63c0180f30ae3736600a6627284c6 - src/common/displayport/src/dp_merger.cpp
98cec6b663cf630c789e9823675cbb4948e1ba5e - src/common/displayport/src/dp_edid.cpp
fbd877bac2efc8ee33e4e108e61c961e1fc42f44 - src/common/displayport/src/dp_messagecodings.cpp
aa2e56f6c66bf91c2b4a6030de2d29480f69710e - src/common/displayport/src/dp_wardatabase.cpp
1543bbaba8f3e149239cf44be3c0d080c624d5ba - src/common/displayport/src/dp_buffer.cpp
f56f92e32710b0342805b785d34ba1a9f2a54ed3 - src/common/displayport/src/dp_guid.cpp
45da2aabdaf6b5b2bf17a3deeb045feed1545415 - src/common/displayport/src/dp_messages.cpp
f83b3c17e9f26651f12c8835a682abdd66aed3a2 - src/common/displayport/src/dp_splitter.cpp
56ee9318a7b51a04baa1d25d7d9a798c733dc1bc - src/common/displayport/src/dp_vrr.cpp
d991afdb694634e9df756184b5951739fc3fd0ab - src/common/displayport/src/dp_auxretry.cpp
719d2ddbfb8555636496cb5dd74ee6776059db92 - src/common/displayport/src/dp_timer.cpp
fe8007b3d98dad71b17595ecb67af77b198827a0 - src/common/displayport/src/dptestutil/dp_testmessage.cpp
36e80dd13c5adc64c3adc9a931d5ebbf922e9502 - src/common/displayport/inc/dp_groupimpl.h
d876d77caef3541ae05f310857f3d32e642fba04 - src/common/displayport/inc/dp_auxdefs.h
070b4f6216f19feebb6a67cbb9c3eb22dc60cf74 - src/common/displayport/inc/dp_buffer.h
cca426d571c6b01f7953180e2e550e55c629f0f4 - src/common/displayport/inc/dp_auxretry.h
e2075486b392d6b231f2f133922ac096ca4bc095 - src/common/displayport/inc/dp_ringbuffer.h
80380945c76c58648756446435d615f74630f2da - src/common/displayport/inc/dp_timeout.h
2f134665b274bb223c3f74e0ec5c6a0392fa6387 - src/common/displayport/inc/dp_discovery.h
72f91aac76264d34ce778489f5ce839e03833db8 - src/common/displayport/inc/dp_messages.h
2067e2ca3b86014c3e6dfc51d6574d87ae12d907 - src/common/displayport/inc/dp_timer.h
325818d0a4d1b15447923e2ed92c938d293dc079 - src/common/displayport/inc/dp_hostimp.h
9a0aa25938adf3bda9451aeab67fb04e266d771d - src/common/displayport/inc/dp_deviceimpl.h
df11366a5bcfb641025f12cddf9b5e8c2ed008de - src/common/displayport/inc/dp_watermark.h
78595e6262d5ab0e6232392dc0852feaf83c7585 - src/common/displayport/inc/dp_auxbus.h
c2f5f82ddf1d0b5c976264ceb14fe9b67bf12851 - src/common/displayport/inc/dp_messagecodings.h
cdb1e7797c250b0a7c0449e2df5ce71e42b83432 - src/common/displayport/inc/dp_merger.h
f6e1b0850f5ed0f23f263d4104523d9290bb8669 - src/common/displayport/inc/dp_vrr.h
07d22f84e6a386dad251761278a828dab64b6dd5 - src/common/displayport/inc/dp_bitstream.h
8d8a5f0160922b6630fa796789c5d59cce94d9e0 - src/common/displayport/inc/dp_configcaps.h
7b7d9a137027fbbedfc041465987fa4ed4198ce4 - src/common/displayport/inc/dp_edid.h
34e808f745eaaff13aeb4e6cde1a8ce35f7b9def - src/common/displayport/inc/dp_connector.h
36d3c602cbbf0a52d574f841ba1b75125ec3b24a - src/common/displayport/inc/dp_linkconfig.h
29ee5f4ef6670f06e96c07b36c11e3bad8bee6aa - src/common/displayport/inc/dp_address.h
02b65d96a7a345eaa87042faf6dd94052235009c - src/common/displayport/inc/dp_messageheader.h
e02e5621eaea52a2266a86dcd587f4714680caf4 - src/common/displayport/inc/dp_linkedlist.h
d0b72ca2db108478bba75393c7255356da0e8233 - src/common/displayport/inc/dp_regkeydatabase.h
a3fc03562a3fa0968ab8d4a50424465174392f0e - src/common/displayport/inc/dp_connectorimpl.h
eb9cdbb0a907926b1afd2a551ec19830f06ae205 - src/common/displayport/inc/dp_splitter.h
750ecc85242882a9e428d5a5cf1a64f418d59c5f - src/common/displayport/inc/dp_object.h
4a098c4d09dedc33b86748d5fe9a30d097675e9f - src/common/displayport/inc/dp_list.h
e70068249ebb59040a3e3be1fc4248d714550e61 - src/common/displayport/inc/dp_evoadapter.h
2a81681efef7ffced62c6d64cfdbc455d85fdb0a - src/common/displayport/inc/dp_mainlink.h
11487c992494f502d1c48ff00982998504336800 - src/common/displayport/inc/dp_internal.h
01f1dd58ed5bb12503fa45be7a6657cde0a857e2 - src/common/displayport/inc/dp_guid.h
e27519c72e533a69f7433638a1d292fb9df8772e - src/common/displayport/inc/dp_crc.h
379d3933c90eaf9c35a0bad2bd6af960a321465f - src/common/displayport/inc/dp_wardatabase.h
5bd3706ceea585df76a75dda7f9581b91ee8f998 - src/common/displayport/inc/dp_tracing.h
020194b85245bad5de4dfe372a7ccb0c247d6ede - src/common/displayport/inc/dptestutil/dp_testmessage.h
edded9ca3d455444372fe6c497b2d61bd0cc3f96 - src/common/unix/common/utils/nv_memory_tracker.c
26f2a36442266c5d2664d509ecfd31094a83e152 - src/common/unix/common/utils/nv_vasprintf.c
e903bbbecf4fb3085aaccca0628f0a0e4aba3e58 - src/common/unix/common/utils/nv_mode_timings_utils.c
bda08c8398f68ffc2866ebc390dc63a09a16b0b9 - src/common/unix/common/utils/unix_rm_handle.c
07c675d22c4f0f4be6647b65b6487e2d6927c347 - src/common/unix/common/utils/interface/nv_memory_tracker.h
667b361db93e35d12d979c47e4d7a68be9aa93b6 - src/common/unix/common/utils/interface/nv_mode_timings_utils.h
8d9c4d69394b23d689a4aa6727eb3da1d383765a - src/common/unix/common/utils/interface/unix_rm_handle.h
9e008270f277e243f9167ab50401602378a2a6e8 - src/common/unix/common/utils/interface/nv_vasprintf.h
881cbcc7ed39ea9198279136205dbe40142be35e - src/common/unix/common/inc/nv_assert.h
2476f128437c0520204e13a4ddd2239ff3f40c21 - src/common/unix/common/inc/nv-float.h
d5253e7e4abd3ad8d72375260aa80037adcd8973 - src/common/unix/common/inc/nv_dpy_id.h
1c947cfc8a133b00727104684764e5bb900c9d28 - src/common/unix/common/inc/nv_mode_timings.h
995d8447f8539bd736cc09d62983ae8ebc7e3436 - src/common/unix/common/inc/nv_common_utils.h
befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - src/common/sdk/nvidia/inc/nv_stdarg.h
78a4b6b19a38de41527ef8b290754deca5906817 - src/common/sdk/nvidia/inc/nvcd.h
5cec5038e1f4a395a08b765c8361a9560f3312b7 - src/common/sdk/nvidia/inc/nvdisptypes.h
751abf80513898b35a6449725e27724b1e23ac50 - src/common/sdk/nvidia/inc/nvmisc.h
1e7eec6561b04d2d21c3515987aaa116e9401c1f - src/common/sdk/nvidia/inc/nv-kernel-interface-api.h
fa267c903e9c449e62dbb6945906400d43417eff - src/common/sdk/nvidia/inc/nvlimits.h
3b12d770f8592b94a8c7774c372e80ad08c5774c - src/common/sdk/nvidia/inc/nvi2c.h
f5a682339a89d2b119b43e5b9263dd67346ed3bc - src/common/sdk/nvidia/inc/cpuopsys.h
4a97d807a225d792544578f8112c9a3f90cc38f6 - src/common/sdk/nvidia/inc/nvstatuscodes.h
5cf4b517c9bd8f14593c1a6450078a774a39dd08 - src/common/sdk/nvidia/inc/nv-hypervisor.h
56cca793dd7bcbc4a3681677a822fc9f7a11a091 - src/common/sdk/nvidia/inc/nvos.h
7de14a0c3cc8460a9c41e1ee32fda5409c5b9988 - src/common/sdk/nvidia/inc/mmu_fmt_types.h
e7a5fa74517ecd7f617860f01c5523bc5acd6432 - src/common/sdk/nvidia/inc/rs_access.h
b3de92f4edb1fcc856fd62b74359c9cd447519a8 - src/common/sdk/nvidia/inc/nverror.h
c8b96af9d498f87cb9acde064648f9e84d789055 - src/common/sdk/nvidia/inc/nv_vgpu_types.h
af0bc90b3ad4767de53b8ff91e246fdab0146e8b - src/common/sdk/nvidia/inc/nvsecurityinfo.h
b249abc0a7d0c9889008e98cb2f8515a9d310b85 - src/common/sdk/nvidia/inc/nvgputypes.h
edf1f7d1457b015aa92c12f74f9ffa1e2f86a821 - src/common/sdk/nvidia/inc/nvtypes.h
b5dedeada189123f1965650827bf8a8193383a92 - src/common/sdk/nvidia/inc/nvimpshared.h
95bf694a98ba78d5a19e66463b8adda631e6ce4c - src/common/sdk/nvidia/inc/nvstatus.h
9bca638f5832d831880f090c583fac6fc8cf6ee6 - src/common/sdk/nvidia/inc/dpringbuffertypes.h
7c03663f5e12754572e6efcbe09f51ec2c5f6502 - src/common/sdk/nvidia/inc/g_finn_rm_api.h
a506a41b8dcf657fb39a740ffc1dfd83835d6c89 - src/common/sdk/nvidia/inc/nvcfg_sdk.h
95b0de4e76d9cc1bf49ef953fc00aa47e238ccd2 - src/common/sdk/nvidia/inc/nvfixedtypes.h
0edffddbe7764b268f724abc4ac84924767d1bf2 - src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
8607fdd8ecaa5140bac6643a3f715610ed391d67 - src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h
352825959d98fe9b47a474cfdd154d380c80d24e - src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h
5b573deb4d68ccb67d9cccc11b28203c5db3d2f7 - src/common/sdk/nvidia/inc/ctrl/ctrl0002.h
bfee287b190fd698735c5660592741ba5c25a8ea - src/common/sdk/nvidia/inc/ctrl/ctrl0020.h
1cd4acc266f26dba813ac8802dba4e7ab381f753 - src/common/sdk/nvidia/inc/ctrl/ctrl0080.h
175ad4d300fa40b960d07fee059b51c6b8639f01 - src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h
b35f86170f27005bc714b37edc96dffb97691bd4 - src/common/sdk/nvidia/inc/ctrl/ctrla081.h
f64c19679dc9a20e62ef86d01878a006b505ed93 - src/common/sdk/nvidia/inc/ctrl/ctrl906f.h
72164895b0055a1942e1190a05d5090753af95a1 - src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h
360ed7fefcd6f8f4370b3cf88d43a9f8eec1e86d - src/common/sdk/nvidia/inc/ctrl/ctrl00da.h
90843f8173a341deb7f1466cd69a17114c6b9e4f - src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
c8b2e0e64bb3cf3c562dee5fa7913035f82d8247 - src/common/sdk/nvidia/inc/ctrl/ctrl402c.h
7433f9674e36f120671d6e1802f2cdbcaadc58c3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080.h
4fc1dd23cbfdb4ce49f1722f6e282cd21f33b7f5 - src/common/sdk/nvidia/inc/ctrl/ctrla06f.h
a75a0a693d5742c8aecd788dc204a69863cfaf39 - src/common/sdk/nvidia/inc/ctrl/ctrl00de.h
3fcf5dbb82508d88a040981a7ab21eac1466bb2b - src/common/sdk/nvidia/inc/ctrl/ctrl0073.h
fcdf7b331c3f7744d296918e68d44dfb114b9461 - src/common/sdk/nvidia/inc/ctrl/ctrl00fe.h
b4cecb527cdc3ba4e68ca0031ac2179756108cb0 - src/common/sdk/nvidia/inc/ctrl/ctrl003e.h
ade4a731f59c7cd16b4a60d318a19147b9918bb9 - src/common/sdk/nvidia/inc/ctrl/ctrl0004.h
625af1df5c9453bd35a9e873ee5c77e73d5fd195 - src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
6627bf1716c0e06e870c083d264753d6a0abb439 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
a002a436f77b9544041a259405dddba90301df01 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h
07f82ae90cde3c6e2e6c5af135c40e01660c39a3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h
aa86ffd04a55436ecacbedb1626f6187bbddedf7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h
59340a74f26b92f689fe99f8303775c87a4bbd58 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h
48691dd2c8d93fbd162e207cdb5d27ea30741d36 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h
0cd5e883dfafb74ce2ec9bccca6e688a27e6cfa9 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h
96f72ec608cd198be995f3acd9c04afe7c7e6dc8 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h
97bb79e74b25134fa02a60d310b3e81170df6fd6 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h
496c7a1a0c283b25a637a996995d3987c9045346 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
d5cdbcd10e049e8daf48feb5347f070d4ef85f8b - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spdm.h
347efee37fa9404ce1933f01a7aa8a43b229db44 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h
27341c2b0ad4eb10044fdf9fc2377024b4c63297 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
e8d117ea0d596ed6415324bd136de337f1a36ff1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h
359c6b06f2712a527d1ef08465179c14a8b4a751 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h
ae428e2b33fd058eeaffbbd4fbcd42178345883c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h
18ed4b62c824c252abdd89a6616e3cc325ffa7fa - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h
1990d0c4fa84c6d078282d4d7d0624ccb0325ce7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h
5ac6c9a299256935259eaf94323ae58995a97ad7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h
d4ba227a522423503e5044c774dbcca692c48247 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h
e4441458a7914414a2092f36a9f93389ed65154a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h
ecd312fabb249a25655e151cee3615c5ab61ffa7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h
c1e506bd4bb6ad792c802961a9e03b371abb6919 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h
5013ec94fa6311100818efb422b013ed77cffe82 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
86737d12192b2e7dc878bbeb8e57a41dcc1a655e - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
920f69f6d8386a107160da834545f71172cc2f0f - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h
bf976b3c428ccb9cb80d2f84f80b2c33d96e6ce1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h
8b622186edb156e980d02bd59a71c01923d1aa23 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h
fed713e236b4fbc1e71dcf6747182ebea5836318 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
b2eecbca32d87b939858bf0b22f93c06b49b3a04 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h
3db5bcbcae4063f2356ec76924b4bcc1d0df1a05 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h
55cee85b56cb6ed5d017bab55c40cc8799789c8b - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h
66aa4e08f838e1f87e4babacb42d3d59cb6837ff - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmu.h
42dc8204c0f6da47c5f741344032fc02702cfac5 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h
59254e4bdc475b70cfd0b445ef496f27c20faab0 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h
93a9fa93eb3d1099991e4682b6228124220ca293 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
cfa32c37f373eeef53aedc3f4dffff1634c122e8 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h
4f31fe752e050953a0f87d04063dc152bba261fe - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h
01a6a431e8aeffeec97755009b4e9575bdf0de7b - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h
b8e8c5ccab01d7997d1fd5579a690cb3279a8ab3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h
22b8cc6c4677e664904659c726425a62aa24124e - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
aa0f685b94bdae99a58aa1a45735b0593a2e6f5a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h
4c2af959d06536294d62b2366a6ba61ca744bd50 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h
5c7b955ef5e6f6ca9c0944e8a2b2c4a1ae760e04 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h
898fa08818b657c27b456d952e7a4e09d8d197ee - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h
4fa54b01cd70c3ca3b5cac93bade62dd09641b97 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vgpumgrinternal.h
74f1abf45a2a0f60c82e4825b9abfa6c57cab648 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h
c30b5995d353e68623b32fea398f461351e3b8f1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h
6c467ece3508071c2b3a296afffedd592726f8de - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h
d411633fdeae66035e8c018ec8f6f25a9d5dd462 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
cfe695da65835f26c82399db0e44a56c7162c180 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h
5f70c2eb6a144bc4d7ca8be63fa46391909e8201 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h
2a00952f0f3988c5425fec957a19d926ae75ba28 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h
6975ff971c7ed1ac1a429896a3be1d95353fa4bd - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h
0710ae87ce40008bea9181310b755ed74c397bfe - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h
a8384da236fdd365d15d26daeb7db1c117ce1072 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h
e8d883de767aa995a374d8da56b5c9da8787cb1d - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h
3ab2fc007f2c76ddc89caf14c4db0ab530515d4a - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h
5f4b08b9ee7853eb33269ef7b415050eac2d702a - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h
50f2ef0c01ab81077bd0e313d9ff168faae91670 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h
ce4e42c8e73047ae03f835f9d3655dda1eb44452 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h
53134475c1fd9c228a2c607051b34c28a5a80b03 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h
a138379dd76c468072f1862b8fc6ae79ee876b4e - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h
e3fb93f0ff3469ec76cecdc6f0bf1c296551a2b1 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h
0acaf597e0fc8f59a99b1772b7370395513492ed - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070event.h
fa763827e4359b2deb6307ef742474f8f6f960dd - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h
67a911b3177b75243e2fceef821ebcfd3668235e - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h
9279520e7dec45516d5339d82d35eb60b88f7300 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h
ce19b7278c6720b3bee62bcaa763ebb322d91957 - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
e919b586a0e44cfe96b819deeab2c21c6af34f55 - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06finternal.h
cebcfa209648731e86af526834717b19d5b24159 - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fevent.h
83d495dfe528167aa8ddbf45091051a89bd1a262 - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h
6bc78fd963e407de843598786bdbcd1653005328 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h
a33a1c1173962183793d84276e46c61d27ca867e - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h
ebf415ac7d55643fa24493f27b69a843ea05f6c7 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h
dd49db523d761d6f14e3890549cd8186c25f1d62 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h
4e7733c378eb6f7924e43ff111017ae0e433800d - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h
668e6d37c0a556a70ae003569fe237b1024d6e6b - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h
c341344b0879c5e9c7ba9ac0005eb28b347eaa63 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h
1b594c39d1439c3d1ecc24c4325b2ea8c2724548 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h
1cef17e1833c002d968a2255726a4f785e4e66e7 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h
899e3bc9a551ca0b181d1c8dd7ef8d779a66ecc4 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h
d08ef822e97ee56984618d52ed3ed55ee395eadb - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h
5782a19aeaf9695c13940cf4532e41523a8460e3 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h
0146d2b3ecec8760e76dacd8ce6bb75c343c6cac - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h
92ff82d1045933baa79958a9f6efd451b0123e95 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h
316494234df96c6af34cc0bd2b1c791dc42ac92b - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h
4f0ccb0667bd3e3070e40f3f83bede7849bc78e4 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h
7edd8cdb8061ec137bc29d0dbbfbb5d169c0fd35 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h
a3328cf6633f9b04258eff05ce30e66cc6930310 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h
68bdc682ee42784c09409cd581bb991f7fc1bf41 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h
12f1e560480dafde75646fb41aa349d9d729ca7d - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h
ec7b09fe14c31c175e0abfcfa85dee20d57d02b4 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h
12f1d3bb13c72fb1b52b62cf2a21f1b15619c06d - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h
e238d87a94267f62141c413d0c44f03f27204b33 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
28b06c8f8152dce2b2e684a4ba84acd25a8b8c26 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h
ea6d95de011af0039b1adc209733e524bc583c92 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h
e0c551dc47bc06f8dff5884affdeb05eb118609f - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h
ccba5f12df1bce4b4235eed5a1c7a0cd2612c2ce - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h
8e85550f24771c87138a973cd8cd714e419a14e8 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
6fb840928970cf39919f2f415932bcc3e0764b25 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
33716a49ba4f7fcc0faa889d535e370a14edd582 - src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h
c74ac448c3382d92e662804b56e73edd748e2678 - src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83debase.h
31534360d235be6dfdf4c1cf3854ce1e97be8fe2 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
f9db227bd1cefe92e4f35b52cafcb15266630582 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h
52f251090780737f14eb993150f3ae73be303921 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h
456707a5de78815fc6a33f2da7e2a2a45ccc4884 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h
d2992c1a9aac5b1b5cfefcca72e9a2401190158c - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
77eb4fab61225663a3f49b868c983d5d532ca184 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h
f9f404124a718ace14803ebe84efe752fcef816b - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
022feef64678b2f71ab70dc67d5d604054990957 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h
6ca26c7149455e43f32e8b83b74f4a34a24a2d29 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h
ff78c1bb58b1946f3e75e053be9f2b5de443e2f4 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
abed22b35137e2d40399eb4ed01724aa789cb635 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h
505860d3cd6f7d5144f97195b9fb32dd5b8f74aa - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
1066e2e0a0633b0dd1b9114f31079c30178a5ac8 - src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h
3f747a4fc98291329e0245a971248cf2c28a1b60 - src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h
41a588413e1b13f0f3eec6647ffc7023dfaf651f - src/common/sdk/nvidia/inc/alloc/alloc_channel.h
04ab1761d913030cb7485149ecd365f2f9c0f7da - src/common/sdk/nvidia/inc/class/cl0005_notification.h
ddbffcce44afa7c07924fd64a608f7f3fe608ccc - src/common/sdk/nvidia/inc/class/cl0071.h
e6818f1728a66a70080e87dac15a6f92dd875b4e - src/common/sdk/nvidia/inc/class/cl927d.h
d90649c6a6c491bf086958426b56c697222e10bc - src/common/sdk/nvidia/inc/class/cl00fe.h
e1bfd0c78f397e7c924c9521f87da8286bebe3f1 - src/common/sdk/nvidia/inc/class/cl84a0_deprecated.h
dd4f75c438d19c27e52f25b36fc8ded1ce02133c - src/common/sdk/nvidia/inc/class/cl917cswspare.h
28867d69a6ceac83da53a11a5e1ef87d9476f0be - src/common/sdk/nvidia/inc/class/clc57d.h
c61f8348c2978eef0a07191aaf92bd73e935f7bd - src/common/sdk/nvidia/inc/class/clc67e.h
2614a83d383b540f23ef721ec49af1dfde629098 - src/common/sdk/nvidia/inc/class/cl0080.h
05605d914edda157385e430ccdbeb3fcd8ad3c36 - src/common/sdk/nvidia/inc/class/cl9171.h
f968cd35ce1d1d8e3bc2f669025e6b1042b35354 - src/common/sdk/nvidia/inc/class/cl00de.h
7c8e1f1055f9522cfb2935ea0aae612ef172c26e - src/common/sdk/nvidia/inc/class/clc370_notification.h
1efc9d4aa038f208cd19533f6188ac3a629bf31a - src/common/sdk/nvidia/inc/class/cl917a.h
435a34753d445eb9711c7132d70bd26df2b8bdab - src/common/sdk/nvidia/inc/class/cl917d.h
4fc2133935b8e560c9a1048bc0b1f1c2f0a4464c - src/common/sdk/nvidia/inc/class/cl00c1.h
326dbbeb275b4fc29f6a7e2e42b32736474fec04 - src/common/sdk/nvidia/inc/class/cl9571.h
31939808cd46382b1c63bc1e0bd4af953302773f - src/common/sdk/nvidia/inc/class/cl977d.h
e0c9a155f829c158c02c21b49c083168f8b00cbe - src/common/sdk/nvidia/inc/class/clc37dswspare.h
d301edef2d1dd42382670e5a6ceef0d8caf67d28 - src/common/sdk/nvidia/inc/class/cl90cd.h
7c7406d40a09372dcae2aaf3fcad225c3dd2cf3f - src/common/sdk/nvidia/inc/class/cl9010_callback.h
941a031920c0b3bb16473a6a3d4ba8c52c1259d7 - src/common/sdk/nvidia/inc/class/cl917e.h
fb5ef3d6734a2ee6baba7981cdf6419d013cee85 - src/common/sdk/nvidia/inc/class/clc671.h
38265d86eb7c771d2d3fc5102d53e6a170a7f560 - src/common/sdk/nvidia/inc/class/cl0041.h
bb79bbd1b0a37283802bc59f184abe0f9ced08a5 - src/common/sdk/nvidia/inc/class/cl0040.h
a26ddc6c62faac1ecd5c5f43499aab32c70f32cb - src/common/sdk/nvidia/inc/class/clc67b.h
b7a5b31a8c3606aa98ba823e37e21520b55ba95c - src/common/sdk/nvidia/inc/class/cl402c.h
9e1d2f90d77e23f1d2163a8f8d8d747058e21947 - src/common/sdk/nvidia/inc/class/cl9010.h
02ff42b6686954e4571b8a318575372239db623b - src/common/sdk/nvidia/inc/class/cl30f1_notification.h
6db83e33cb3432f34d4b55c3de222eaf793a90f0 - src/common/sdk/nvidia/inc/class/cl00b1.h
fe7484d17bc643ad61faabee5419ddc81cf9bfd6 - src/common/sdk/nvidia/inc/class/cl9570.h
13f8e49349460ef0480b74a7043d0591cf3eb68f - src/common/sdk/nvidia/inc/class/clc57b.h
9f8a45cb986e3ad2bd4a8900469fe5f8b0c9463a - src/common/sdk/nvidia/inc/class/cl9870.h
c40fd87fa6293d483b5bf510e2e331143ded9fa4 - src/common/sdk/nvidia/inc/class/cl9470.h
bd9f406625e6c0cce816a5ddfb9078723e7f7fb5 - src/common/sdk/nvidia/inc/class/clb0b5sw.h
5416c871e8d50a4e76cbad446030dbedbe1644fd - src/common/sdk/nvidia/inc/class/cl00f2.h
e63ed2e1ff3fe2a5b29cfc334d3da611db2aadf6 - src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h
cb610aaae807d182b4a2ee46b9b43ebfa4a49a08 - src/common/sdk/nvidia/inc/class/clc57e.h
513c505274565fa25c5a80f88a7d361ffbcb08c3 - src/common/sdk/nvidia/inc/class/cl0005.h
dec74b9cf8062f1a0a8bbeca58b4f98722fd94b0 - src/common/sdk/nvidia/inc/class/cl0076.h
5df0ce4eb733554e963eb3c7938396f58f2dd4d5 - src/common/sdk/nvidia/inc/class/cl2081.h
8b75d2586151302d181f59d314b6b3f9f80b8986 - src/common/sdk/nvidia/inc/class/clc573.h
ab27db8414f1400a3f4d9011e83ac49628b4fe91 - src/common/sdk/nvidia/inc/class/cl987d.h
02906b5ba8aab0736a38fd1f6d7b4f6026a5185b - src/common/sdk/nvidia/inc/class/clc57esw.h
5556b1c2e267d1fda7dee49abec983e5e4a93bff - src/common/sdk/nvidia/inc/class/cl2080_notification.h
ff47d8a4b4bdb3b9cd04ddb7666005ac7fcf2231 - src/common/sdk/nvidia/inc/class/cl003e.h
cef74c734fc7d2f32ff74095c59212d9e1d4cafc - src/common/sdk/nvidia/inc/class/cl84a0.h
ef173136a93cdd2e02ec82d7db05dc223b93c0e1 - src/common/sdk/nvidia/inc/class/clc770.h
4a6444c347825e06bdd62401120553469f79c188 - src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h
78259dc2a70da76ef222ac2dc460fe3caa32457a - src/common/sdk/nvidia/inc/class/clc37e.h
053e3c0de24348d3f7e7fe9cbd1743f46be7a978 - src/common/sdk/nvidia/inc/class/cl0004.h
9b2d08d7a37beea802642f807d40413c7f9a8212 - src/common/sdk/nvidia/inc/class/clc37d.h
89d4eeb421fc2be3b9717e333e9ff67bfffa24e8 - src/common/sdk/nvidia/inc/class/cl2080.h
2e3d5c71793820d90973d547d8afdf41ff989f89 - src/common/sdk/nvidia/inc/class/clc67a.h
2d76476dba432ffc1292d2d5dd2a84ff3a359568 - src/common/sdk/nvidia/inc/class/cl0092.h
60d0c7923699599a5a4732decfbcb89e1d77b69e - src/common/sdk/nvidia/inc/class/cl9770.h
f5760f5054538f4ecf04d94fb1582a80a930bc29 - src/common/sdk/nvidia/inc/class/clc673.h
99a34eee22f584d5dfb49c3018a8cb9a7b1035ed - src/common/sdk/nvidia/inc/class/cl5070_notification.h
0285aed652c6aedd392092cdf2c7b28fde13a263 - src/common/sdk/nvidia/inc/class/cl00fc.h
95ca0b08eed54d1c6dd76fdf9cf4715007df1b20 - src/common/sdk/nvidia/inc/class/cl0020.h
992b395855033b4a1fa7536d0de6ab2d071a5f82 - src/common/sdk/nvidia/inc/class/clc77d.h
36c6162356ac39346c8900b1e0074e4b614d4b5a - src/common/sdk/nvidia/inc/class/clc370.h
204feb997ba42deab327d570e5f12235d5160f00 - src/common/sdk/nvidia/inc/class/clc57a.h
b685769b5f3fed613227498866d06cc3c1caca28 - src/common/sdk/nvidia/inc/class/cl2082.h
83427e3172c64c3b9ef393205ccc3b961ec65190 - src/common/sdk/nvidia/inc/class/cl5070.h
e72a7871d872b2eb823cc67c0a7d4cafb3d0ca18 - src/common/sdk/nvidia/inc/class/cl90ec.h
127f78d2bb92ef3f74effd00c2c67cf7db5382fe - src/common/sdk/nvidia/inc/class/clc67d.h
158c98c8721d558ab64a025e6fdd04ce7a16ba9e - src/common/sdk/nvidia/inc/class/cl947d.h
bae36cac0a8d83003ded2305409192995d264d04 - src/common/sdk/nvidia/inc/class/cl0001.h
da8d312d2fdc6012e354df4fa71ed62ae4aac369 - src/common/sdk/nvidia/inc/class/cl927c.h
c2600834921f8a6aad6a0404076fa76f9bc1c04d - src/common/sdk/nvidia/inc/class/clc37b.h
eac86d7180236683b86f980f89ec7ebfe6c85791 - src/common/sdk/nvidia/inc/class/cl957d.h
026f66c4cc7baad36f1af740ae885dae58498e07 - src/common/sdk/nvidia/inc/class/clc371.h
2f87e87bcf9f38017ad84417d332a6aa7022c88f - src/common/sdk/nvidia/inc/class/cl9471.h
0d8975eec1e3222694e98eb69ddb2c01accf1ba6 - src/common/sdk/nvidia/inc/class/cl0000_notification.h
b29ba657f62f8d8d28a8bdd2976ef3ac8aa6075f - src/common/sdk/nvidia/inc/class/cl0073.h
b71d1f698a3e3c4ac9db1f5824db983cf136981a - src/common/sdk/nvidia/inc/class/cl9170.h
15d1f928a9b3f36065e377e29367577ae92ab065 - src/common/sdk/nvidia/inc/class/cl0080_notification.h
11b19cb8d722146044ad5a12ae96c13ed5b122b6 - src/common/sdk/nvidia/inc/class/cl917b.h
a23967cf3b15eefe0cc37fef5d03dfc716770d85 - src/common/sdk/nvidia/inc/class/clc372sw.h
f3f33f70ec85c983acec8862ccaabf5b186de2bb - src/common/sdk/nvidia/inc/class/cl9270.h
20894d974d1f8f993c290463f1c97c71fd2e40b1 - src/common/sdk/nvidia/inc/class/cl30f1.h
9db39be032023bff165cd9d36bee2466617015a5 - src/common/sdk/nvidia/inc/class/cl0002.h
593384ce8938ceeec46c782d6869eda3c7b8c274 - src/common/sdk/nvidia/inc/class/cl900e.h
31ac68401e642baf44effb681d42374f42cf86b1 - src/common/sdk/nvidia/inc/class/cl00c3.h
a3e011723b5863277a453bfcfb59ce967cee0673 - src/common/sdk/nvidia/inc/class/clc670.h
78efa8d42f828c89cd2a62b8c3931ebd0b0a6476 - src/common/sdk/nvidia/inc/class/clc771.h
9797f4758d534181eeaa6bc88d576de43ba56045 - src/common/sdk/nvidia/inc/class/clc574.h
060722ac6a529a379375bb399785cbf2380db4fd - src/common/sdk/nvidia/inc/class/clc373.h
022e8405220e482f83629dd482efee81cc49f665 - src/common/sdk/nvidia/inc/class/clc77f.h
a7c7899429766c092ee3ecf5f672b75bef55216c - src/common/sdk/nvidia/inc/class/cl9271.h
95d99f0805c8451f0f221483b3618e4dbd1e1dd8 - src/common/sdk/nvidia/inc/class/cl90f1.h
74c75472658eea77d031bf3979dd7fe695b4293f - src/common/sdk/nvidia/inc/class/cl0092_callback.h
a75d43f7b84d4cb39f8a2be35c12b2d2735f0ad9 - src/common/sdk/nvidia/inc/class/cl0000.h
16f9950a48c4e670b939a89724b547c5be9938bf - src/common/sdk/nvidia/inc/class/clc570.h
c2d8bb02052e80cd0d11695e734f5e05ab7faeb5 - src/common/sdk/nvidia/inc/class/cl907dswspare.h
a9503a5558b08071f35b11df9a917310947c378b - src/common/sdk/nvidia/inc/class/cl00da.h
b1133e9abe15cf7b22c04d9627afa2027e781b81 - src/common/sdk/nvidia/inc/class/cl917c.h
866977d299eac812b41eb702a517e27bdc56e875 - src/common/sdk/nvidia/inc/class/clc37a.h
556d925de1e686243db36090cc35927f6d53c8bc - src/common/inc/nvUnixVersion.h
b4c5d759f035b540648117b1bff6b1701476a398 - src/common/inc/nvCpuUuid.h
8c41b32c479f0de04df38798c56fd180514736fc - src/common/inc/nvBldVer.h
d877f4b99ae7d18cc5c78b85e89c0a7e3f3e8418 - src/common/inc/nvPNPVendorIds.h
ebccc5c2af2863509e957fe98b01d9a14d8b0367 - src/common/inc/nv_list.h
e1fbb040ea9d3c773ed07deb9ef5d63c8c8cab7a - src/common/inc/nvSha1.h
62e510fa46465f69e9c55fabf1c8124bee3091c4 - src/common/inc/nvHdmiFrlCommon.h
4282574b39d1bcaf394b63aca8769bb52462b89b - src/common/inc/nvBinSegment.h
56f837b06862884abb82686948cafc024f210126 - src/common/inc/nvlog_defs.h
e670ffdd499c13e5025aceae5541426ab2ab0925 - src/common/inc/gps.h
87bb66c50d1301edb50140e9896e1f67aaaa7175 - src/common/inc/nvVer.h
d9c0905f374db0b9cc164ce42eab457d1ba28c53 - src/common/inc/nvop.h
6fa5359ffe91b624548c226b6139f241771a9289 - src/common/inc/jt.h
b58ed1b4372a5c84d5f3755b7090b196179a2729 - src/common/inc/nv_speculation_barrier.h
8f0d91e1a8f0d3474fb91dc3e6234e55d2c79fcc - src/common/inc/rmosxfac.h
4df0a4ae78271bb5b295288798d5be7866242adc - src/common/inc/nvctassert.h
963aebc9ec7bcb9c445eee419f72289b21680cdd - src/common/inc/hdmi_spec.h
a346380cebac17412b4efc0aef2fad27c33b8fb5 - src/common/inc/nvlog_inc2.h
5257e84f2048b01258c78cec70987f158f6b0c44 - src/common/inc/nvlog_inc.h
714db3678cd564170ec05022de6c37686da9df23 - src/common/inc/pex.h
90998aac8685a403fdec9ff875f7436373d76f71 - src/common/inc/displayport/dpcd14.h
ee0105d1113ce6330939c7e8d597d899daae662e - src/common/inc/displayport/dpcd.h
1fc95a17ddb619570063f6707d6a395684bfa884 - src/common/inc/displayport/dpcd20.h
669268ea1660e9e5b876f90da003599ba01356bb - src/common/inc/displayport/displayport.h
bbcecae47807b4578baa460da4147328140ecfcd - src/common/inc/swref/published/nv_ref.h
1efbc285d851a4430776a945d8c250b6a7019ab5 - src/common/inc/swref/published/nv_arch.h
38edc89fd4148b5b013b9e07081ba1e9b34516ac - src/common/inc/swref/published/turing/tu102/kind_macros.h
86a59440492fd6f869aef3509f0e64a492b4550d - src/common/inc/swref/published/turing/tu102/dev_mmu.h
64c123c90018c5ee122b02b02cbccfcd5ec32cab - src/common/inc/swref/published/t23x/t234/dev_fuse.h
3cddaacf90bbbefedf500e6af7eaefb0f007813c - src/common/inc/swref/published/disp/v03_00/dev_disp.h
1ea0c3d6ea0c79c01accc7b25d15b421ab49a55d - src/common/inc/swref/published/disp/v04_02/dev_disp.h
54c516f23671ec703a4e000f700c16dce640367a - src/common/modeset/timing/nvt_dmt.c
cc04c12ebe4e2f7e31d0619ddd16db0c46b9db9e - src/common/modeset/timing/nvtiming.h
1997adbf2f6f5be7eb6c7a88e6660391a85d891b - src/common/modeset/timing/nvt_gtf.c
cb1923187030de8ad82780663eb7151b68c3b735 - src/common/modeset/timing/displayid20.h
80063c05e3961073d23f76822bc9b55be533a6ee - src/common/modeset/timing/nvt_edid.c
58b68f1272b069bb7819cbe86fd9e19d8acd0571 - src/common/modeset/timing/edid.h
446e1044fcc8f7711111fca6a49d2776dba6e24c - src/common/modeset/timing/nvt_edidext_displayid.c
aad5d6f2b238b9582a63ba1e467da13d86ee4ded - src/common/modeset/timing/dpsdp.h
49df9034c1634d0a9588e5588efa832a71750a37 - src/common/modeset/timing/nvt_cvt.c
f75b1d98895bdccda0db2d8dd8feba53b88180c5 - src/common/modeset/timing/displayid.h
2868a1ecc76e5dd57535929890b922028522f4b5 - src/common/modeset/timing/nvt_edidext_861.c
5b1ce39d595dfb88141f698e73b0a64d26e9b31d - src/common/modeset/timing/nvt_dsc_pps.c
04693ced0777456f6b7005f19a4b7c39a6d20ee6 - src/common/modeset/timing/nvtiming_pvt.h
28d7b753825d5f4a9402aff14488c125453e95c5 - src/common/modeset/timing/nvt_tv.c
849309f12f14d685acf548f9eed35fadea10c4e7 - src/common/modeset/timing/nvt_edidext_displayid20.c
890d8c2898a3277b0fed360301c2dc2688724f47 - src/common/modeset/timing/nvt_util.c
783bd7a92ca178ca396b15e8027561c8b61c09a3 - src/common/modeset/timing/nvt_displayid20.c
974f52eb92bda6186510c71a2b6ae25cb0514141 - src/common/modeset/timing/nvt_dsc_pps.h
67db549636b67a32d646fb7fc6c8db2f13689ecc - src/common/modeset/hdmipacket/nvhdmipkt_9271.c
f2b434ed8bdd7624143654b7b3953d8c92e5a8e2 - src/common/modeset/hdmipacket/nvhdmipkt_common.h
e6d500269128cbd93790fe68fbcad5ba45c2ba7d - src/common/modeset/hdmipacket/nvhdmipkt_C371.c
60ee78d72d4d6b03932b7111508784538f35381a - src/common/modeset/hdmipacket/nvhdmipkt.c
443c0a4b17a0019e4de3032c93c5cac258529f01 - src/common/modeset/hdmipacket/nvhdmipkt_internal.h
bb634bc2517a2653be2534602ab0f4712e0b1363 - src/common/modeset/hdmipacket/nvhdmipkt_9171.c
9fbe6313ee438f301ac75f5ca2228e27b785c4f4 - src/common/modeset/hdmipacket/nvhdmipkt_0073.c
54a1b5e5aaf0848a72befc896ed12f1de433ad4f - src/common/modeset/hdmipacket/nvhdmipkt_9471.c
5e12a290fc91202e4ba9e823b6d8457594ed72d3 - src/common/modeset/hdmipacket/nvhdmi_frlInterface.h
381e1b8aeaa8bd586c51db1f9b37d3634285c16a - src/common/modeset/hdmipacket/nvhdmipkt_class.h
9be7b7be94a35d1d9a04f269ff560dbbb7860a2a - src/common/modeset/hdmipacket/nvhdmipkt_9571.c
1babb2c7f11b95fd69bcbc9dcffeefea29d61118 - src/common/modeset/hdmipacket/nvhdmipkt_C671.c
a1f52f0f78eec1d98b30b0f08bc1c5e88ae3d396 - src/common/modeset/hdmipacket/nvhdmipkt.h
4de33a60116ce3fa3f440db105561eddc21ce375 - src/common/shared/nvstatus/nvstatus.c
a71d2c98bc2dc5445436cd96ac5c7e6a57efcf84 - src/nvidia/Makefile
c5f16fdf43ca3d2845d120c219d1da11257072b0 - src/nvidia/nv-kernel.ld
3f7b20e27e6576ee1f2f0557d269697a0b8af7ec - src/nvidia/interface/nv-firmware-registry.h
bff92c9767308a13df1d0858d5f9c82af155679a - src/nvidia/interface/nvacpitypes.h
d02ee5bb3f19dffd8b5c30dc852cea243bcdf399 - src/nvidia/interface/acpidsmguids.h
75d3a4e35230b114a2a233be8235f19220d953a4 - src/nvidia/interface/nvrm_registry.h
60c7cafce7bd5240e8409e3c5b71214262347efc - src/nvidia/interface/acpigenfuncs.h
7dec210405c35d200be24bd1c0c81fcc6c3f93bf - src/nvidia/interface/deprecated/rmapi_deprecated.h
f7b69924dbdf53be6cd184583145726aa65d3acd - src/nvidia/interface/deprecated/rmapi_deprecated_utils.c
d81ef382635d0c4de47dfa3d709e0702f371ceb7 - src/nvidia/interface/rmapi/src/g_finn_rm_api.c
253baf641e4e29ede6a49129c2dd1415b7e5d9bd - src/nvidia/kernel/inc/nvpcf.h
6f9edcff7ad34c4e85ec7c0b8d79c175009d438c - src/nvidia/kernel/inc/objrpc.h
1feab39692ea8796ac7675f4780dfd51e6e16326 - src/nvidia/kernel/inc/objtmr.h
0cff83f4fdcc8d025cd68e0a12faaeead09fa03b - src/nvidia/kernel/inc/tmr.h
b5f3932b9f6e7223e8c755155b60be98fd0a21df - src/nvidia/kernel/inc/vgpu/rpc_global_enums.h
961ed81de50e67eadf163a3a8008ce1fde1d880c - src/nvidia/kernel/inc/vgpu/rpc_hal_stubs.h
31deee778df2651d3d21b4d9c8ab180b8dc1ff14 - src/nvidia/kernel/inc/vgpu/rpc_vgpu.h
6006a612fcd546de794676da19fc431ddd0410e5 - src/nvidia/kernel/inc/vgpu/rpc.h
3477a139633890d3fdd2e5e02044e1a293566e3d - src/nvidia/kernel/inc/vgpu/rpc_headers.h
9b8e6b29a48ff022dda092cc8139dbe5ac6dedd8 - src/nvidia/generated/g_rs_client_nvoc.c
d0a43a5d4941392b3c6c1b5a0d156edc26559ded - src/nvidia/generated/g_disp_inst_mem_nvoc.c
f1e98f21f75eaba821fe16f2410921a4fd7c54ee - src/nvidia/generated/g_mem_mgr_nvoc.h
b0089bee11caa0d8994b39eaecfb42ca3507de37 - src/nvidia/generated/g_syncpoint_mem_nvoc.h
c2eae693c1b8d8502db368048f3b1c45d0576dc5 - src/nvidia/generated/g_chips2halspec_nvoc.h
0b2233e5cb68257231dd94310559bc09635c8279 - src/nvidia/generated/g_generic_engine_nvoc.c
e41a55d75416e6d9978d2cf788553acdb9336afd - src/nvidia/generated/g_resource_nvoc.c
14336cd31573538728e0bf17941681b9d91d2b12 - src/nvidia/generated/g_gpu_access_nvoc.c
b18ed7a5d71571b57266995f0d30317814e8bd6e - src/nvidia/generated/g_gpu_access_nvoc.h
76b1f545e3712a2f8e7c31b101acd9dd682c52f8 - src/nvidia/generated/g_traceable_nvoc.c
42fac2ccb00006825e7d42a6b23264870365ace6 - src/nvidia/generated/g_gpu_user_shared_data_nvoc.h
b3b3ee6b514249e553187dc14a98f74bdd9fa6c6 - src/nvidia/generated/g_virt_mem_mgr_nvoc.h
fcb89aff81d5e2b0a4a39069356ee4644bf53b2b - src/nvidia/generated/g_os_nvoc.c
eefa27872e4acde78a18211b8ab51bc5436b6cfe - src/nvidia/generated/g_nv_debug_dump_nvoc.h
493a547850d9e7cdf74350de0e42aef2f66869a9 - src/nvidia/generated/g_client_resource_nvoc.h
3b08d4bb1612bb193cd2f26229b119cc43284879 - src/nvidia/generated/g_rs_server_nvoc.h
73a37ad59b9b13b61eb944748b6c2ba3cad7b630 - src/nvidia/generated/g_traceable_nvoc.h
19d73b04597bca6d3a7dd82d327e6cbf4a591a65 - src/nvidia/generated/g_eng_state_nvoc.c
0eb34617fea0cc6843d317ba7cea287483e39703 - src/nvidia/generated/rmconfig.h
17c69e14076324c230bbe68b55141089c1f4d10e - src/nvidia/generated/g_os_desc_mem_nvoc.h
1268ee54592c8ae1078b72bfaff882549efbcd3c - src/nvidia/generated/g_disp_capabilities_nvoc.c
bdb198b18c700dc396f73191a8e696d106a1f716 - src/nvidia/generated/g_resource_nvoc.h
b0f47afbc6aefce339db95801f48823989abad8a - src/nvidia/generated/g_mem_desc_nvoc.h
779103a57f68832641a7616ea8c5608780cfc155 - src/nvidia/generated/g_disp_objs_nvoc.h
2a3476812057692ef35f9658d24c275a1576f498 - src/nvidia/generated/g_sdk-structures.h
125b688444f16d9cb3902a9f79959c05c12397e3 - src/nvidia/generated/g_disp_sf_user_nvoc.c
dbf11a9f931cfac248c3e6006bedeadb3d062670 - src/nvidia/generated/g_gpu_group_nvoc.c
ecb4db5b676f0541c851ba9454577812e1a07023 - src/nvidia/generated/g_object_nvoc.c
9b4cf69383d0a7b7492b2fa28983cfe4d88c3263 - src/nvidia/generated/g_vaspace_nvoc.h
d3b89f97bb0f4c5c0ca44e74040aab24c70ae06f - src/nvidia/generated/g_generic_engine_nvoc.h
c1652e6cc404f23660ee440b61c6d0b9149ff593 - src/nvidia/generated/g_gpu_resource_nvoc.c
85580813dbcf78bf4aeecf5e55054447396dcfe3 - src/nvidia/generated/g_gpu_db_nvoc.c
a97bf85ce6681aae086e0415aecaebf0208bfebb - src/nvidia/generated/g_tmr_nvoc.h
31270057a91fcd2dc7dbf1abed9e3f67d8db1787 - src/nvidia/generated/g_rmconfig_private.h
e181d568b36f4d6e717d6d26c7bbe4b4ed968f4f - src/nvidia/generated/g_gpu_mgmt_api_nvoc.c
3b0e038829647cfe0d8807579db33416a420d1d2 - src/nvidia/generated/g_chips2halspec.h
4302502637f5c4146cb963801258444f2d8173e1 - src/nvidia/generated/g_allclasses.h
61cb019a28b25479d65022226623be2d20f32429 - src/nvidia/generated/g_nv_name_released.h
7f89931ecb53fb0b88da1be5489fe50e3d7897c3 - src/nvidia/generated/g_resserv_nvoc.h
ac3965eea078f1998c3a3041f14212578682e599 - src/nvidia/generated/g_vaspace_nvoc.c
a44899c21c77899b3b8deb7b2613b16841bbf397 - src/nvidia/generated/g_gpu_mgr_nvoc.c
631ac1d7bfa00f66e699937b8cabc0cbbc26d151 - src/nvidia/generated/g_rs_server_nvoc.c
67df2bc381609f290f173ea73f3e8125ac073888 - src/nvidia/generated/g_gpu_mgmt_api_nvoc.h
0e15fddc0426c42f3d22e5cb5609b5193adb7145 - src/nvidia/generated/g_standard_mem_nvoc.h
0a6b27d74e5e4ba872d77bfd369ddb5772abd8f8 - src/nvidia/generated/g_event_buffer_nvoc.h
9934a21ca6169499e471a2fc000c3eaee348391e - src/nvidia/generated/g_resource_fwd_decls_nvoc.h
aac0c7df733e179f2a5906ab66b302a5bee82cbe - src/nvidia/generated/g_gpu_db_nvoc.h
47ced25e3252d402b9a5c30115705d16651ab460 - src/nvidia/generated/g_object_nvoc.h
81f915ae199df67c1884bfc18f3d23f20941af6a - src/nvidia/generated/g_dce_client_nvoc.c
c8d6ddc934e0c4ae3fd2d2dc81d0d1a91c8b8d52 - src/nvidia/generated/g_disp_inst_mem_nvoc.h
b30dc7b4114007f7649e18a7be2d829a3752447a - src/nvidia/generated/g_mem_nvoc.c
33932ed2752329a63bcafd88f00e69203c3621c0 - src/nvidia/generated/g_gpu_mgr_nvoc.h
2156c006acf83494e55de3d5604e9234f73b2867 - src/nvidia/generated/g_eng_desc_nvoc.h
6742231d4f59cc03ed822b80fb3995d1821de488 - src/nvidia/generated/g_standard_mem_nvoc.c
a42b32adb0533fafb2de6b127c7e1939029cdeb5 - src/nvidia/generated/g_system_nvoc.c
a044b01f708a5690f1796579904539791e24d5a3 - src/nvidia/generated/g_hda_codec_api_nvoc.h
ddc0ac4e1d8b8aef15e147f1f85f8df37c196763 - src/nvidia/generated/g_hal_register.h
fc7f913eab7ef26b877606e0593928784c3121ec - src/nvidia/generated/g_device_nvoc.c
9c03069f964e4d628b68a4ab0cff3b44aee82bdd - src/nvidia/generated/g_rpc-structures.h
8db5b2345278ce409562ca35754447d353dd54d7 - src/nvidia/generated/g_rs_resource_nvoc.h
ad695d35b837b970b8f50a280d400ffed5067c0f - src/nvidia/generated/g_os_desc_mem_nvoc.c
14450b18d002d4e1786d4630ef4f1994c07ef188 - src/nvidia/generated/g_odb.h
93f9738c0e8aa715592306ddf023adf6b548dcc4 - src/nvidia/generated/g_nvh_state.h
dad5def7d6c24268ac1e1a75038cbf33900745ff - src/nvidia/generated/g_binary_api_nvoc.h
06094e14a41e58c8a687bc8b64197a73c0c2b61a - src/nvidia/generated/g_system_nvoc.h
92c99fd64caa9f78664ed1fd54313ee82e2cf9c7 - src/nvidia/generated/g_disp_channel_nvoc.h
e70cc806acae6fc1c3f4ffc283ded8351f3482c4 - src/nvidia/generated/g_hda_codec_api_nvoc.c
2239839c8a780a87e786439a49ab63e25d25001a - src/nvidia/generated/g_rmconfig_util.h
e3078050c80bf14c9f91f12b43eab48af94c9ec5 - src/nvidia/generated/g_disp_objs_nvoc.c
f9bdef39159a8475626a0edcbc3a53505a0ff80a - src/nvidia/generated/g_os_hal.h
57431742e2f1bbefc9142db49a84f4e8264e4673 - src/nvidia/generated/g_mem_list_nvoc.h
12cb2f4228fe81762587413c7f346f3d271d9b6b - src/nvidia/generated/g_eng_state_nvoc.h
bfb7c703aa0e55ed5df9310a233861e43ef5c828 - src/nvidia/generated/g_prereq_tracker_nvoc.h
734ea4782083e4a7b940722577dc75177446eed1 - src/nvidia/generated/g_io_vaspace_nvoc.c
8b5821085e5aabc00408e7a90e78b2471de6797e - src/nvidia/generated/g_os_nvoc.h
5c65c680b77a501fd98460c4ce8fecd7ed95be14 - src/nvidia/generated/g_mem_mgr_nvoc.c
cf2a81f40855ceb13b0dc18fb1ee790ba939bfb2 - src/nvidia/generated/g_event_buffer_nvoc.c
d47bc1508583e02dc8234efce85fb7803dbd3d97 - src/nvidia/generated/g_hypervisor_nvoc.h
35889e5f6bdc996fa95c76d05e7b8902328d450b - src/nvidia/generated/g_rs_client_nvoc.h
61d09dd789fc4159344cec4c02ff9db13cd246eb - src/nvidia/generated/g_hal_mgr_nvoc.h
af86a67a1c33acc193efa6dba8bc46ebe5dbb5eb - src/nvidia/generated/g_gpu_class_list.c
aac848bd48955659eb5e07fcac70e6fe3c3a137a - src/nvidia/generated/g_hal_nvoc.c
1ca8ad4d9216aef1df145358c48e7ca533927e25 - src/nvidia/generated/g_objtmr_nvoc.c
b35821f54f7ec965edd25a60e58d7639cd19df19 - src/nvidia/generated/g_hal_archimpl.h
97ce053e6b047ecd0803a7571d061516de9d95ff - src/nvidia/generated/g_hal_mgr_nvoc.c
972e9ba00890776dc3a4f51300cbcd73c1691c1d - src/nvidia/generated/g_rpc-message-header.h
906af83650985c58b63fe3e1f24b75b5ac62d90d - src/nvidia/generated/g_gpu_nvoc.c
431796f7485743a0848883a204676424b4a3b65f - src/nvidia/generated/g_hal.h
44bcd3503d90703a33a7bb9c75b41111d092c5f8 - src/nvidia/generated/g_client_resource_nvoc.c
142a5e1b07a3bbe2952b27f4a65a133f5a100dc3 - src/nvidia/generated/g_prereq_tracker_nvoc.c
3c7d16d75ef53c09d7076c55976e71fd17a3f483 - src/nvidia/generated/g_subdevice_nvoc.h
7c698deeb69b4e92af3c7c4e6fc6274b75dab05c - src/nvidia/generated/g_disp_channel_nvoc.c
9b0d4695e84ec959790dd553944cb44685c5c251 - src/nvidia/generated/g_event_nvoc.h
803eb8b520597468e3dc99ecd29ffc1027dfe4be - src/nvidia/generated/g_context_dma_nvoc.h
09597f23d6a5440258656be81e7e6709390128f8 - src/nvidia/generated/g_hal_private.h
b459db8ccf299f7bda0fa9fa18ef1e3aeb2996eb - src/nvidia/generated/g_gpu_user_shared_data_nvoc.c
170a42c047d0085873a48db0d83d59feb8dc327f - src/nvidia/generated/g_binary_api_nvoc.c
47f006ce959471f8ecd2a7b05d83d854610a521b - src/nvidia/generated/g_system_mem_nvoc.c
b9f25e208f5ea6f566dbd9cbcaaa30cd0786c31b - src/nvidia/generated/g_client_nvoc.h
31ee3939e0830f960aeb854827af0aace0dddb93 - src/nvidia/generated/g_kern_disp_nvoc.h
eb95c379eec668bfd697bcd4977d4f18da0b56bb - src/nvidia/generated/g_device_nvoc.h
1d66bab50a7d39faa2b0fec469a4512d2c7610d5 - src/nvidia/generated/g_rmconfig_util.c
a1bfb789c1e23bac2b7a31255b7d738e40a290f2 - src/nvidia/generated/g_mem_nvoc.h
b5d4219786bd77483ce70a770caac52db51566cc - src/nvidia/generated/g_ioaccess_nvoc.c
97bab26b95f21f4618fd023284b20dd4d5a76ad4 - src/nvidia/generated/g_disp_capabilities_nvoc.h
b378d336af4d5cb4b1fb13b85042fad1fe02f4cc - src/nvidia/generated/g_journal_nvoc.h
16c7821c01a4e728d66a25ca6eb824ce85ff908e - src/nvidia/generated/g_rs_resource_nvoc.c
6771b718fe182d524864f55fa23f145012205d5b - src/nvidia/generated/g_objtmr_nvoc.h
87c14e1c1a8f37f139f6a99efaf7752d6db48db5 - src/nvidia/generated/g_kern_disp_nvoc.c
4f3ff51033e4ef9491e8b345ffea36dfb5122055 - src/nvidia/generated/g_chips2halspec_nvoc.c
8a76494ebc5809ed30c31a9afa2a46bf2463e6e5 - src/nvidia/generated/g_dce_client_nvoc.h
e4ccb216aafed837a37fca90284b0a0413b3080d - src/nvidia/generated/g_kernel_head_nvoc.c
262192e794cba0bb120cbfe75ee037e868e34ef3 - src/nvidia/generated/g_subdevice_nvoc.c
71185f1534d3c53954c271566b610045aef3ed98 - src/nvidia/generated/g_system_mem_nvoc.h
549314acf103e21a4cab113114f719626202a19f - src/nvidia/generated/g_tmr_nvoc.c
c010d93fd293ec399a0cd05662a177e7251c7b1e - src/nvidia/generated/g_event_nvoc.c
693cd3e7b93e9377634800ff2b3669939ba10603 - src/nvidia/generated/g_kernel_head_nvoc.h
0097015ef25011bee849966ef5248d206ab0f816 - src/nvidia/generated/g_gpu_resource_nvoc.h
dc922421b0f41b7b8f0219caa623c099fc3f083d - src/nvidia/generated/g_ioaccess_nvoc.h
5a46be3060122eca672dc3bf11bdb6e68700b5e4 - src/nvidia/generated/g_gpu_halspec_nvoc.h
10645f82dd031d0aa6f4a3dfc039ef776f2fdee9 - src/nvidia/generated/g_hal_nvoc.h
574adefb17ee3e2a7d85262f8ce4d8b4bc4367b4 - src/nvidia/generated/g_gpu_halspec_nvoc.c
653b72892f7c3ce7fd3e28690863ef89826b5314 - src/nvidia/generated/g_context_dma_nvoc.c
3b1586e0aebb66d31190be64b1109232ee3467bf - src/nvidia/generated/g_ref_count_nvoc.h
155b6249c4fd472218cef640fa0a665cec10bfa4 - src/nvidia/generated/g_disp_sf_user_nvoc.h
2cac1d138a8bcf99e70068f50698f6cdd3dc57dd - src/nvidia/generated/g_syncpoint_mem_nvoc.c
6aea089965620df057ab6b900496590ca26772b2 - src/nvidia/generated/g_virt_mem_mgr_nvoc.c
8e8c58d6e99de01acf926026506ab91499109dd4 - src/nvidia/generated/g_gpu_nvoc.h
8f1b0c4a6b75280b5155aef8490c95237bbf6f97 - src/nvidia/generated/g_gpu_group_nvoc.h
d960a819d29d7e968eaab0e7a29897426b7ba646 - src/nvidia/generated/g_io_vaspace_nvoc.h
47bed9b41213c837c4ca08aaaefe079b84dfd52f - src/nvidia/generated/g_client_nvoc.c
36b3993cc05598590bc6356bab5ea7c0a2efd2f0 - src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c
719d890f8160efe57e4c3267db65885ebb66cd03 - src/nvidia/src/kernel/gpu_mgr/gpu_db.c
37d1e3dd86e6409b8e461f90386e013194c9e4d1 - src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c
d5d8ff429d3bda7103bafcb2dca94678efc8ddd8 - src/nvidia/src/kernel/gpu_mgr/gpu_group.c
4e1be780ac696a61f056933e5550040a2d42c6bd - src/nvidia/src/kernel/gpu/gpu_device_mapping.c
381cbcd5c362e5c5563806bfff2fb60eec80eda2 - src/nvidia/src/kernel/gpu/gpu.c
cb9af9dcd3931eb62bfdb4872c4e3001ff9def26 - src/nvidia/src/kernel/gpu/gpu_rmapi.c
bfcdb98c6541f95c3a37aaa25e9ca51ec2a0b9c1 - src/nvidia/src/kernel/gpu/eng_state.c
6fa4ba2da905692cd39ec09054f2bd6621aa2a7a - src/nvidia/src/kernel/gpu/gpu_resource_desc.c
ceb3639a86578b9d823a00a9a6553f278acb558f - src/nvidia/src/kernel/gpu/gpu_resource.c
bca16e8ff1697e953a54a3a3de4273f5584ac0df - src/nvidia/src/kernel/gpu/device_ctrl.c
493e90398cb78a3f24d2f271bbedebd8c682d7c1 - src/nvidia/src/kernel/gpu/gpu_gspclient.c
1653c7b99cfc86db6692d9d8d6de19f1b24b9071 - src/nvidia/src/kernel/gpu/gpu_uuid.c
a4225e0074c1aee00d082f69231d1d8e7d812347 - src/nvidia/src/kernel/gpu/gpu_access.c
207b32d1423f3666feeedb85d38fa7a924c1f7a9 - src/nvidia/src/kernel/gpu/device_share.c
29458992dabff6c2550e0202b11dc47cd7f66cd5 - src/nvidia/src/kernel/gpu/gpu_engine_type.c
89543f7085fbc2ca01b5a8baae33b5de921c79e9 - src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c
3229e9f5d2779147d337e9c6a7b6f518079f1709 - src/nvidia/src/kernel/gpu/gpu_timeout.c
c2228fbf8366e197aec9bb75ad2c01b267aedeb7 - src/nvidia/src/kernel/gpu/gpu_user_shared_data.c
cf85f6ecacf40fa649de2c443595e2313fa364d6 - src/nvidia/src/kernel/gpu/device.c
cffbdcaacd4fd5be809fc81bd76a384920781391 - src/nvidia/src/kernel/gpu/timer/timer.c
17e9f2af953c3cf96d0eee9cfea3aad6e540c3cf - src/nvidia/src/kernel/gpu/timer/timer_ostimer.c
5a053caaa8eb655d9e0f7ab42ec1b3f0b72fb787 - src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c
7f9874d9af6b937dac888a3ebb55a82c2a5de71b - src/nvidia/src/kernel/gpu/dce_client/dce_client.c
1f4d15f959df38f4f6ea48c7b10fc859c6e04b12 - src/nvidia/src/kernel/gpu/audio/hda_codec_api.c
d852ad5a6af96e173832833379ae9d38baaed47f - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c
086e9a51757c3989dfe0bf89ca6c0b9c7734104a - src/nvidia/src/kernel/gpu/subdevice/generic_engine.c
c9ec73f6e2f2e87371b97ec47a65c3874dd4949a - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c
3d0b8b3dabe8aab7884f1ddec7ef4f9715de31ad - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c
ba49fc89b1a453aca3a79f51d3250c7c0a667327 - src/nvidia/src/kernel/gpu/subdevice/subdevice.c
8ce824bfdb06f08567a29ee5e175106c32611182 - src/nvidia/src/kernel/gpu/disp/disp_channel.c
6437dd659a38c62cd81fb59f229bd94e59f37e71 - src/nvidia/src/kernel/gpu/disp/disp_sf_user.c
c3d94d9a49e1c0dffd8987d9b007a9cef91be561 - src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c
4b783bc279ea35c4b7e101a668d136f1a12d9030 - src/nvidia/src/kernel/gpu/disp/kern_disp.c
681499b2c86582cd110ede079d757c5797c4b458 - src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c
1533c870f3e6521f180eb967f7144a62a727d125 - src/nvidia/src/kernel/gpu/disp/disp_objs.c
ceb516c8064e1df2d18897f98f5c8ea58e907973 - src/nvidia/src/kernel/gpu/disp/disp_capabilities.c
84fdcdf90d9a656a572774fb8330f7a1fa9f59e2 - src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c
629566bf98be863b12e6dc6aab53d8f5ea13988c - src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c
0156d5407cf877b8f5c79823d3c83ead54b6385c - src/nvidia/src/kernel/gpu/disp/head/kernel_head.c
8a418dce9fbeb99d5d6e175ed8c88811866f3450 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c
e7f143390807f3f4d4bf6586068378a9f5a75d57 - src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c
611098328a114b66c6dcea4a8ea710887db006c4 - src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c
3c463773f2f970b1764edb231d349164fe4341fc - src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c
c6e78a54a1b8d4ca6fe4b01d83e3199ea41606d7 - src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c
f30ae0e8e1e32d0adb7e52b8995c277637b6bc2a - src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c
2bb921b462c4b50d1f42b39b4728374c7433c8cb - src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c
c8c4af5a28740f1e66ff4e6e9c47fc6c981ce46b - src/nvidia/src/kernel/os/os_timer.c
0e0c1b862bdba245297ffd4f725001fa2439cddf - src/nvidia/src/kernel/os/os_sanity.c
1dc0be7577b4f7914743379943bcf0d5e236eb0b - src/nvidia/src/kernel/os/os_stubs.c
1fad27934185df50c1d91b5536d0df437618382f - src/nvidia/src/kernel/os/os_init.c
df7ac5873dc42eafc335a1ddba095fbc8cd1d708 - src/nvidia/src/kernel/core/locks_common.c
61691e21cdabc8919d7b41142c97f510db9c0cc6 - src/nvidia/src/kernel/core/locks_minimal.c
8adbda67510ec9fab31edd681c51ddfb7b190d7d - src/nvidia/src/kernel/core/thread_state.c
db40522057f29afe6624e33468879e5e9813f07c - src/nvidia/src/kernel/core/system.c
afbf166f49a964873a13e19b787cae33813f9de5 - src/nvidia/src/kernel/core/hal_mgr.c
8eac3ea49f9a53063f7106211e5236372d87bdaf - src/nvidia/src/kernel/core/hal/info_block.c
afa03f17393b28b9fc791bf09c4d35833447808d - src/nvidia/src/kernel/core/hal/hal.c
c38181e1361a59e3252ae446a0e8761363db35e7 - src/nvidia/src/kernel/core/hal/hals_all.c
b3a29311cc22e2dae686f8ed2df6bc828aa826cf - src/nvidia/src/kernel/diagnostics/profiler.c
fc39cb6ac6e9d73bd1ab98890e6b253217d6cc90 - src/nvidia/src/kernel/diagnostics/nvlog_printf.c
8192d2364dc63171b51f6ced5b1726125f1a8ff6 - src/nvidia/src/kernel/diagnostics/nvlog.c
2aa207714971c97d9486c1ed48a3123e40b6c4ff - src/nvidia/src/kernel/rmapi/rmapi_cache.c
79a130d1e1e10881ea1e5f5d8dfcb84ceb53b0f2 - src/nvidia/src/kernel/rmapi/client_resource.c
0bded8ce6e3e81de589c4e6fbb611085c705dfcd - src/nvidia/src/kernel/rmapi/event_notification.c
7fdf8e379fd2a5eeae0981bf7328163379279c29 - src/nvidia/src/kernel/rmapi/rmapi_stubs.c
fb2a191dc60c1232c198b1ff9a302883302ca526 - src/nvidia/src/kernel/rmapi/resource_list_required_includes.h
bac6ef63d11e87f9a4af3318d5be6860f861a0b9 - src/nvidia/src/kernel/rmapi/rpc_common.c
25ac4188ba55b098321700828a9386a8a6e9f80b - src/nvidia/src/kernel/rmapi/event_buffer.c
a418377318e121a2b2f83f3961da74f09a2123d0 - src/nvidia/src/kernel/rmapi/event.c
5166298f09865066535a3e04c111354ceaefbcbc - src/nvidia/src/kernel/rmapi/control.c
ac6a5b3adf15eac4a7bd9ae24981f6f5fc727097 - src/nvidia/src/kernel/rmapi/deprecated_context.h
a2ad052692006f70e97fd3d186f19c7ddfe80c4c - src/nvidia/src/kernel/rmapi/deprecated_context.c
19d3213dc7471e7a7d4ff379494f724869638d28 - src/nvidia/src/kernel/rmapi/mapping_cpu.c
8cc578a1e5f534e911ba4b49b58352ef9ea57772 - src/nvidia/src/kernel/rmapi/client.c
cb6835f318c0d871d72185e0ac410d03d788654a - src/nvidia/src/kernel/rmapi/binary_api.c
c59a08852553b5843beec8138caa8e2141d3d759 - src/nvidia/src/kernel/rmapi/resource_desc_flags.h
d964061679e6f3da0e6e6c3b8e0eb93eb31fd3dc - src/nvidia/src/kernel/rmapi/resource.c
96f763eef08f1954d3f07639053db2cde2a01e39 - src/nvidia/src/kernel/rmapi/rmapi.c
b4dc306ae4d4f8850571e2fbbed0114d63f1ba93 - src/nvidia/src/kernel/rmapi/entry_points.c
3b53d6b8ef183702327b4bc3a96aa06f67475ddc - src/nvidia/src/kernel/rmapi/param_copy.c
7a4e3a3369efd50c9d80eaa73c48852edd6e6966 - src/nvidia/src/kernel/rmapi/rs_utils.c
f04faaeeeda2d799207fd7e0877a2bb6d5363c13 - src/nvidia/src/kernel/rmapi/mapping.c
b001f31a373973b7a4568c411e261aa8f7487441 - src/nvidia/src/kernel/rmapi/alloc_free.c
d6b3b8ac45ede7530028848749820d2cbe0f5d55 - src/nvidia/src/kernel/rmapi/resource_desc.h
ea7b6b816ca16af62c0b2040b8a76c6c10a16053 - src/nvidia/src/kernel/rmapi/resource_list.h
b28d140f1bfe0aac770127e8391400d44d5582e3 - src/nvidia/src/kernel/rmapi/rmapi_finn.c
682977753c878ccee6279e539cf11bee2b548752 - src/nvidia/src/kernel/rmapi/resource_desc.c
9b1453ed00d80034a0d2e3e918d31dbe939177b0 - src/nvidia/src/kernel/rmapi/rmapi_utils.c
bb67ea7ef87ff0148473ebf1165e3afd59d63b20 - src/nvidia/src/kernel/rmapi/sharing.c
c4eeb6d566366ab2b9532f109632d4e14539332c - src/nvidia/src/kernel/rmapi/entry_points.h
a14b8d9a6e029d8a5c571283b520645a562b5c2c - src/nvidia/src/kernel/mem_mgr/vaspace.c
5b9048e62581a3fbb0227d1a46c4ee8d8397bf5b - src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h
623dad3ec0172ed7b3818caece0db5687d587ff3 - src/nvidia/src/kernel/mem_mgr/os_desc_mem.c
38b2ed45dc7d7d7172f6d0fd2be31b43e49e41d5 - src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c
ed8316b9cbfe13336af1f8e4cd0b492a21af44b9 - src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c
e75d8a0eb4c22e11ececd24a43ad034bb76f12ce - src/nvidia/src/kernel/mem_mgr/standard_mem.c
630200d06b6588d7fa8c5b1ea16146e8281163d7 - src/nvidia/src/kernel/mem_mgr/io_vaspace.c
223b7541c7904067914a01e4aa3e589fd1690cb6 - src/nvidia/src/kernel/mem_mgr/system_mem.c
3080c8404e554eba5eac3f6482ed6094d25ccdef - src/nvidia/src/kernel/mem_mgr/mem.c
24928c8b4e8b238f1921a1699f3af59bcff994ed - src/nvidia/src/lib/base_utils.c
a6134d6f5f3e3b0b4c274eb3b2d0a146644c842b - src/nvidia/src/lib/zlib/inflate.c
c8f4cf70923179b7c2aaa6bd6b3eedc195655abe - src/nvidia/src/libraries/containers/vector.c
8991136ccb86f511f60254955ac3d86072b071f2 - src/nvidia/src/libraries/containers/map.c
864bd314450490b687a652335a44fb407835152c - src/nvidia/src/libraries/containers/ringbuf.c
6553a1c368e9d9709fb89b5e43524757f786c58b - src/nvidia/src/libraries/containers/queue.c
5940d69147d1376b03cd96fa69796360b279ae97 - src/nvidia/src/libraries/containers/list.c
23c328fc27ad0317efe6ccd2da71cfd9db9da236 - src/nvidia/src/libraries/containers/multimap.c
ea3254ebd278d9efb7dd348e52370d780c23cd94 - src/nvidia/src/libraries/containers/eheap/eheap_old.c
9c80df385a47834da4f92dc11053ca40a37a7fe7 - src/nvidia/src/libraries/containers/btree/btree.c
cccb1fedee02a240692688090e00ac1e289dec9e - src/nvidia/src/libraries/tls/tls.c
0e7a9b9c697f260438ca5fda8527b0f4edc2de13 - src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c
619f9f6df576ad20d32c30fd9a69733dc5c19da8 - src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c
ee7ea17829dfbbf9e6cd8d6c6fb2ada086b5d36e - src/nvidia/src/libraries/ioaccess/ioaccess.c
702c73446bba35f88249cfe609ac0ca39dbd80ff - src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c
f0c486c1ad0f7d9516b13a02d52b4d857d8865b1 - src/nvidia/src/libraries/nvport/util/util_compiler_switch.c
a045a19d750d48387640ab659bb30f724c34b8c8 - src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c
d047abe66dd8a459c15224cc056fc6f2176b0c6a - src/nvidia/src/libraries/nvport/util/util_gcc_clang.c
b387005657f81538fab5962d4aabbc5dc681aa1b - src/nvidia/src/libraries/nvport/core/core.c
9b69fbf3efea6ba58f9ba7cb0189c9264c994657 - src/nvidia/src/libraries/nvport/sync/sync_common.h
eb8b5fcab51c47f58a37958ddb38ff90991bcbbe - src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c
b2ae1406c94779f575d3e2233a7ab248ac10e74f - src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h
099c17e5931d5d881d8248ec68041fa0bbc2a9bc - src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c
a305654bafc883ad28a134a04e83bbd409e0fc06 - src/nvidia/src/libraries/nvport/cpu/cpu_common.h
9ca28a5af5663dec54b4cd35f48a8a3d8e52e25f - src/nvidia/src/libraries/nvport/cpu/cpu_common.c
8f41e7127a65102f0035c03536c701b7ecdaa909 - src/nvidia/src/libraries/nvport/string/string_generic.c
caff00b37e7f58fde886abcc2737c08526fa089e - src/nvidia/src/libraries/nvport/memory/memory_generic.h
c5a16e5bb7d304ffe5e83d7b27226cbecdbc7ce1 - src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c
3e3ab114d56dfcecc2886d8f9cdb8f365c5093c7 - src/nvidia/src/libraries/nvport/memory/memory_tracking.c
522da5465e5596d48cf6393c329811f3c708be19 - src/nvidia/src/libraries/resserv/src/rs_resource.c
1f2e9d09e658474b36d0b0ecd9380d0d2bcc86b2 - src/nvidia/src/libraries/resserv/src/rs_domain.c
0c9581aa68a77cb9977a7fbcfd2077ccb618206e - src/nvidia/src/libraries/resserv/src/rs_access_rights.c
f55556cd2392f55f2609ef69fca1caf2dd348e3f - src/nvidia/src/libraries/resserv/src/rs_server.c
310a8d3442285113f4ba672ba7fcc7f2aa295c6a - src/nvidia/src/libraries/resserv/src/rs_client.c
dac54d97b38ad722198ec918668f175dc5122e4e - src/nvidia/src/libraries/resserv/src/rs_access_map.c
cf48c6335eb7ff27cd7cae0faad77dd98669ad95 - src/nvidia/src/libraries/utils/nvassert.c
d3e5f13be70c8e458401ec9bdad007dfadedcc11 - src/nvidia/src/libraries/nvbitvector/nvbitvector.c
4cfe1ebd2ad6968ed513025aed61ecf2127aa683 - src/nvidia/src/libraries/nvoc/src/runtime.c
b417d06ed1845f5ed69181d8eb9de6b6a87fa973 - src/nvidia/arch/nvalloc/common/inc/nv-firmware.h
d1089d8ee0ffcdbf73a42d7c4edb90769aa79d8c - src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h
499e72dad20bcc283ee307471f8539b315211da4 - src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h
1e89b4a52a5cdc6cac511ff148c7448d53cf5d5c - src/nvidia/arch/nvalloc/unix/include/os_custom.h
507d35d1d4c5ba94ef975f75e16c63244d6cd650 - src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h
2d644a3f78bcda50e813b25156e9df07ec6da7b8 - src/nvidia/arch/nvalloc/unix/include/nv.h
3c61881e9730a8a1686e422358cdfff59616b670 - src/nvidia/arch/nvalloc/unix/include/nv_escape.h
e69045379ed58dc0110d16d17eb39a6f600f0d1d - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-lockless-diag.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h
1a98a2aaf386cd3d03b4b5513d6a511c60f71c2c - src/nvidia/arch/nvalloc/unix/include/nv-reg.h
5f2a30347378f2ed028c9fb7c8abea9b6032141c - src/nvidia/arch/nvalloc/unix/include/osapi.h
4750735d6f3b334499c81d499a06a654a052713d - src/nvidia/arch/nvalloc/unix/include/nv-caps.h
ae7d5cb2c57beeea12724e09d957e233a71c12a1 - src/nvidia/arch/nvalloc/unix/include/nv-priv.h
1d8b347e4b92c340a0e9eac77e0f63b9fb4ae977 - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h
9c7b09c55aabbd670c860bdaf8ec9e8ff254b5e9 - src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h
7188b83b28051b40cda60f05cacfa12b94ade4dc - src/nvidia/arch/nvalloc/unix/include/osfuncs.h
de6913c5e5092a417530ac9f818497824eab7946 - src/nvidia/arch/nvalloc/unix/include/os-interface.h
ddfedb3b81feb09ea9daadf1a7f63f6309ee6e3b - src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h
a28937330829b4f27a9da5e2c3776ceb293b6085 - src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c
6ca29f3d6b38fb5d05ff222cd1b79ade811a74b2 - src/nvidia/arch/nvalloc/unix/src/osunix.c
8f725a01c2a29658580936a87bdd33308030a332 - src/nvidia/arch/nvalloc/unix/src/os.c
866073d8caaa58055268aa5b3548eec6e1168d04 - src/nvidia/arch/nvalloc/unix/src/exports-stubs.c
63edc719390a814eb70290e709634d133ad198cc - src/nvidia/arch/nvalloc/unix/src/osmemdesc.c
690927567b5344c8030e2c52d91f824bb94e956c - src/nvidia/arch/nvalloc/unix/src/registry.c
eccfc4f261fd8531254eb2961120073aac9847db - src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c
4971626589ae66cc273ad11b80f0ab875fb39c05 - src/nvidia/arch/nvalloc/unix/src/osapi.c
68d80083483bf4976d6d83153a3880e5949e0824 - src/nvidia/arch/nvalloc/unix/src/osinit.c
69d2719c759456a22ccc4de470e5d15cf0c3d26c - src/nvidia/arch/nvalloc/unix/src/escape.c
b5b409625fde1b640e4e93276e35248f0fccfa4c - src/nvidia/arch/nvalloc/unix/src/gcc_helper.c
11c6d988bccbdf49ac241d77e6363c7843a0191f - src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c
8ef620afdf720259cead00d20fae73d31e59c2f7 - src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h
5b151d0d97b83c9fb76b76c476947f9e15e774ad - src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h
ea32018e3464bb1ac792e39227badf482fa2dc67 - src/nvidia/inc/kernel/gpu_mgr/gpu_group.h
2c48d7335bdb0b7ea88b78216c0aeab2e11e00c1 - src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h
e188d9f2d042ffe029b96d8fbb16c79a0fc0fb01 - src/nvidia/inc/kernel/gpu_mgr/gpu_db.h
ac5842e58bf82bb8f0b738695f9b459709f03b92 - src/nvidia/inc/kernel/gpu/gpu_shared_data_map.h
f2947fefcaf0611cd80c2c88ce3fdea70953c1ed - src/nvidia/inc/kernel/gpu/gpu_child_class_defs.h
a9c2b16261b46eb0f86fc611b8b3b5118e2b4e59 - src/nvidia/inc/kernel/gpu/gpu_acpi_data.h
76b24227c65570898c19e16bf35b2cad143f3d05 - src/nvidia/inc/kernel/gpu/gpu.h
ce5439e2066933d7d1045b7813ef0195b55e78fc - src/nvidia/inc/kernel/gpu/gpu_engine_type.h
7010ff346c27b6453c091f5577672b8b1821808d - src/nvidia/inc/kernel/gpu/gpu_access.h
ce3302c1890e2f7990434f7335cb619b12dee854 - src/nvidia/inc/kernel/gpu/gpu_resource_desc.h
28d0d82b58ef13662e8896d3bbc42d340836294e - src/nvidia/inc/kernel/gpu/gpu_user_shared_data.h
10ba0b9d4c67c8027b391073dab8dc4388f32fd7 - src/nvidia/inc/kernel/gpu/nvbitmask.h
bf894a769c46d5d173e3875cd9667bb3fe82feb9 - src/nvidia/inc/kernel/gpu/gpu_timeout.h
c33ab6494c9423c327707fce2bcb771328984a3c - src/nvidia/inc/kernel/gpu/gpu_halspec.h
6b27c9edf93f29a31787d9acaaefb2cefc31e7d4 - src/nvidia/inc/kernel/gpu/gpu_device_mapping.h
f17b704f2489ffedcc057d4a6da77c42ece42923 - src/nvidia/inc/kernel/gpu/gpu_resource.h
1938fd2511213c8003864d879cf1c41ae1169a5f - src/nvidia/inc/kernel/gpu/gpu_uuid.h
0d29e997f13d314ea320898ffb40b7a3a58898e2 - src/nvidia/inc/kernel/gpu/gpu_child_list.h
0e8353854e837f0ef0fbf0d5ff5d7a25aa1eef7c - src/nvidia/inc/kernel/gpu/eng_state.h
57a4a0d006588395c0b8b6d447acd7b4a9eeeb30 - src/nvidia/inc/kernel/gpu/kern_gpu_power.h
426c6ab6cecc3b1ba540b01309d1603301a86db1 - src/nvidia/inc/kernel/gpu/eng_desc.h
ce4e0f7177f46f4fc507a68b635e5395a3f7dde6 - src/nvidia/inc/kernel/gpu/dce_client/dce_client.h
97d0a067e89251672f191788abe81cf26dcb335f - src/nvidia/inc/kernel/gpu/device/device.h
1e3bebe46b7f2f542eedace554a4156b3afb51f1 - src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h
24d01769b39a6dd62574a95fad64443b05872151 - src/nvidia/inc/kernel/gpu/subdevice/subdevice.h
efc50bb2ff6ccf1b7715fd413ca680034920758e - src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h
61711ed293ee6974a6ed9a8a3732ae5fedcdc666 - src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h
576216219d27aa887beeccefc22bcead4d1234d7 - src/nvidia/inc/kernel/gpu/disp/kern_disp.h
51a209575d3e3fe8feb7269ece7df0846e18ca2a - src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h
74bc902cd00b17da3a1dfa7fd3ebc058de439b76 - src/nvidia/inc/kernel/gpu/disp/disp_channel.h
be7da8d1106ee14ff808d86abffb86794299b2df - src/nvidia/inc/kernel/gpu/disp/disp_objs.h
b39826404d84e0850aa3385691d8dde6e30d70d4 - src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h
277a2719f8c063037c6a9ed55ade2b1cb17f48ae - src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h
5179f01acf7e9e251552dc17c0dcd84f7d341d82 - src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h
f758ea5f9cbd23a678290ef0b8d98d470e3499e0 - src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h
9a33a37c6cea9bad513aa14c942c689f28f7c0d8 - src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h
7b7cf3b6459711065d1b849bf5acaea10b6400ca - src/nvidia/inc/kernel/gpu/intr/intr_common.h
889ba18a43cc2b5c5e970a90ddcb770ce873b785 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h
c2957c7f40cc454ba12fd954397fcea5d95ccae5 - src/nvidia/inc/kernel/gpu/mem_mgr/rm_page_size.h
e4c67260b5cb693d695ad3d8aa96aaed45688322 - src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h
6756126ddd616d6393037bebf371fceacaf3a9f1 - src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h
983bf02af93d39384c8b3ef0306193b63d8e82d9 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h
20416f7239833dcaa743bbf988702610e9251289 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h
9cef17543abaa167299c57e8f043cb4b975cf640 - src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h
70c31f5c6997542d0a4693b4ad7a6539cc3ec421 - src/nvidia/inc/kernel/gpu/gsp/message_queue.h
408c0340350b813c3cba17fd36171075e156df72 - src/nvidia/inc/kernel/os/os.h
f60f647bcf307f7639bccb99cb0244c7314115a1 - src/nvidia/inc/kernel/os/os_stub.h
c8496199cd808ed4c79d8e149961e721ad96714e - src/nvidia/inc/kernel/os/capability.h
cda75171ca7d8bf920aab6d56ef9aadec16fd15d - src/nvidia/inc/kernel/os/nv_memory_type.h
5e9928552086947b10092792db4a8c4c57a84adf - src/nvidia/inc/kernel/platform/acpi_common.h
3e11362627f9ad55e7d657da7929562230220591 - src/nvidia/inc/kernel/platform/sli/sli.h
b5859c7862fb3eeb266f7213845885789801194a - src/nvidia/inc/kernel/core/system.h
42596ff1ef62df0b439e8a1e73c71b495dcf311a - src/nvidia/inc/kernel/core/printf.h
37f267155ddfc3db38f110dbb0397f0463d055ff - src/nvidia/inc/kernel/core/strict.h
bdc4ab675c6f6c4bd77c3aaf08aa5c865b186802 - src/nvidia/inc/kernel/core/hal.h
93f40859dc710fd965a643da1d176790cc8886d5 - src/nvidia/inc/kernel/core/locks.h
b00302aec7e4f4e3b89a2f699f8b1f18fc17b1ba - src/nvidia/inc/kernel/core/hal_mgr.h
bffae4da6a1f9b7dc7c879587fd674b49b46dac1 - src/nvidia/inc/kernel/core/core.h
457c02092adfc1587d6e3cd866e28c567acbc43a - src/nvidia/inc/kernel/core/info_block.h
ce992cb08e286a88c491ee8e64019ad5f8493d1b - src/nvidia/inc/kernel/core/thread_state.h
2b41b4346b7d07ca8d505574ea0f9aad6910dd69 - src/nvidia/inc/kernel/core/prelude.h
3a28bf1692efb34d2161907c3781401951cc2d4f - src/nvidia/inc/kernel/diagnostics/journal_structs.h
7e75b5d99376fba058b31996d49449f8fe62d3f0 - src/nvidia/inc/kernel/diagnostics/profiler.h
7615ac3a83d0ad23b2160ff8ad90bec9eb1f3c6c - src/nvidia/inc/kernel/diagnostics/journal.h
b259f23312abe56d34a8f0da36ef549ef60ba5b0 - src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h
fd780f85cb1cd0fd3914fa31d1bd4933437b791d - src/nvidia/inc/kernel/diagnostics/tracer.h
c6efd51b8b8447829a0867cd7fb7a5a5a2fb1e3d - src/nvidia/inc/kernel/diagnostics/traceable.h
2b23f2dbd8f3f63a17a1b63ebb40a2fd7fd8801a - src/nvidia/inc/kernel/rmapi/alloc_size.h
b4bae9ea958b4d014908459e08c93319784c47dd - src/nvidia/inc/kernel/rmapi/event.h
99a27d87c7f1487f8df5781d284c2e9a83525892 - src/nvidia/inc/kernel/rmapi/binary_api.h
2baec15f4c68a9c59dd107a0db288e39914e6737 - src/nvidia/inc/kernel/rmapi/client.h
aab23ad58777406fa75b55778adc747f17c1afdb - src/nvidia/inc/kernel/rmapi/rs_utils.h
7646fc9f1d17b29747b457655d65f7cae80ccc33 - src/nvidia/inc/kernel/rmapi/control.h
4453fe6463e3155063f2bdbf36f44697606a80a5 - src/nvidia/inc/kernel/rmapi/client_resource.h
497492340cea19a93b62da69ca2000b811c8f5d6 - src/nvidia/inc/kernel/rmapi/event_buffer.h
ac9288d75555180c1d5dd6dd7e0e11fb57a967f2 - src/nvidia/inc/kernel/rmapi/exports.h
7e1200e609082316ed4bc2d0d925e15396b695a5 - src/nvidia/inc/kernel/rmapi/mapping_list.h
6f0f62525d2b966a24adaaabf19e79e6efc4e572 - src/nvidia/inc/kernel/rmapi/rmapi_utils.h
2724476b61b1790f1b7c293cc86e8a268125e11c - src/nvidia/inc/kernel/rmapi/param_copy.h
1399c6dc08b96577bb778e66730e7f4bcf8e7256 - src/nvidia/inc/kernel/rmapi/rmapi.h
61e3704cd51161c9804cb168d5ce4553b7311973 - src/nvidia/inc/kernel/rmapi/resource.h
a92dbf2870fe0df245ea8967f2f6a68f5075ecaf - src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h
a5f49a031db4171228a27482d091283e84632ace - src/nvidia/inc/kernel/mem_mgr/system_mem.h
0ce5d6370c086d2944b2e8d31ff72a510d98dc8f - src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h
d15991bc770c5ab41fe746995294c5213efa056b - src/nvidia/inc/kernel/mem_mgr/io_vaspace.h
02d6a37ef1bb057604cb98a905fa02429f200c96 - src/nvidia/inc/kernel/mem_mgr/mem.h
5ae08b2077506cbc41e40e1b3672e615ce9d910f - src/nvidia/inc/kernel/mem_mgr/vaspace.h
4c386104eaead66c66df11258c3f1182b46e96ee - src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h
2d4afabd63699feec3aea5e89601db009fc51a08 - src/nvidia/inc/kernel/mem_mgr/standard_mem.h
1a08e83fd6f0a072d6887c60c529e29211bcd007 - src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h
5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - src/nvidia/inc/os/dce_rm_client_ipc.h
ec26741397ebd68078e8b5e34da3b3c889681b70 - src/nvidia/inc/lib/base_utils.h
fff3ebc8527b34f8c463daad4d20ee5e33321344 - src/nvidia/inc/lib/ref_count.h
f8d9eb5f6a6883de962b63b4b7de35c01b20182f - src/nvidia/inc/lib/protobuf/prb.h
601edb7333b87349d791d430f1cac84fb6fbb919 - src/nvidia/inc/lib/zlib/inflate.h
083667047714a008219fa41b3a7deb9803bbe48a - src/nvidia/inc/libraries/poolalloc.h
8dd7f2d9956278ed036bbc288bff4dde86a9b509 - src/nvidia/inc/libraries/eventbufferproducer.h
67ecfa8adcb2b5bb5eb8e425bc5889390fd77ca8 - src/nvidia/inc/libraries/containers/list.h
fc211c8276ebcee194080140b5f3c30fba3dfe49 - src/nvidia/inc/libraries/containers/queue.h
4c8c52993d4a99f7552cd10e8c1fc8aea0330a4a - src/nvidia/inc/libraries/containers/vector.h
5cabf8b70c3bb188022db16f6ff96bcae7d7fe21 - src/nvidia/inc/libraries/containers/multimap.h
9f76ab27650b137566bf49202857c3195674d44a - src/nvidia/inc/libraries/containers/map.h
1dacc1c1efc757c12e4c64eac171474a798b86fd - src/nvidia/inc/libraries/containers/eheap_old.h
63a8244e13f9217461f624ab46281716ef42b20f - src/nvidia/inc/libraries/containers/ringbuf.h
5f116730f8b7a46e9875850e9b6ffb2a908ad6c2 - src/nvidia/inc/libraries/containers/btree.h
a23790cded20fe2347c19083f2b7430aeb26ab27 - src/nvidia/inc/libraries/containers/type_safety.h
2eb9b0121765c0a3e1085f41a3d47c89e7d5dcb0 - src/nvidia/inc/libraries/nvlog/nvlog.h
f97ea1dce9d593ecc599df510c98054db2b2d1a2 - src/nvidia/inc/libraries/nvlog/nvlog_printf.h
d2c035e67e295b8f33f0fc52d9c30e43c5d7c2ba - src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h
7f623508b3f3631ce89dad6d8762f593b1ac0d71 - src/nvidia/inc/libraries/tls/tls.h
56b8bae7756ed36d0831f76f95033f74eaab01db - src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h
a5e6f98ac5fb53fd26ee429c65b73fa1a4715631 - src/nvidia/inc/libraries/ioaccess/ioaccess.h
7d8efe42c402cbbdd1710ef1f7498bf3e883a743 - src/nvidia/inc/libraries/nvport/string.h
6065fa9a525d80f9b61acb19e476066823df0700 - src/nvidia/inc/libraries/nvport/sync.h
a1d93b6ec8ff01a3c2651e772a826ee11a7781d7 - src/nvidia/inc/libraries/nvport/util.h
4e25b80a74aad3f6403d7c34cd55f0ed58824888 - src/nvidia/inc/libraries/nvport/cpu.h
0fe8c0bd2791b105baf7cad7a90797ed9f743115 - src/nvidia/inc/libraries/nvport/memory.h
147d47ef4bd860394d1d8ae82c68d97887e2898b - src/nvidia/inc/libraries/nvport/core.h
87a130551593551380ac3e408f8044cc0423c01a - src/nvidia/inc/libraries/nvport/nvport.h
f31ed19d0588861b8c2b1489dd4e70d430110db5 - src/nvidia/inc/libraries/nvport/crypto.h
fb5a011275328b7c1edc55abc62e604462b37673 - src/nvidia/inc/libraries/nvport/atomic.h
199df020beb31a865f19ceec20f8f758e757c39a - src/nvidia/inc/libraries/nvport/debug.h
6d698ca4fc5e48c525f214a57e1de0cc4aa9e36b - src/nvidia/inc/libraries/nvport/thread.h
2487ffc1eb1e50b27ba07e0581da543d80bdaa72 - src/nvidia/inc/libraries/nvport/safe.h
f267235fd8690e1b1d7485d3a815841607683671 - src/nvidia/inc/libraries/nvport/inline/safe_generic.h
254e86ee0c1d5c0ad652bc1f3182b46f6d5c0f3b - src/nvidia/inc/libraries/nvport/inline/memory_tracking.h
ba267abed142db81efe7807b53c26ab4345da286 - src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h
9596b274389ea56acff6ca81db8201f41f2dd39d - src/nvidia/inc/libraries/nvport/inline/atomic_clang.h
23afbd04f4e4b3301edcfdec003c8e936d898e38 - src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h
a8c9b83169aceb5f97d9f7a411db449496dc18f6 - src/nvidia/inc/libraries/nvport/inline/util_generic.h
1d6a239ed6c8dab1397f056a81ff456141ec7f9c - src/nvidia/inc/libraries/nvport/inline/util_valist.h
bbece45965ffbc85fbd383a8a7c30890c6074b21 - src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h
645734ed505a4d977490e54b26cdf49657e20506 - src/nvidia/inc/libraries/nvport/inline/sync_tracking.h
2dec1c73507f66736674d203cc4a00813ccb11bc - src/nvidia/inc/libraries/resserv/rs_domain.h
cd033fe116a41285a979e629a2ee7b11ec99369f - src/nvidia/inc/libraries/resserv/rs_access_rights.h
1d04abec9438189995cb2a675f4e35a79599aae4 - src/nvidia/inc/libraries/resserv/rs_client.h
98fa7e07b6b41d1ba4ace1de93b7d7ddfd1d7c20 - src/nvidia/inc/libraries/resserv/rs_resource.h
df174d6b4f718ef699ca6f38c16aaeffa111ad3c - src/nvidia/inc/libraries/resserv/rs_access_map.h
290f84ec0b699931373eea3cd84437faf578e4a3 - src/nvidia/inc/libraries/resserv/resserv.h
3e431d72308a8b5fc423901a09079904a644b96e - src/nvidia/inc/libraries/resserv/rs_server.h
c314121149d3b28e58a62e2ccf81bf6904d1e4bc - src/nvidia/inc/libraries/utils/nvmacro.h
1aabd992631089ec24621835e046ddf2e2fd4232 - src/nvidia/inc/libraries/utils/nvbitvector.h
77db350059fa3326500af4269f09e1f02c1ab07b - src/nvidia/inc/libraries/utils/nvassert.h
d229861edca62007af83b86aa7fc1c77e957aa6f - src/nvidia/inc/libraries/utils/nvprintf.h
d0458cdc61eb650d57429f9ae58e60a62ab93025 - src/nvidia/inc/libraries/utils/nvrange.h
9aa5870d052a45c2489a6ea1a4f2e30fbc52d6be - src/nvidia/inc/libraries/utils/nv_enum.h
e35ff9733ea7fbffe0641399ccb0fd92a492e30d - src/nvidia/inc/libraries/nvoc/runtime.h
85b30b26f790b55f5370bbe9bb07349c62353841 - src/nvidia/inc/libraries/nvoc/object.h
664ff0e10e893923b70425fa49c9c48ed0735573 - src/nvidia/inc/libraries/nvoc/rtti.h
1b28bd0ee2e560ca2854a73a3ee5fb1cf713d013 - src/nvidia/inc/libraries/nvoc/utility.h
3919368b5b4cdd72d7da49801232048b5e786845 - src/nvidia/inc/libraries/nvoc/prelude.h
0b1508742a1c5a04b6c3a4be1b48b506f4180848 - kernel-open/dkms.conf
0a6f3c96043c01acbbb789874a7579728b89fcfd - kernel-open/Kbuild
4f4410c3c8db46e5a98d7a35f7d909a49de6cb43 - kernel-open/Makefile
e27150b45beb9a4bdf0b494c9dddb541f1d7a36b - kernel-open/conftest.sh
646e6b03521587cc1a02617afd697183e5d1a83a - kernel-open/nvidia-modeset/nv-kthread-q.c
2ea1436104463c5e3d177e8574c3b4298976d37e - kernel-open/nvidia-modeset/nvkms-ioctl.h
7dbe6f8405e47c1380c6151c7c7d12b0b02ef7f4 - kernel-open/nvidia-modeset/nvidia-modeset.Kbuild
252660f72b80add6f6071dd0b86288dda8dbb168 - kernel-open/nvidia-modeset/nvkms.h
6e4ae13d024a1df676736752df805b6f91511009 - kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h
487d949cacad8a734bab459c962a157fe56d373f - kernel-open/nvidia-modeset/nvidia-modeset-linux.c
b02c378ac0521c380fc2403f0520949f785b1db6 - kernel-open/common/inc/nv-dmabuf.h
befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - kernel-open/common/inc/nv_stdarg.h
57937fb42f6fb312f7c3cf63aa399e43bad13c8c - kernel-open/common/inc/nv-proto.h
751abf80513898b35a6449725e27724b1e23ac50 - kernel-open/common/inc/nvmisc.h
5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - kernel-open/common/inc/dce_rm_client_ipc.h
b417d06ed1845f5ed69181d8eb9de6b6a87fa973 - kernel-open/common/inc/nv-firmware.h
1e7eec6561b04d2d21c3515987aaa116e9401c1f - kernel-open/common/inc/nv-kernel-interface-api.h
b4c5d759f035b540648117b1bff6b1701476a398 - kernel-open/common/inc/nvCpuUuid.h
507d35d1d4c5ba94ef975f75e16c63244d6cd650 - kernel-open/common/inc/nv-ioctl.h
fc319569799d54944cd09b0e170e29d67b33072d - kernel-open/common/inc/nv.h
fa267c903e9c449e62dbb6945906400d43417eff - kernel-open/common/inc/nvlimits.h
891192c9aabdb45fb4a798cc24cd89d205972d3f - kernel-open/common/inc/nv_uvm_types.h
a0c57e8ffbe1ae12de70e56b740737dae5394a18 - kernel-open/common/inc/nv-linux.h
689d6be9302d488000e57a329373feeb14e93798 - kernel-open/common/inc/nv-procfs-utils.h
3b12d770f8592b94a8c7774c372e80ad08c5774c - kernel-open/common/inc/nvi2c.h
0e70d16576584082ee4c7f3ff9944f3bd107b1c1 - kernel-open/common/inc/cpuopsys.h
b7f5d125ca0cbd4631012894b635a58cfc9f8e06 - kernel-open/common/inc/nv-pgprot.h
b15c5fe5d969414640a2cb374b707c230e7597e4 - kernel-open/common/inc/nv-hash.h
e1144f5bd643d24f67b7577c16c687294cb50d39 - kernel-open/common/inc/rm-gpu-ops.h
4a97d807a225d792544578f8112c9a3f90cc38f6 - kernel-open/common/inc/nvstatuscodes.h
d51449fa2fd19748007f2e98f0233c92b45f9572 - kernel-open/common/inc/nvkms-api-types.h
880e45b68b19fdb91ac94991f0e6d7fc3b406b1f - kernel-open/common/inc/nv-pci-types.h
bf4fdaa93deed0b110d5ca954a1f9678ffaabc6e - kernel-open/common/inc/nv-platform.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - kernel-open/common/inc/nv-gpu-info.h
3f7b20e27e6576ee1f2f0557d269697a0b8af7ec - kernel-open/common/inc/nv-firmware-registry.h
5cf4b517c9bd8f14593c1a6450078a774a39dd08 - kernel-open/common/inc/nv-hypervisor.h
ceac0fe7333f3a67b8fb63de42ab567dd905949f - kernel-open/common/inc/nv-ioctl-numa.h
3665b1e35c52be6b971ab5117ce614109e110b7d - kernel-open/common/inc/nv-mm.h
36c20e9c111e66601b025802f840e7b87d09cdde - kernel-open/common/inc/nvkms-kapi.h
b986bc6591ba17a74ad81ec4c93347564c6d5165 - kernel-open/common/inc/nvkms-format.h
19a5da412ce1557b721b8550a4a80196f6162ba6 - kernel-open/common/inc/os_dsi_panel_props.h
1d17329caf26cdf931122b3c3b7edf4932f43c38 - kernel-open/common/inc/nv-msi.h
e4a4f57abb8769d204468b2f5000c81f5ea7c92f - kernel-open/common/inc/nv-procfs.h
4a8b7f3cc65fa530670f510796bef51cf8c4bb6b - kernel-open/common/inc/nv-register-module.h
4750735d6f3b334499c81d499a06a654a052713d - kernel-open/common/inc/nv-caps.h
b249abc0a7d0c9889008e98cb2f8515a9d310b85 - kernel-open/common/inc/nvgputypes.h
1d8b347e4b92c340a0e9eac77e0f63b9fb4ae977 - kernel-open/common/inc/nv-ioctl-numbers.h
03257213e55fff1c07c75c6dcf69afa920372822 - kernel-open/common/inc/nvtypes.h
d25291d32caef187daf3589ce4976e4fa6bec70d - kernel-open/common/inc/nv-time.h
1c49c1642d44ec347f82ff0aa06d0fca6213bad2 - kernel-open/common/inc/nvimpshared.h
e20882a9b14f2bf887e7465d3f238e5ac17bc2f5 - kernel-open/common/inc/nv_speculation_barrier.h
95bf694a98ba78d5a19e66463b8adda631e6ce4c - kernel-open/common/inc/nvstatus.h
c75bfc368c6ce3fc2c1a0c5062834e90d822b365 - kernel-open/common/inc/nv-memdbg.h
b642fb649ce2ba17f37c8aa73f61b38f99a74986 - kernel-open/common/inc/nv-retpoline.h
143051f69a53db0e7c5d2f846a9c14d666e264b4 - kernel-open/common/inc/nv-kref.h
60ef64c0f15526ae2d786e5cec07f28570f0663b - kernel-open/common/inc/conftest.h
4856fe869a5f3141e5d7f7d1b0a6affad94cbc31 - kernel-open/common/inc/nv-pci.h
3603c631c6cf784ec862e4e45f05939d98679002 - kernel-open/common/inc/nv-kthread-q.h
d7ab0ee225361daacd280ff98848851933a10a98 - kernel-open/common/inc/nv-list-helpers.h
906329ae5773732896e6fe94948f7674d0b04c17 - kernel-open/common/inc/os_gpio.h
c45b2faf17ca2a205c56daa11e3cb9d864be2238 - kernel-open/common/inc/nv-modeset-interface.h
7b2e2e6ff278acddc6980b330f68e374f38e0a6c - kernel-open/common/inc/nv-timer.h
f428218ee6f5d0289602495a1cfb287db4fb0823 - kernel-open/common/inc/nv_uvm_interface.h
de6913c5e5092a417530ac9f818497824eab7946 - kernel-open/common/inc/os-interface.h
e42d91cd7e6c17796fa89a172146950261f45d42 - kernel-open/common/inc/nv-lock.h
cda75171ca7d8bf920aab6d56ef9aadec16fd15d - kernel-open/common/inc/os/nv_memory_type.h
86443277db67b64c70260e5668bb4140bc90165c - kernel-open/nvidia/nv-clk.c
ef8fd76c55625aeaa71c9b789c4cf519ef6116b2 - kernel-open/nvidia/libspdm_hkdf.c
4c64885083621f5f313a7dee72e14eee8abed2a0 - kernel-open/nvidia/nvidia-sources.Kbuild
f701fb148bda4eb03332ab45194a4824e499cab7 - kernel-open/nvidia/nv-platform.c
e5cd40b060a69cf71220c910e9428d7f261892f7 - kernel-open/nvidia/internal_crypt_lib.h
646e6b03521587cc1a02617afd697183e5d1a83a - kernel-open/nvidia/nv-kthread-q.c
4e5a330fa40dab218821976ac1b530c649d48994 - kernel-open/nvidia/libspdm_ecc.c
0a3ad5cdacfe156b02f53c0087bdc0ec9509cd6a - kernel-open/nvidia/nv-ipc-soc.c
6e669fe32e4b69dcdbc9739dc8a45fb800547d53 - kernel-open/nvidia/nv-p2p.c
ab04c42e0e8e7f48f1a7074885278bbb6006d65f - kernel-open/nvidia/nv-bpmp.c
95ae148b016e4111122c2d9f8f004b53e78998f3 - kernel-open/nvidia/nv-memdbg.c
fbae5663e3c278d8206d07ec6446ca4c2781795f - kernel-open/nvidia/nv-ibmnpu.h
ec3055aa73c6c65b601ea040989f0b638a847e86 - kernel-open/nvidia/os-interface.c
dd819a875c584bc469082fcf519779ea00b1d952 - kernel-open/nvidia/libspdm_aead_aes_gcm.c
980556d84bc56e819955b9338a43a9d970dba11d - kernel-open/nvidia/nv_gpu_ops.h
4eee7319202366822e17d29ecec9f662c075e7ac - kernel-open/nvidia/nv-rsync.c
2f6e4c6ee6f809097c8b07a7b698e8614bf25e57 - kernel-open/nvidia/nv-caps.c
d11ab03a617b29efcf00f85e24ebce60f91cf82c - kernel-open/nvidia/nv-backlight.c
57a06cab892f111b0fb1ebe182c0c688560e750e - kernel-open/nvidia/nvspdm_cryptlib_extensions.h
189eebce734b698f0fd0b60290eca7922b865888 - kernel-open/nvidia/nv-imp.c
8bedc7374d7a43250e49fb09139c511b489d45e3 - kernel-open/nvidia/nv-pci-table.h
68d781e929d103e6fa55fa92b5d4f933fbfb6526 - kernel-open/nvidia/nv-report-err.h
94c406f36836c3396b0ca08b4ff71496666b9c43 - kernel-open/nvidia/os-usermap.c
dc39c4ee87f4dc5f5ccc179a98e07ddb82bb8bce - kernel-open/nvidia/nv-modeset-interface.c
06e7ec77cd21c43f900984553a4960064753e444 - kernel-open/nvidia/nv-platform-pm.c
cf98395acb4430a7c105218f7a4b5f7e810b39cf - kernel-open/nvidia/os-registry.c
7b1bd10726481626dd51f4eebb693794561c20f6 - kernel-open/nvidia/nv-host1x.c
1a98a2aaf386cd3d03b4b5513d6a511c60f71c2c - kernel-open/nvidia/nv-reg.h
42b9924aa348e9b23dffba9b613108d58f3a671e - kernel-open/nvidia/nv.c
37654472e65659be229b5e35c6f25c0724929511 - kernel-open/nvidia/nv-frontend.c
d9221522e02e18b037b8929fbc075dc3c1e58654 - kernel-open/nvidia/nv-pci-table.c
94344ec0af21bd9c7c7ab912f7bd3a8668a3e0aa - kernel-open/nvidia/os-pci.c
b8d361216db85fe897cbced2a9600507b7708c61 - kernel-open/nvidia/libspdm_hkdf_sha.c
70a9117dce7471a07178d9456b146a033d6b544b - kernel-open/nvidia/nv-dma.c
946fb049ca50c9bb39897eca4b8443278043eea2 - kernel-open/nvidia/nv-vm.c
6710f4603a9d3e14bcaefdf415b1cfff9ec9b7ec - kernel-open/nvidia/libspdm_aead.c
e0aff92ee8ddec261d8f0d81c41f837503c4b571 - kernel-open/nvidia/nv-dsi-parse-panel-props.c
cf90d9ea3abced81d182ab3c4161e1b5d3ad280d - kernel-open/nvidia/nv-rsync.h
6d4fbea733fdcd92fc6a8a5884e8bb359f9e8abd - kernel-open/nvidia/rmp2pdefines.h
dd9e367cba9e0672c998ec6d570be38084a365ab - kernel-open/nvidia/libspdm_rand.c
2fab5ae911554508e6e7a3b25824e8b2c27e85c2 - kernel-open/nvidia/nv-ibmnpu.c
ce537a7d786bd11a4429bf7c59836d5373a66f61 - kernel-open/nvidia/nv-i2c.c
b71bf4426322ab59e78e2a1500509a5f4b2b71ab - kernel-open/nvidia/nv-pat.h
64f1c96761f6d9e7e02ab049dd0c810196568036 - kernel-open/nvidia/nv-pat.c
9104dc5f36a825aaf1208b54b167965625d4a433 - kernel-open/nvidia/nv_uvm_interface.c
9b701fe42a0e87d62c58b15c553086a608e89f7b - kernel-open/nvidia/nv-frontend.h
02b1936dd9a9e30141245209d79b8304b7f12eb9 - kernel-open/nvidia/nv-cray.c
11778961efc78ef488be5387fa3de0c1b761c0d9 - kernel-open/nvidia/libspdm_sha.c
5ac10d9b20ccd37e1e24d4a81b8ac8f83db981e4 - kernel-open/nvidia/nv-vtophys.c
9883eb32e5d4377c3dce1c7cb54d0e05c05e128b - kernel-open/nvidia/nv-mmap.c
01d4701e8302e345275f1ec60b9718e645b5663c - kernel-open/nvidia/libspdm_x509.c
e8daae4e6106429378673988293aaa1fcd80f0eb - kernel-open/nvidia/nv-pci.c
69f203ad21e643f7b7c85e7e86bd4b674a3536de - kernel-open/nvidia/nv-acpi.c
8c9fd9590d7e3ad333ae03d5f22b72ffbdbe6e70 - kernel-open/nvidia/nv-dmabuf.c
c7f1aaa6a5f3a3cdf1e5f80adf40b3c9f185fb94 - kernel-open/nvidia/nv-report-err.c
c1ebcfec42f7898dd9d909eacd439d288b80523f - kernel-open/nvidia/os-mlock.c
d68af9144d3d487308e73d0a52f4474f8047d6ca - kernel-open/nvidia/nv-gpio.c
7ac10bc4b3b1c5a261388c3f5f9ce0e9b35d7b44 - kernel-open/nvidia/nv-usermap.c
e0a37b715684ae0f434327e4ce1b5832caf7ea4e - kernel-open/nvidia/nv-nano-timer.c
3b27e4eaa97bd6fa71f1a075b50af69b1ec16454 - kernel-open/nvidia/libspdm_ec.c
fc22bea3040ae178492cb9c7a62f1d0012b1c113 - kernel-open/nvidia/nv-procfs.c
a46f27be57870c7669f3e43fffb7e1fdaff5a3d6 - kernel-open/nvidia/nvidia.Kbuild
6060392eec4e707ac61ebca3995b6a966eba7fc1 - kernel-open/nvidia/nv-p2p.h
642c3a7d10b263ab9a63073f83ad843566927b58 - kernel-open/nvidia/libspdm_hmac_sha.c
8f87a475c202458948025d1521968677fc11dd50 - kernel-open/nvidia/nv-msi.c
e2da77ff1bc25c0b1de69af7c09e0bde26c34e30 - kernel-open/nvidia/libspdm_shash.c
9a6e164ec60c2feb1eb8782e3028afbffe420927 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_mac.h
95b97f5a3ddcf73ed5d7fa0be9e27aec776d7c13 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_rsa.h
7ff12b437215b77c920a845943e4101dcde289c4 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_hkdf.h
34de62da6f880ba8022299c77eddbb11d7fc68d2 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_hash.h
fa178a7209f56008e67b553a2c5ad1b2dd383aac - kernel-open/nvidia/hal/library/cryptlib/cryptlib_rng.h
cf94004b7b5729982806f7d6ef7cc6db53e3de56 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_aead.h
c276be3eb63bb451edfe9ed13859c251530743e6 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_ecd.h
5b79fbc90502b1ba8d1f9966fc7b9a6fd7ef07b4 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_cert.h
0dcb1fd3982e6307b07c917cb453cddbcd1d2f43 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_dh.h
92ab7c0bf545029c4c1d9a0ab68b53eedc655f9c - kernel-open/nvidia/hal/library/cryptlib/cryptlib_ec.h
d007df1d642e836595331598ca0313084922f3ee - kernel-open/nvidia/hal/library/cryptlib/cryptlib_sm2.h
7398ff33b24fa58315cc40776bc3451e090aa437 - kernel-open/nvidia/internal/libspdm_lib_config.h
19b5d633f4560d545f622ada0dd352d5aa02c651 - kernel-open/nvidia/library/cryptlib.h
d5ddc354e191d6178625b0df8e8b34e8c3e4c474 - kernel-open/nvidia/library/spdm_lib_config.h
99642b76e9a84b5a1d2e2f4a8c7fb7bcd77a44fd - kernel-open/nvidia-drm/nvidia-drm.h
66b33e4ac9abe09835635f6776c1222deefad741 - kernel-open/nvidia-drm/nvidia-drm-fb.h
23586447526d9ffedd7878b6cf5ba00139fadb5e - kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
6d65ea9f067e09831a8196022bfe00a145bec270 - kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h
646e6b03521587cc1a02617afd697183e5d1a83a - kernel-open/nvidia-drm/nv-kthread-q.c
c1af941dd5144b05995dcf5721652a4f126e175f - kernel-open/nvidia-drm/nvidia-drm-priv.h
c52acdbc07f16aa78570d9e6a7f62e493264fde1 - kernel-open/nvidia-drm/nvidia-drm-helper.c
3a5a66c304cd0093e98279968e33ed600695e0d0 - kernel-open/nvidia-drm/nvidia-drm-drv.c
511ea7cd9e7778c6adc028ae13377c1a8856b72a - kernel-open/nvidia-drm/nvidia-drm-format.c
e362c64aa67b47becdbf5c8ba2a245e135adeedf - kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c
492a1b0b02dcd2d60f05ac670daeeddcaa4b0da5 - kernel-open/nvidia-drm/nvidia-dma-resv-helper.h
55e26337c0d52b5ec4f6ab403e9306417d2893f8 - kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c
672afea77ca2c2575f278d9e182ba1188e35e971 - kernel-open/nvidia-drm/nvidia-drm-encoder.c
40b5613d1fbbe6b74bff67a5d07974ad321f75f0 - kernel-open/nvidia-drm/nvidia-drm-utils.h
2c0518192eac1a1877eef0dbf7b668e8450d0821 - kernel-open/nvidia-drm/nvidia-drm-helper.h
273d0cafeb0f21bf9b7d189f2dc6278e1a3c9672 - kernel-open/nvidia-drm/nvidia-drm-os-interface.h
eb98761cdc99141ad937966e5533c57189db376a - kernel-open/nvidia-drm/nvidia-drm-fence.h
8bedc7374d7a43250e49fb09139c511b489d45e3 - kernel-open/nvidia-drm/nv-pci-table.h
8da06bd922850e840c94ed380e3b92c63aecbf70 - kernel-open/nvidia-drm/nvidia-drm-fb.c
044071d60c8cc8ea66c6caaf1b70fe01c4081ad3 - kernel-open/nvidia-drm/nvidia-drm-conftest.h
8b2063f0cc2e328f4f986c2ce556cfb626c89810 - kernel-open/nvidia-drm/nvidia-drm-utils.c
487db563f4e5153ffc976fc2aa26636ebb4cd534 - kernel-open/nvidia-drm/nvidia-drm-crtc.h
deb00fa4d1de972d93d8e72355d81ba87044c86f - kernel-open/nvidia-drm/nvidia-drm-fence.c
dc0fe38909e2f38e919495b7b4f21652a035a3ee - kernel-open/nvidia-drm/nvidia-drm.c
203295380efca7e422746805437b05ce22505424 - kernel-open/nvidia-drm/nvidia-drm-gem.c
1f0cdee2468f842c06bb84aceef60e0723023084 - kernel-open/nvidia-drm/nvidia-drm-linux.c
97b6c56b1407de976898e0a8b5a8f38a5211f8bb - kernel-open/nvidia-drm/nvidia-drm-format.h
d9221522e02e18b037b8929fbc075dc3c1e58654 - kernel-open/nvidia-drm/nv-pci-table.c
ec550cba2bebff2c5054b6e12fc43d81e37ade48 - kernel-open/nvidia-drm/nvidia-dma-fence-helper.h
8a8b431f45bd0fe477759c1527d792cb9a1fa3f5 - kernel-open/nvidia-drm/nvidia-drm-gem.h
79bcf373ff7d728740716acde5e2d44e924efefa - kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c
734f8ad9fbbe2e07b7d8c38455f66be9f75de127 - kernel-open/nvidia-drm/nvidia-drm-crtc.c
6528efa1f8061678b8543c5c0be8761cab860858 - kernel-open/nvidia-drm/nvidia-drm-modeset.h
b91df730fba3c2f9401321557bb1bc2e64bbf980 - kernel-open/nvidia-drm/nvidia-drm-connector.h
eca70b3b8146903ec678a60eebb0462e6ccf4569 - kernel-open/nvidia-drm/nvidia-drm-encoder.h
090da9f25d980463c9a415e1ea9060036ca6d191 - kernel-open/nvidia-drm/nvidia-drm.Kbuild
4b68b6cb0f98116376be36733f5ae60eec85d78d - kernel-open/nvidia-drm/nvidia-drm-ioctl.h
61c61f91d1a29d6f7794a67eac337152b58aaac0 - kernel-open/nvidia-drm/nvidia-drm-connector.c
fe9132110f104ff7ebba922ce6dd66a2d08a998d - kernel-open/nvidia-drm/nvidia-drm-modeset.c
2eba218d75f3802d7bab34d0dd6320f872b2d604 - kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h
9a882b31b2acc9e1ad3909c0061eee536e648aae - kernel-open/nvidia-drm/nvidia-drm-drv.h

Change-Id: I3423fef8e2ecc3bf616f1017867b594264888e47
This commit is contained in: svcmobrel-release
2023-12-04 14:45:30 -08:00
parent d92e92ae48
commit e310fe9ca1
1276 changed files with 1536 additions and 1331 deletions

kernel-open/nvidia/hal/library/cryptlib/cryptlib_aead.h

@@ -0,0 +1,211 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_AEAD_H
#define CRYPTLIB_AEAD_H
/*=====================================================================================
* Authenticated Encryption with Associated data (AEAD) Cryptography Primitives
*=====================================================================================
*/
#if LIBSPDM_AEAD_GCM_SUPPORT
/**
* Performs AEAD AES-GCM authenticated encryption on a data buffer and additional authenticated
* data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 16 or 32, otherwise false is returned.
* tag_size must be 12, 13, 14, 15, or 16, otherwise false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be encrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[out] tag_out Pointer to a buffer that receives the authentication tag output.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the encryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD AES-GCM authenticated encryption succeeded.
* @retval false AEAD AES-GCM authenticated encryption failed.
**/
extern bool libspdm_aead_aes_gcm_encrypt(const uint8_t *key, size_t key_size,
                                         const uint8_t *iv, size_t iv_size,
                                         const uint8_t *a_data, size_t a_data_size,
                                         const uint8_t *data_in, size_t data_in_size,
                                         uint8_t *tag_out, size_t tag_size,
                                         uint8_t *data_out, size_t *data_out_size);
/**
* Performs AEAD AES-GCM authenticated decryption on a data buffer and additional authenticated
* data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 16 or 32, otherwise false is returned.
* tag_size must be 12, 13, 14, 15, or 16, otherwise false is returned.
*
* If data verification fails, false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be decrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[in] tag Pointer to a buffer that contains the authentication tag.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the decryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD AES-GCM authenticated decryption succeeded.
* @retval false AEAD AES-GCM authenticated decryption failed.
**/
extern bool libspdm_aead_aes_gcm_decrypt(const uint8_t *key, size_t key_size,
                                         const uint8_t *iv, size_t iv_size,
                                         const uint8_t *a_data, size_t a_data_size,
                                         const uint8_t *data_in, size_t data_in_size,
                                         const uint8_t *tag, size_t tag_size,
                                         uint8_t *data_out, size_t *data_out_size);
#endif /* LIBSPDM_AEAD_GCM_SUPPORT */
#if LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT
/**
* Performs AEAD ChaCha20Poly1305 authenticated encryption on a data buffer and additional
* authenticated data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 32, otherwise false is returned.
* tag_size must be 16, otherwise false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be encrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[out] tag_out Pointer to a buffer that receives the authentication tag output.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the encryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD ChaCha20Poly1305 authenticated encryption succeeded.
* @retval false AEAD ChaCha20Poly1305 authenticated encryption failed.
**/
extern bool libspdm_aead_chacha20_poly1305_encrypt(
const uint8_t *key, size_t key_size, const uint8_t *iv,
size_t iv_size, const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size, uint8_t *tag_out,
size_t tag_size, uint8_t *data_out, size_t *data_out_size);
/**
* Performs AEAD ChaCha20Poly1305 authenticated decryption on a data buffer and additional authenticated data (AAD).
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 32, otherwise false is returned.
* tag_size must be 16, otherwise false is returned.
*
* If data verification fails, false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be decrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[in] tag Pointer to a buffer that contains the authentication tag.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the decryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD ChaCha20Poly1305 authenticated decryption succeeded.
* @retval false AEAD ChaCha20Poly1305 authenticated decryption failed.
*
**/
extern bool libspdm_aead_chacha20_poly1305_decrypt(
const uint8_t *key, size_t key_size, const uint8_t *iv,
size_t iv_size, const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size, const uint8_t *tag,
size_t tag_size, uint8_t *data_out, size_t *data_out_size);
#endif /* LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT */
#if LIBSPDM_AEAD_SM4_SUPPORT
/**
* Performs AEAD SM4-GCM authenticated encryption on a data buffer and additional authenticated
* data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 16, otherwise false is returned.
* tag_size must be 16, otherwise false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be encrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[out] tag_out Pointer to a buffer that receives the authentication tag output.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the encryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD SM4-GCM authenticated encryption succeeded.
* @retval false AEAD SM4-GCM authenticated encryption failed.
**/
extern bool libspdm_aead_sm4_gcm_encrypt(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag_out, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
/**
* Performs AEAD SM4-GCM authenticated decryption on a data buffer and additional authenticated
* data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 16, otherwise false is returned.
* tag_size must be 16, otherwise false is returned.
*
* If data verification fails, false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be decrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[in] tag Pointer to a buffer that contains the authentication tag.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the decryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD SM4-GCM authenticated decryption succeeded.
* @retval false AEAD SM4-GCM authenticated decryption failed.
**/
extern bool libspdm_aead_sm4_gcm_decrypt(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
#endif /* LIBSPDM_AEAD_SM4_SUPPORT */
#endif /* CRYPTLIB_AEAD_H */


@@ -0,0 +1,416 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_CERT_H
#define CRYPTLIB_CERT_H
/**
* Retrieve the tag and length of an ASN.1 element and advance past its header.
*
* @param ptr The position in the ASN.1 data; on success, updated to the start
* of the element's content.
* @param end End of data.
* @param length The variable that will receive the length of the element's content.
* @param tag The expected tag.
*
* @retval true Tag retrieved successfully.
* @retval false Failed to get tag, or the tag does not match.
**/
extern bool libspdm_asn1_get_tag(uint8_t **ptr, const uint8_t *end, size_t *length, uint32_t tag);
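/*
 * Usage sketch (illustrative only, not part of the upstream header): stepping
 * into the outer SEQUENCE of a DER buffer. The LIBSPDM_CRYPTO_ASN1_SEQUENCE
 * and LIBSPDM_CRYPTO_ASN1_CONSTRUCTED macros are assumed from the library's
 * ASN.1 tag definitions. On success, ptr is advanced past the tag/length
 * header and length holds the size of the element body.
 */
static bool libspdm_asn1_sequence_example(uint8_t *der, size_t der_size)
{
    uint8_t *ptr = der;
    const uint8_t *end = der + der_size;
    size_t length;

    /* A DER certificate begins with a constructed SEQUENCE. */
    return libspdm_asn1_get_tag(&ptr, end, &length,
                                LIBSPDM_CRYPTO_ASN1_SEQUENCE |
                                LIBSPDM_CRYPTO_ASN1_CONSTRUCTED);
}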
/**
* Retrieve the subject bytes from one X.509 certificate.
*
* If cert is NULL, then return false.
* If subject_size is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] cert_subject Pointer to the retrieved certificate subject bytes.
* @param[in, out] subject_size The size in bytes of the cert_subject buffer on input,
* and the size of the data returned in cert_subject on output.
*
* @retval true The certificate subject retrieved successfully.
* @retval false Invalid certificate, or the subject_size is too small for the result.
* The subject_size will be updated with the required size.
* @retval false This interface is not supported.
**/
extern bool libspdm_x509_get_subject_name(const uint8_t *cert, size_t cert_size,
uint8_t *cert_subject,
size_t *subject_size);
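/*
 * Usage sketch (illustrative only, not part of the upstream header): the
 * query-then-fetch pattern shared by the getters in this header. A call with
 * a too-small buffer fails and updates subject_size to the required size;
 * passing a NULL buffer for the size query is an assumption of this sketch.
 */
static bool libspdm_x509_subject_example(const uint8_t *cert, size_t cert_size)
{
    uint8_t subject[256]; /* placeholder upper bound */
    size_t subject_size = 0;

    /* First call reports the required size (expected to return false). */
    libspdm_x509_get_subject_name(cert, cert_size, NULL, &subject_size);
    if (subject_size > sizeof(subject)) {
        return false;
    }
    return libspdm_x509_get_subject_name(cert, cert_size, subject, &subject_size);
}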
/**
* Retrieve the version from one X.509 certificate.
*
* If cert is NULL, then return false.
* If cert_size is 0, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] version Pointer to the retrieved version integer.
*
* @retval true The certificate version retrieved successfully.
* @retval false Invalid certificate, or failed to retrieve the version.
**/
extern bool libspdm_x509_get_version(const uint8_t *cert, size_t cert_size, size_t *version);
/**
* Retrieve the serialNumber from one X.509 certificate.
*
* If cert is NULL, then return false.
* If cert_size is 0, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] serial_number Pointer to the retrieved certificate serial_number bytes.
* @param[in, out] serial_number_size The size in bytes of the serial_number buffer on input,
* and the size of the data returned in serial_number on output.
*
* @retval true The certificate serial number retrieved successfully.
* @retval false Invalid certificate, or serial_number_size is too small for the result.
**/
extern bool libspdm_x509_get_serial_number(const uint8_t *cert, size_t cert_size,
uint8_t *serial_number,
size_t *serial_number_size);
/**
* Retrieve the issuer bytes from one X.509 certificate.
*
* If cert is NULL, then return false.
* If issuer_size is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] cert_issuer Pointer to the retrieved certificate subject bytes.
* @param[in, out] issuer_size The size in bytes of the cert_issuer buffer on input,
* and the size of the data returned in cert_issuer on output.
*
* @retval true The certificate issuer retrieved successfully.
* @retval false Invalid certificate, or the issuer_size is too small for the result.
* The issuer_size will be updated with the required size.
* @retval false This interface is not supported.
**/
extern bool libspdm_x509_get_issuer_name(const uint8_t *cert, size_t cert_size,
uint8_t *cert_issuer,
size_t *issuer_size);
/**
* Retrieve Extension data from one X.509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[in] oid Object identifier buffer.
* @param[in] oid_size Object identifier buffer size in bytes.
* @param[out] extension_data Extension bytes.
* @param[in, out] extension_data_size The size in bytes of the extension_data buffer on input,
* and the size of the data returned on output.
*
* @retval true The certificate extension data retrieved successfully.
* @retval false Invalid certificate, or failed to retrieve the extension data.
**/
extern bool libspdm_x509_get_extension_data(const uint8_t *cert, size_t cert_size,
const uint8_t *oid, size_t oid_size,
uint8_t *extension_data,
size_t *extension_data_size);
/**
* Retrieve the validity period from one X.509 certificate.
*
* If cert is NULL, then return false.
* If from_size or to_size is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] from Pointer to the notBefore date_time object.
* @param[in,out] from_size notBefore date_time object size.
* @param[out] to Pointer to the notAfter date_time object.
* @param[in,out] to_size notAfter date_time object size.
*
* Note: Use libspdm_x509_compare_date_time() to compare date_time objects and
* libspdm_x509_set_date_time() to get a date_time object from a date/time string.
*
* @retval true The certificate validity period retrieved successfully.
* @retval false Invalid certificate, or validity retrieval failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_x509_get_validity(const uint8_t *cert, size_t cert_size,
uint8_t *from, size_t *from_size, uint8_t *to,
size_t *to_size);
/**
* Construct a date_time object from a date/time string.
*
* If date_time_str is NULL, then return false.
* If date_time_size is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] date_time_str date_time string like YYYYMMDDhhmmssZ,
* where Z stands for UTC time. Ref: https://www.w3.org/TR/NOTE-datetime
* @param[out] date_time Pointer to a date_time object.
* @param[in,out] date_time_size date_time object buffer size.
*
* @retval true The date_time object was constructed successfully.
* @retval false Invalid date_time string, or date_time_size is too small.
**/
extern bool libspdm_x509_set_date_time(const char *date_time_str, void *date_time,
size_t *date_time_size);
/**
* Compare date_time1 object and date_time2 object.
*
* If date_time1 is NULL, then return -2.
* If date_time2 is NULL, then return -2.
* If date_time1 == date_time2, then return 0.
* If date_time1 > date_time2, then return 1.
* If date_time1 < date_time2, then return -1.
*
* @param[in] date_time1 Pointer to a date_time object.
* @param[in] date_time2 Pointer to a date_time object.
*
* @retval 0 If date_time1 == date_time2
* @retval 1 If date_time1 > date_time2
* @retval -1 If date_time1 < date_time2
**/
extern int32_t libspdm_x509_compare_date_time(const void *date_time1, const void *date_time2);
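/*
 * Usage sketch (illustrative only, not part of the upstream header): checking
 * that a certificate is valid at a given UTC time by combining the three
 * date/time helpers above. The 64-byte buffers are placeholders; the actual
 * date_time object size is implementation-defined and reported through the
 * in/out size parameters.
 */
static bool libspdm_x509_validity_example(const uint8_t *cert, size_t cert_size,
                                          const char *now_str /* "YYYYMMDDhhmmssZ" */)
{
    uint8_t from[64], to[64], now[64];
    size_t from_size = sizeof(from);
    size_t to_size = sizeof(to);
    size_t now_size = sizeof(now);

    if (!libspdm_x509_get_validity(cert, cert_size, from, &from_size,
                                   to, &to_size)) {
        return false;
    }
    if (!libspdm_x509_set_date_time(now_str, now, &now_size)) {
        return false;
    }
    /* Valid when notBefore <= now <= notAfter. */
    return libspdm_x509_compare_date_time(from, now) <= 0 &&
           libspdm_x509_compare_date_time(now, to) <= 0;
}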
/**
* Retrieve the key usage from one X.509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] usage Key usage (LIBSPDM_CRYPTO_X509_KU_*)
*
* @retval true The certificate key usage retrieved successfully.
* @retval false Invalid certificate, or usage is NULL
* @retval false This interface is not supported.
**/
extern bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size, size_t *usage);
/**
* Retrieve the Extended key usage from one X.509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] usage Key usage bytes.
* @param[in, out] usage_size Key usage buffer size in bytes.
*
* @retval true The certificate extended key usage retrieved successfully.
* @retval false Invalid certificate, or failed to retrieve the extended key usage.
**/
extern bool libspdm_x509_get_extended_key_usage(const uint8_t *cert,
size_t cert_size, uint8_t *usage,
size_t *usage_size);
/**
* Retrieve the basic constraints from one X.509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] basic_constraints Basic constraints bytes.
* @param[in, out] basic_constraints_size Basic constraints buffer size in bytes.
*
* @retval true The certificate basic constraints retrieved successfully.
* @retval false Invalid certificate, or failed to retrieve the basic constraints.
**/
extern bool libspdm_x509_get_extended_basic_constraints(const uint8_t *cert,
size_t cert_size,
uint8_t *basic_constraints,
size_t *basic_constraints_size);
/**
* Verify one X509 certificate was issued by the trusted CA.
*
* If cert is NULL, then return false.
* If ca_cert is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate to be verified.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[in] ca_cert Pointer to the DER-encoded trusted CA certificate.
* @param[in] ca_cert_size Size of the CA Certificate in bytes.
*
* @retval true The certificate was issued by the trusted CA.
* @retval false Invalid certificate or the certificate was not issued by the given
* trusted CA.
* @retval false This interface is not supported.
*
**/
extern bool libspdm_x509_verify_cert(const uint8_t *cert, size_t cert_size,
const uint8_t *ca_cert, size_t ca_cert_size);
/**
* Verify one X509 certificate was issued by the trusted CA.
*
* @param[in] cert_chain One or more ASN.1 DER-encoded X.509 certificates
* where the first certificate is signed by the Root
* Certificate or is the Root Certificate itself, and
* each subsequent certificate is signed by the preceding
* certificate.
* @param[in] cert_chain_length Total length of the certificate chain, in bytes.
*
* @param[in] root_cert Trusted Root Certificate buffer.
*
* @param[in] root_cert_length Trusted Root Certificate buffer length.
*
* @retval true All certificates in the chain verify back to the trusted root certificate.
* @retval false Invalid certificate or the certificate was not issued by the given
* trusted CA.
**/
extern bool libspdm_x509_verify_cert_chain(const uint8_t *root_cert, size_t root_cert_length,
const uint8_t *cert_chain,
size_t cert_chain_length);
/**
* Get one X509 certificate from cert_chain.
*
* @param[in] cert_chain One or more ASN.1 DER-encoded X.509 certificates
* where the first certificate is signed by the Root
* Certificate or is the Root Certificate itself, and
* each subsequent certificate is signed by the preceding
* certificate.
* @param[in] cert_chain_length Total length of the certificate chain, in bytes.
*
* @param[in] cert_index Index of the certificate. An index of -1 indicates
* the last certificate in cert_chain.
*
* @param[out] cert The certificate at the given index of cert_chain.
* @param[out] cert_length The length of the certificate at the given index of cert_chain.
*
* @retval true Success.
* @retval false Failed to get certificate from certificate chain.
**/
extern bool libspdm_x509_get_cert_from_cert_chain(const uint8_t *cert_chain,
size_t cert_chain_length,
const int32_t cert_index, const uint8_t **cert,
size_t *cert_length);
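/*
 * Usage sketch (illustrative only, not part of the upstream header): verify a
 * chain against a trusted root and then extract the leaf certificate, using
 * index -1 to select the last certificate in the chain.
 */
static bool libspdm_leaf_from_chain_example(const uint8_t *root, size_t root_len,
                                            const uint8_t *chain, size_t chain_len,
                                            const uint8_t **leaf, size_t *leaf_len)
{
    if (!libspdm_x509_verify_cert_chain(root, root_len, chain, chain_len)) {
        return false;
    }
    return libspdm_x509_get_cert_from_cert_chain(chain, chain_len, -1,
                                                 leaf, leaf_len);
}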
#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT)
/**
* Retrieve the RSA public key from one DER-encoded X509 certificate.
*
* If cert is NULL, then return false.
* If rsa_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] rsa_context Pointer to a newly generated RSA context which contains the
* retrieved RSA public key component. Use the libspdm_rsa_free()
* function to free the resource.
*
* @retval true RSA public key was retrieved successfully.
* @retval false Failed to retrieve the RSA public key from the X509 certificate.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **rsa_context);
#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */
#if LIBSPDM_ECDSA_SUPPORT
/**
* Retrieve the EC public key from one DER-encoded X509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] ec_context Pointer to a newly generated EC context which contains the
* retrieved EC public key component. Use the libspdm_ec_free()
* function to free the resource.
*
* If cert is NULL, then return false.
* If ec_context is NULL, then return false.
*
* @retval true EC public key was retrieved successfully.
* @retval false Failed to retrieve the EC public key from the X509 certificate.
*
**/
extern bool libspdm_ec_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **ec_context);
#endif /* LIBSPDM_ECDSA_SUPPORT */
#if (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT)
/**
* Retrieve the Ed public key from one DER-encoded X509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] ecd_context Pointer to a newly generated Ed context which contains the
* retrieved Ed public key component. Use the libspdm_ecd_free()
* function to free the resource.
*
* If cert is NULL, then return false.
* If ecd_context is NULL, then return false.
*
* @retval true Ed public key was retrieved successfully.
* @retval false Failed to retrieve the Ed public key from the X509 certificate.
*
**/
extern bool libspdm_ecd_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **ecd_context);
#endif /* (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT) */
#if LIBSPDM_SM2_DSA_SUPPORT
/**
* Retrieve the sm2 public key from one DER-encoded X509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] sm2_context Pointer to a newly generated SM2 context which contains the
* retrieved SM2 public key component. Use the libspdm_sm2_dsa_free()
* function to free the resource.
*
* If cert is NULL, then return false.
* If sm2_context is NULL, then return false.
*
* @retval true SM2 public key was retrieved successfully.
* @retval false Failed to retrieve the SM2 public key from the X509 certificate.
*
**/
extern bool libspdm_sm2_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **sm2_context);
#endif /* LIBSPDM_SM2_DSA_SUPPORT */
#if LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP
/**
* Generate a CSR.
*
* @param[in] hash_nid Hash algorithm NID used to sign the CSR.
* @param[in] asym_nid Asymmetric algorithm NID used to sign the CSR.
*
* @param[in] requester_info Requester info used to generate the CSR.
* @param[in] requester_info_length Length of the requester info in bytes.
*
* @param[in] context Pointer to the asymmetric context.
* @param[in] subject_name Subject name as a comma-separated string,
* for example: "C=AA,CN=BB".
*
* Subject names should contain a comma-separated list of OID types and values.
* The valid OID type names are:
* {"CN", "commonName", "C", "countryName", "O", "organizationName","L",
* "OU", "organizationalUnitName", "ST", "stateOrProvinceName", "emailAddress",
* "serialNumber", "postalAddress", "postalCode", "dnQualifier", "title",
* "SN","givenName","GN", "initials", "pseudonym", "generationQualifier", "domainComponent", "DC"}.
* Note: The value of C and countryName should be a CSR Supported Country Code.
*
* @param[in, out] csr_len On input, the size of the buffer used to store the CSR.
* On output, the length of the DER-encoded CSR.
* @param[in, out] csr_pointer On input, the address of the buffer used to store the CSR.
* On output, the address of the stored CSR.
* The csr_pointer address may be changed.
*
* @retval true Success.
* @retval false Failed to generate the CSR.
**/
extern bool libspdm_gen_x509_csr(size_t hash_nid, size_t asym_nid,
uint8_t *requester_info, size_t requester_info_length,
void *context, char *subject_name,
size_t *csr_len, uint8_t **csr_pointer);
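/*
 * Usage sketch (illustrative only, not part of the upstream header): requesting
 * a CSR. The LIBSPDM_CRYPTO_NID_* macros are assumed from the library's
 * cryptlib NID definitions, and the origin of the asymmetric key context is an
 * assumption; in libspdm it would wrap the responder's signing key. Note that
 * csr_pointer is both the storage buffer on input and the start of the
 * DER-encoded CSR on output.
 */
static bool libspdm_gen_csr_example(void *key_context)
{
    uint8_t csr_buf[1024]; /* placeholder buffer size */
    uint8_t *csr = csr_buf;
    size_t csr_len = sizeof(csr_buf);

    return libspdm_gen_x509_csr(LIBSPDM_CRYPTO_NID_SHA256,
                                LIBSPDM_CRYPTO_NID_RSASSA2048,
                                NULL, 0, /* no requester info in this sketch */
                                key_context,
                                "C=US,CN=example-device",
                                &csr_len, &csr);
}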
#endif /* LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP */
#endif /* CRYPTLIB_CERT_H */


@@ -0,0 +1,98 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_DH_H
#define CRYPTLIB_DH_H
/*=====================================================================================
* Diffie-Hellman Key Exchange Primitives
*=====================================================================================
*/
#if LIBSPDM_FFDHE_SUPPORT
/**
* Allocates and initializes one Diffie-Hellman context for subsequent use with the NID.
*
* @param nid cipher NID
*
* @return Pointer to the Diffie-Hellman context that has been initialized.
* If the allocation fails, libspdm_dh_new_by_nid() returns NULL.
* If the interface is not supported, libspdm_dh_new_by_nid() returns NULL.
**/
extern void *libspdm_dh_new_by_nid(size_t nid);
/**
* Release the specified DH context.
*
* @param[in] dh_context Pointer to the DH context to be released.
**/
extern void libspdm_dh_free(void *dh_context);
/**
* Generates DH public key.
*
* This function generates a random secret exponent and computes the public key, which is
* returned via the parameters public_key and public_key_size. The DH context is updated accordingly.
* If the public_key buffer is too small to hold the public key, false is returned and
* public_key_size is set to the required buffer size to obtain the public key.
*
* If dh_context is NULL, then return false.
* If public_key_size is NULL, then return false.
* If public_key_size is large enough but public_key is NULL, then return false.
* If this interface is not supported, then return false.
*
* For FFDHE2048, the public_key_size is 256.
* For FFDHE3072, the public_key_size is 384.
* For FFDHE4096, the public_key_size is 512.
*
* @param[in, out] dh_context Pointer to the DH context.
* @param[out] public_key Pointer to the buffer to receive generated public key.
* @param[in, out] public_key_size On input, the size of public_key buffer in bytes.
* On output, the size of data returned in public_key buffer in
* bytes.
*
* @retval true DH public key generation succeeded.
* @retval false DH public key generation failed.
* @retval false public_key_size is not large enough.
* @retval false This interface is not supported.
**/
extern bool libspdm_dh_generate_key(void *dh_context, uint8_t *public_key, size_t *public_key_size);
/**
* Computes exchanged common key.
*
* Given peer's public key, this function computes the exchanged common key, based on its own
* context including value of prime modulus and random secret exponent.
*
* If dh_context is NULL, then return false.
* If peer_public_key is NULL, then return false.
* If key_size is NULL, then return false.
* If key is NULL, then return false.
* If key_size is not large enough, then return false.
* If this interface is not supported, then return false.
*
* For FFDHE2048, the peer_public_key_size and key_size are 256.
* For FFDHE3072, the peer_public_key_size and key_size are 384.
* For FFDHE4096, the peer_public_key_size and key_size are 512.
*
* @param[in, out] dh_context Pointer to the DH context.
* @param[in] peer_public_key Pointer to the peer's public key.
* @param[in] peer_public_key_size Size of the peer's public key in bytes.
* @param[out] key Pointer to the buffer to receive generated key.
* @param[in, out] key_size On input, the size of key buffer in bytes.
* On output, the size of data returned in key buffer in
* bytes.
*
* @retval true DH exchanged key generation succeeded.
* @retval false DH exchanged key generation failed.
* @retval false key_size is not large enough.
* @retval false This interface is not supported.
**/
extern bool libspdm_dh_compute_key(void *dh_context, const uint8_t *peer_public_key,
size_t peer_public_key_size, uint8_t *key,
size_t *key_size);
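/*
 * Usage sketch (illustrative only, not part of the upstream header): an
 * FFDHE2048 exchange between two contexts; both sides derive the same 256-byte
 * shared secret. The NID macro is assumed from the library's cryptlib NID
 * definitions.
 */
static bool libspdm_ffdhe_exchange_example(void)
{
    bool ok = false;
    uint8_t pub_a[256], pub_b[256], key_a[256], key_b[256];
    size_t pub_a_size = sizeof(pub_a), pub_b_size = sizeof(pub_b);
    size_t key_a_size = sizeof(key_a), key_b_size = sizeof(key_b);
    void *dh_a = libspdm_dh_new_by_nid(LIBSPDM_CRYPTO_NID_FFDHE2048);
    void *dh_b = libspdm_dh_new_by_nid(LIBSPDM_CRYPTO_NID_FFDHE2048);

    if (dh_a != NULL && dh_b != NULL &&
        libspdm_dh_generate_key(dh_a, pub_a, &pub_a_size) &&
        libspdm_dh_generate_key(dh_b, pub_b, &pub_b_size) &&
        libspdm_dh_compute_key(dh_a, pub_b, pub_b_size, key_a, &key_a_size) &&
        libspdm_dh_compute_key(dh_b, pub_a, pub_a_size, key_b, &key_b_size)) {
        ok = (key_a_size == key_b_size); /* both secrets now match */
    }
    if (dh_a != NULL) {
        libspdm_dh_free(dh_a);
    }
    if (dh_b != NULL) {
        libspdm_dh_free(dh_b);
    }
    return ok;
}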
#endif /* LIBSPDM_FFDHE_SUPPORT */
#endif /* CRYPTLIB_DH_H */


@@ -0,0 +1,162 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_EC_H
#define CRYPTLIB_EC_H
/*=====================================================================================
* Elliptic Curve Primitives
*=====================================================================================*/
#if (LIBSPDM_ECDHE_SUPPORT) || (LIBSPDM_ECDSA_SUPPORT)
/**
* Allocates and initializes one Elliptic Curve context for subsequent use with the NID.
*
* @param nid cipher NID
*
* @return Pointer to the Elliptic Curve context that has been initialized.
* If the allocation fails, libspdm_ec_new_by_nid() returns NULL.
**/
extern void *libspdm_ec_new_by_nid(size_t nid);
/**
* Release the specified EC context.
*
* @param[in] ec_context Pointer to the EC context to be released.
**/
extern void libspdm_ec_free(void *ec_context);
#endif /* (LIBSPDM_ECDHE_SUPPORT) || (LIBSPDM_ECDSA_SUPPORT) */
#if LIBSPDM_ECDHE_SUPPORT
/**
* Generates EC key and returns EC public key (X, Y).
*
* This function generates a random secret and computes the public key (X, Y), which is
* returned via the parameters public_key and public_key_size.
* X is the first half of public_key with size being public_key_size / 2,
* Y is the second half of public_key with size being public_key_size / 2.
* The EC context is updated accordingly.
* If the public_key buffer is too small to hold the public X, Y, false is returned and
* public_key_size is set to the required buffer size to obtain the public X, Y.
*
* For P-256, the public_key_size is 64. The first 32 bytes are X, the second 32 bytes are Y.
* For P-384, the public_key_size is 96. The first 48 bytes are X, the second 48 bytes are Y.
* For P-521, the public_key_size is 132. The first 66 bytes are X, the second 66 bytes are Y.
*
* If ec_context is NULL, then return false.
* If public_key_size is NULL, then return false.
* If public_key_size is large enough but public_key is NULL, then return false.
*
* @param[in, out] ec_context Pointer to the EC context.
* @param[out] public_key Pointer to the buffer to receive the generated public X,Y.
* @param[in, out] public_key_size On input, the size of the public_key buffer in bytes.
* On output, the size of data returned in the public_key buffer in bytes.
*
* @retval true EC public X,Y generation succeeded.
* @retval false EC public X,Y generation failed.
* @retval false public_key_size is not large enough.
**/
extern bool libspdm_ec_generate_key(void *ec_context, uint8_t *public_key, size_t *public_key_size);
/**
* Computes exchanged common key.
*
* Given peer's public key (X, Y), this function computes the exchanged common key,
* based on its own context including value of curve parameter and random secret.
* X is the first half of peer_public with size being peer_public_size / 2,
* Y is the second half of peer_public with size being peer_public_size / 2.
*
* If ec_context is NULL, then return false.
* If peer_public is NULL, then return false.
* If peer_public_size is 0, then return false.
* If key is NULL, then return false.
* If key_size is not large enough, then return false.
*
* For P-256, the peer_public_size is 64. The first 32 bytes are X, the second 32 bytes are Y.
* The key_size is 32.
* For P-384, the peer_public_size is 96. The first 48 bytes are X, the second 48 bytes are Y.
* The key_size is 48.
* For P-521, the peer_public_size is 132. The first 66 bytes are X, the second 66 bytes are Y.
* The key_size is 66.
*
* @param[in, out] ec_context Pointer to the EC context.
* @param[in] peer_public Pointer to the peer's public X,Y.
* @param[in] peer_public_size Size of peer's public X,Y in bytes.
* @param[out] key Pointer to the buffer to receive generated key.
* @param[in, out] key_size On input, the size of key buffer in bytes.
* On output, the size of data returned in key buffer in bytes.
*
* @retval true EC exchanged key generation succeeded.
* @retval false EC exchanged key generation failed.
* @retval false key_size is not large enough.
**/
extern bool libspdm_ec_compute_key(void *ec_context, const uint8_t *peer_public,
size_t peer_public_size, uint8_t *key,
size_t *key_size);
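/*
 * Usage sketch (illustrative only, not part of the upstream header): an ECDHE
 * exchange over P-256. Each side publishes a 64-byte X,Y point and derives the
 * same 32-byte shared secret. The NID macro is assumed from the library's
 * cryptlib NID definitions.
 */
static bool libspdm_ecdhe_exchange_example(void)
{
    bool ok = false;
    uint8_t pub_a[64], pub_b[64], key_a[32], key_b[32];
    size_t pub_a_size = sizeof(pub_a), pub_b_size = sizeof(pub_b);
    size_t key_a_size = sizeof(key_a), key_b_size = sizeof(key_b);
    void *ec_a = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP256R1);
    void *ec_b = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP256R1);

    if (ec_a != NULL && ec_b != NULL &&
        libspdm_ec_generate_key(ec_a, pub_a, &pub_a_size) &&
        libspdm_ec_generate_key(ec_b, pub_b, &pub_b_size) &&
        libspdm_ec_compute_key(ec_a, pub_b, pub_b_size, key_a, &key_a_size) &&
        libspdm_ec_compute_key(ec_b, pub_a, pub_a_size, key_b, &key_b_size)) {
        ok = (key_a_size == key_b_size); /* both 32-byte secrets match */
    }
    if (ec_a != NULL) {
        libspdm_ec_free(ec_a);
    }
    if (ec_b != NULL) {
        libspdm_ec_free(ec_b);
    }
    return ok;
}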
#endif /* LIBSPDM_ECDHE_SUPPORT */
#if LIBSPDM_ECDSA_SUPPORT
/**
* Carries out the EC-DSA signature.
*
* This function carries out the EC-DSA signature.
* If the signature buffer is too small to hold the contents of signature, false
* is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If ec_context is NULL, then return false.
* If message_hash is NULL, then return false.
* hash_size must match the digest size of hash_nid. hash_nid may be SHA256, SHA384, SHA512,
* SHA3_256, SHA3_384, or SHA3_512.
* If sig_size is large enough but signature is NULL, then return false.
*
* For P-256, the sig_size is 64. The first 32 bytes are R, the second 32 bytes are S.
* For P-384, the sig_size is 96. The first 48 bytes are R, the second 48 bytes are S.
* For P-521, the sig_size is 132. The first 66 bytes are R, the second 66 bytes are S.
*
* @param[in] ec_context Pointer to EC context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be signed.
* @param[in] hash_size Size of the message hash in bytes.
* @param[out] signature Pointer to buffer to receive EC-DSA signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in EC-DSA.
* @retval false signature generation failed.
* @retval false sig_size is too small.
**/
extern bool libspdm_ecdsa_sign(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size);
/**
* Verifies the EC-DSA signature.
*
* If ec_context is NULL, then return false.
* If message_hash is NULL, then return false.
* If signature is NULL, then return false.
* hash_size must match the digest size of hash_nid. hash_nid may be SHA256, SHA384, SHA512,
* SHA3_256, SHA3_384, or SHA3_512.
*
* For P-256, the sig_size is 64. The first 32 bytes are R, the second 32 bytes are S.
* For P-384, the sig_size is 96. The first 48 bytes are R, the second 48 bytes are S.
* For P-521, the sig_size is 132. The first 66 bytes are R, the second 66 bytes are S.
*
* @param[in] ec_context Pointer to EC context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be checked.
* @param[in] hash_size Size of the message hash in bytes.
* @param[in] signature Pointer to EC-DSA signature to be verified.
* @param[in] sig_size Size of signature in bytes.
*
* @retval true Valid signature encoded in EC-DSA.
* @retval false Invalid signature or invalid EC context.
**/
extern bool libspdm_ecdsa_verify(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size);
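/*
 * Usage sketch (illustrative only, not part of the upstream header): a P-256
 * ECDSA sign/verify round trip over a precomputed SHA-256 digest. The NID
 * macro is assumed from the library's cryptlib NID definitions; ec_context
 * must already hold a key pair (e.g. generated above or loaded from a
 * certificate).
 */
static bool libspdm_ecdsa_roundtrip_example(void *ec_context,
                                            const uint8_t digest[32])
{
    uint8_t sig[64]; /* P-256 signature: 32-byte R, 32-byte S */
    size_t sig_size = sizeof(sig);

    if (!libspdm_ecdsa_sign(ec_context, LIBSPDM_CRYPTO_NID_SHA256,
                            digest, 32, sig, &sig_size)) {
        return false;
    }
    return libspdm_ecdsa_verify(ec_context, LIBSPDM_CRYPTO_NID_SHA256,
                                digest, 32, sig, sig_size);
}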
#endif /* LIBSPDM_ECDSA_SUPPORT */
#endif /* CRYPTLIB_EC_H */


@@ -0,0 +1,100 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_ECD_H
#define CRYPTLIB_ECD_H
/*=====================================================================================
* Edwards-Curve Primitives
*=====================================================================================*/
#if (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT)
/**
* Allocates and initializes one Edwards-Curve context for subsequent use with the NID.
*
* @param nid cipher NID
*
* @return Pointer to the Edwards-Curve context that has been initialized.
* If the allocation fails, libspdm_ecd_new_by_nid() returns NULL.
**/
extern void *libspdm_ecd_new_by_nid(size_t nid);
/**
* Release the specified Ed context.
*
* @param[in] ecd_context Pointer to the Ed context to be released.
**/
extern void libspdm_ecd_free(void *ecd_context);
/**
* Carries out the Ed-DSA signature.
*
* This function carries out the Ed-DSA signature.
* If the signature buffer is too small to hold the contents of signature, false
* is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If ecd_context is NULL, then return false.
* If message is NULL, then return false.
* hash_nid must be the null NID; the message is signed directly rather than pre-hashed.
* If sig_size is large enough but signature is NULL, then return false.
*
* For ed25519, context must be NULL and context_size must be 0.
* For ed448, context must be at most 255 octets.
*
* For ed25519, the sig_size is 64. first 32-byte is R, second 32-byte is S.
* For ed448, the sig_size is 114. first 57-byte is R, second 57-byte is S.
*
* @param[in] ecd_context Pointer to Ed context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] context The EDDSA signing context.
* @param[in] context_size Size of EDDSA signing context.
* @param[in] message Pointer to octet message to be signed (before hash).
* @param[in] size Size of the message in bytes.
* @param[out] signature Pointer to buffer to receive Ed-DSA signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in Ed-DSA.
* @retval false signature generation failed.
* @retval false sig_size is too small.
**/
extern bool libspdm_eddsa_sign(const void *ecd_context, size_t hash_nid,
const uint8_t *context, size_t context_size,
const uint8_t *message, size_t size, uint8_t *signature,
size_t *sig_size);
/**
* Verifies the Ed-DSA signature.
*
* If ecd_context is NULL, then return false.
* If message is NULL, then return false.
* If signature is NULL, then return false.
* hash_nid must be the null NID; the message is verified directly rather than pre-hashed.
*
* For ed25519, context must be NULL and context_size must be 0.
* For ed448, context must be at most 255 octets.
*
* For ed25519, the sig_size is 64. first 32-byte is R, second 32-byte is S.
* For ed448, the sig_size is 114. first 57-byte is R, second 57-byte is S.
*
* @param[in] ecd_context Pointer to Ed context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] context The EDDSA signing context.
* @param[in] context_size Size of EDDSA signing context.
* @param[in] message Pointer to octet message to be checked (before hash).
* @param[in] size Size of the message in bytes.
* @param[in] signature Pointer to Ed-DSA signature to be verified.
* @param[in] sig_size Size of signature in bytes.
*
* @retval true Valid signature encoded in Ed-DSA.
* @retval false Invalid signature or invalid Ed context.
**/
extern bool libspdm_eddsa_verify(const void *ecd_context, size_t hash_nid,
const uint8_t *context, size_t context_size,
const uint8_t *message, size_t size,
const uint8_t *signature, size_t sig_size);
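/*
 * Usage sketch (illustrative only, not part of the upstream header): an
 * Ed25519 sign/verify round trip over a raw message. Per the notes above,
 * hash_nid is the null NID and the signing context is absent for ed25519.
 * LIBSPDM_CRYPTO_NID_NULL is assumed from the library's cryptlib NID
 * definitions; ecd_context must already hold a key pair.
 */
static bool libspdm_eddsa_roundtrip_example(const void *ecd_context,
                                            const uint8_t *msg, size_t msg_size)
{
    uint8_t sig[64]; /* ed25519 signature: 32-byte R, 32-byte S */
    size_t sig_size = sizeof(sig);

    if (!libspdm_eddsa_sign(ecd_context, LIBSPDM_CRYPTO_NID_NULL, NULL, 0,
                            msg, msg_size, sig, &sig_size)) {
        return false;
    }
    return libspdm_eddsa_verify(ecd_context, LIBSPDM_CRYPTO_NID_NULL, NULL, 0,
                                msg, msg_size, sig, sig_size);
}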
#endif /* (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT) */
#endif /* CRYPTLIB_ECD_H */


@@ -0,0 +1,772 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_HASH_H
#define CRYPTLIB_HASH_H
/* SHA-256 digest size in bytes. */
#define LIBSPDM_SHA256_DIGEST_SIZE 32
/* SHA-384 digest size in bytes. */
#define LIBSPDM_SHA384_DIGEST_SIZE 48
/* SHA-512 digest size in bytes. */
#define LIBSPDM_SHA512_DIGEST_SIZE 64
/* SHA3-256 digest size in bytes. */
#define LIBSPDM_SHA3_256_DIGEST_SIZE 32
/* SHA3-384 digest size in bytes. */
#define LIBSPDM_SHA3_384_DIGEST_SIZE 48
/* SHA3-512 digest size in bytes. */
#define LIBSPDM_SHA3_512_DIGEST_SIZE 64
/* SM3_256 digest size in bytes. */
#define LIBSPDM_SM3_256_DIGEST_SIZE 32
/*=====================================================================================
* One-way cryptographic hash SHA2 primitives.
*=====================================================================================
*/
#if LIBSPDM_SHA256_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA-256 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
* If the allocation fails, libspdm_sha256_new() returns NULL.
**/
extern void *libspdm_sha256_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha256_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha256_free(void *sha256_context);
/**
* Initializes user-supplied memory pointed to by sha256_context as SHA-256 hash context for
* subsequent use.
*
* If sha256_context is NULL, then return false.
*
* @param[out] sha256_context Pointer to SHA-256 context being initialized.
*
* @retval true SHA-256 context initialization succeeded.
* @retval false SHA-256 context initialization failed.
**/
extern bool libspdm_sha256_init(void *sha256_context);
/**
* Makes a copy of an existing SHA-256 context.
*
* If sha256_context is NULL, then return false.
* If new_sha256_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha256_context Pointer to SHA-256 context being copied.
* @param[out] new_sha256_context Pointer to new SHA-256 context.
*
* @retval true SHA-256 context copy succeeded.
* @retval false SHA-256 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha256_duplicate(const void *sha256_context, void *new_sha256_context);
/**
* Digests the input data and updates SHA-256 context.
*
* This function performs SHA-256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA-256 context should be already correctly initialized by libspdm_sha256_init(), and must not
* have been finalized by libspdm_sha256_final(). Behavior with invalid context is undefined.
*
* If sha256_context is NULL, then return false.
*
* @param[in, out] sha256_context Pointer to the SHA-256 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA-256 data digest succeeded.
* @retval false SHA-256 data digest failed.
**/
extern bool libspdm_sha256_update(void *sha256_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA-256 digest value.
*
* This function completes SHA-256 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA-256 context cannot
* be used again. SHA-256 context should be already correctly initialized by libspdm_sha256_init(),
* and must not have been finalized by libspdm_sha256_final(). Behavior with invalid SHA-256 context
* is undefined.
*
* If sha256_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha256_context Pointer to the SHA-256 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA-256 digest
* value (32 bytes).
*
* @retval true SHA-256 digest computation succeeded.
* @retval false SHA-256 digest computation failed.
**/
extern bool libspdm_sha256_final(void *sha256_context, uint8_t *hash_value);
/**
* Computes the SHA-256 message digest of an input data buffer.
*
* This function performs the SHA-256 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA-256 digest value (32 bytes).
*
* @retval true SHA-256 digest computation succeeded.
* @retval false SHA-256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
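/*
 * Usage sketch (illustrative only, not part of the upstream header): the
 * streaming new/init/update/final flow for a message delivered in two parts.
 * For contiguous data, the one-shot libspdm_sha256_hash_all() above is
 * equivalent. The same pattern repeats for every hash family in this header.
 */
static bool libspdm_sha256_streaming_example(const uint8_t *part1, size_t size1,
                                             const uint8_t *part2, size_t size2,
                                             uint8_t digest[LIBSPDM_SHA256_DIGEST_SIZE])
{
    bool ok;
    void *ctx = libspdm_sha256_new();

    if (ctx == NULL) {
        return false;
    }
    ok = libspdm_sha256_init(ctx) &&
         libspdm_sha256_update(ctx, part1, size1) &&
         libspdm_sha256_update(ctx, part2, size2) &&
         libspdm_sha256_final(ctx, digest); /* ctx is unusable after final */
    libspdm_sha256_free(ctx);
    return ok;
}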
#endif /* LIBSPDM_SHA256_SUPPORT */
#if LIBSPDM_SHA384_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA-384 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
* If the allocation fails, libspdm_sha384_new() returns NULL.
**/
extern void *libspdm_sha384_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha384_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha384_free(void *sha384_context);
/**
* Initializes user-supplied memory pointed to by sha384_context as SHA-384 hash context for
* subsequent use.
*
* If sha384_context is NULL, then return false.
*
* @param[out] sha384_context Pointer to SHA-384 context being initialized.
*
* @retval true SHA-384 context initialization succeeded.
* @retval false SHA-384 context initialization failed.
**/
extern bool libspdm_sha384_init(void *sha384_context);
/**
* Makes a copy of an existing SHA-384 context.
*
* If sha384_context is NULL, then return false.
* If new_sha384_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha384_context Pointer to SHA-384 context being copied.
* @param[out] new_sha384_context Pointer to new SHA-384 context.
*
* @retval true SHA-384 context copy succeeded.
* @retval false SHA-384 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha384_duplicate(const void *sha384_context, void *new_sha384_context);
/**
* Digests the input data and updates SHA-384 context.
*
* This function performs SHA-384 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA-384 context should be already correctly initialized by libspdm_sha384_init(), and must not
* have been finalized by libspdm_sha384_final(). Behavior with invalid context is undefined.
*
* If sha384_context is NULL, then return false.
*
* @param[in, out] sha384_context Pointer to the SHA-384 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA-384 data digest succeeded.
* @retval false SHA-384 data digest failed.
**/
extern bool libspdm_sha384_update(void *sha384_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA-384 digest value.
*
* This function completes SHA-384 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA-384 context cannot
* be used again. SHA-384 context should be already correctly initialized by libspdm_sha384_init(),
* and must not have been finalized by libspdm_sha384_final(). Behavior with invalid SHA-384 context
* is undefined.
*
* If sha384_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha384_context Pointer to the SHA-384 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA-384 digest
* value (48 bytes).
*
* @retval true SHA-384 digest computation succeeded.
* @retval false SHA-384 digest computation failed.
**/
extern bool libspdm_sha384_final(void *sha384_context, uint8_t *hash_value);
/**
* Computes the SHA-384 message digest of an input data buffer.
*
* This function performs the SHA-384 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA-384 digest value (48 bytes).
*
* @retval true SHA-384 digest computation succeeded.
* @retval false SHA-384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha384_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA384_SUPPORT */
#if LIBSPDM_SHA512_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA-512 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
* If the allocation fails, libspdm_sha512_new() returns NULL.
**/
extern void *libspdm_sha512_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha512_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha512_free(void *sha512_context);
/**
* Initializes user-supplied memory pointed to by sha512_context as SHA-512 hash context for
* subsequent use.
*
* If sha512_context is NULL, then return false.
*
* @param[out] sha512_context Pointer to SHA-512 context being initialized.
*
* @retval true SHA-512 context initialization succeeded.
* @retval false SHA-512 context initialization failed.
**/
extern bool libspdm_sha512_init(void *sha512_context);
/**
* Makes a copy of an existing SHA-512 context.
*
* If sha512_context is NULL, then return false.
* If new_sha512_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha512_context Pointer to SHA-512 context being copied.
* @param[out] new_sha512_context Pointer to new SHA-512 context.
*
* @retval true SHA-512 context copy succeeded.
* @retval false SHA-512 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha512_duplicate(const void *sha512_context, void *new_sha512_context);
/**
* Digests the input data and updates SHA-512 context.
*
* This function performs SHA-512 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA-512 context should be already correctly initialized by libspdm_sha512_init(), and must not
* have been finalized by libspdm_sha512_final(). Behavior with invalid context is undefined.
*
* If sha512_context is NULL, then return false.
*
* @param[in, out] sha512_context Pointer to the SHA-512 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA-512 data digest succeeded.
* @retval false SHA-512 data digest failed.
**/
extern bool libspdm_sha512_update(void *sha512_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA-512 digest value.
*
* This function completes SHA-512 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA-512 context cannot
* be used again. SHA-512 context should be already correctly initialized by libspdm_sha512_init(),
* and must not have been finalized by libspdm_sha512_final(). Behavior with invalid SHA-512 context
* is undefined.
*
* If sha512_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha512_context Pointer to the SHA-512 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA-512 digest
* value (64 bytes).
*
* @retval true SHA-512 digest computation succeeded.
* @retval false SHA-512 digest computation failed.
**/
extern bool libspdm_sha512_final(void *sha512_context, uint8_t *hash_value);
/**
* Computes the SHA-512 message digest of an input data buffer.
*
* This function performs the SHA-512 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA-512 digest value (64 bytes).
*
* @retval true SHA-512 digest computation succeeded.
* @retval false SHA-512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha512_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA512_SUPPORT */
/*=====================================================================================
* One-way cryptographic hash SHA3 primitives.
*=====================================================================================
*/
#if LIBSPDM_SHA3_256_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA3-256 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
* If the allocation fails, libspdm_sha3_256_new() returns NULL.
**/
extern void *libspdm_sha3_256_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha3_256_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha3_256_free(void *sha3_256_context);
/**
* Initializes user-supplied memory pointed to by sha3_256_context as SHA3-256 hash context for
* subsequent use.
*
* If sha3_256_context is NULL, then return false.
*
* @param[out] sha3_256_context Pointer to SHA3-256 context being initialized.
*
* @retval true SHA3-256 context initialization succeeded.
* @retval false SHA3-256 context initialization failed.
**/
extern bool libspdm_sha3_256_init(void *sha3_256_context);
/**
* Makes a copy of an existing SHA3-256 context.
*
* If sha3_256_context is NULL, then return false.
* If new_sha3_256_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha3_256_context Pointer to SHA3-256 context being copied.
* @param[out] new_sha3_256_context Pointer to new SHA3-256 context.
*
* @retval true SHA3-256 context copy succeeded.
* @retval false SHA3-256 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_256_duplicate(const void *sha3_256_context, void *new_sha3_256_context);
/**
* Digests the input data and updates SHA3-256 context.
*
* This function performs SHA3-256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA3-256 context should be already correctly initialized by libspdm_sha3_256_init(), and must not
* have been finalized by libspdm_sha3_256_final(). Behavior with invalid context is undefined.
*
* If sha3_256_context is NULL, then return false.
*
* @param[in, out] sha3_256_context Pointer to the SHA3-256 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA3-256 data digest succeeded.
* @retval false SHA3-256 data digest failed.
**/
extern bool libspdm_sha3_256_update(void *sha3_256_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA3-256 digest value.
*
* This function completes SHA3-256 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA3-256 context cannot
* be used again. SHA3-256 context should be already correctly initialized by
* libspdm_sha3_256_init(), and must not have been finalized by libspdm_sha3_256_final().
* Behavior with invalid SHA3-256 context is undefined.
*
* If sha3_256_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha3_256_context Pointer to the SHA3-256 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-256 digest
* value (32 bytes).
*
* @retval true SHA3-256 digest computation succeeded.
* @retval false SHA3-256 digest computation failed.
**/
extern bool libspdm_sha3_256_final(void *sha3_256_context, uint8_t *hash_value);
/**
* Computes the SHA3-256 message digest of an input data buffer.
*
* This function performs the SHA3-256 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-256 digest value (32 bytes).
*
* @retval true SHA3-256 digest computation succeeded.
* @retval false SHA3-256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA3_256_SUPPORT */
#if LIBSPDM_SHA3_384_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA3-384 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
* If the allocation fails, libspdm_sha3_384_new() returns NULL.
**/
extern void *libspdm_sha3_384_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha3_384_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha3_384_free(void *sha3_384_context);
/**
* Initializes user-supplied memory pointed to by sha3_384_context as SHA3-384 hash context for
* subsequent use.
*
* If sha3_384_context is NULL, then return false.
*
* @param[out] sha3_384_context Pointer to SHA3-384 context being initialized.
*
* @retval true SHA3-384 context initialization succeeded.
* @retval false SHA3-384 context initialization failed.
**/
extern bool libspdm_sha3_384_init(void *sha3_384_context);
/**
* Makes a copy of an existing SHA3-384 context.
*
* If sha3_384_context is NULL, then return false.
* If new_sha3_384_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha3_384_context Pointer to SHA3-384 context being copied.
* @param[out] new_sha3_384_context Pointer to new SHA3-384 context.
*
* @retval true SHA3-384 context copy succeeded.
* @retval false SHA3-384 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_384_duplicate(const void *sha3_384_context, void *new_sha3_384_context);
/**
* Digests the input data and updates SHA3-384 context.
*
* This function performs SHA3-384 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA3-384 context should be already correctly initialized by libspdm_sha3_384_init(), and must not
* have been finalized by libspdm_sha3_384_final(). Behavior with invalid context is undefined.
*
* If sha3_384_context is NULL, then return false.
*
* @param[in, out] sha3_384_context Pointer to the SHA3-384 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA3-384 data digest succeeded.
* @retval false SHA3-384 data digest failed.
**/
extern bool libspdm_sha3_384_update(void *sha3_384_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA3-384 digest value.
*
* This function completes SHA3-384 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA3-384 context cannot
* be used again. SHA3-384 context should be already correctly initialized by
* libspdm_sha3_384_init(), and must not have been finalized by libspdm_sha3_384_final().
* Behavior with invalid SHA3-384 context is undefined.
*
* If sha3_384_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha3_384_context Pointer to the SHA3-384 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-384 digest
* value (48 bytes).
*
* @retval true SHA3-384 digest computation succeeded.
* @retval false SHA3-384 digest computation failed.
*
**/
extern bool libspdm_sha3_384_final(void *sha3_384_context, uint8_t *hash_value);
/**
* Computes the SHA3-384 message digest of an input data buffer.
*
* This function performs the SHA3-384 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-384 digest value (48 bytes).
*
* @retval true SHA3-384 digest computation succeeded.
* @retval false SHA3-384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_384_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
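/**
 * Usage sketch (illustrative only, not part of the libspdm API): hashing a
 * message that arrives in two discontiguous buffers via the streaming
 * SHA3-384 interface. The variable names are hypothetical and error handling
 * is abbreviated.
 *
 * @code
 * uint8_t digest[48];
 * void *ctx = libspdm_sha3_384_new();
 *
 * if (ctx != NULL) {
 *     bool ok = libspdm_sha3_384_init(ctx) &&
 *               libspdm_sha3_384_update(ctx, part1, part1_size) &&
 *               libspdm_sha3_384_update(ctx, part2, part2_size) &&
 *               libspdm_sha3_384_final(ctx, digest); // ctx unusable after final
 *     libspdm_sha3_384_free(ctx);
 * }
 * @endcode
 **/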
#endif /* LIBSPDM_SHA3_384_SUPPORT */
#if LIBSPDM_SHA3_512_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA3-512 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
* If the allocation fails, libspdm_sha3_512_new() returns NULL.
**/
extern void *libspdm_sha3_512_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha3_512_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha3_512_free(void *sha3_512_context);
/**
* Initializes user-supplied memory pointed to by sha3_512_context as a SHA3-512 hash context for
* subsequent use.
*
* If sha3_512_context is NULL, then return false.
*
* @param[out] sha3_512_context Pointer to SHA3-512 context being initialized.
*
* @retval true SHA3-512 context initialization succeeded.
* @retval false SHA3-512 context initialization failed.
**/
extern bool libspdm_sha3_512_init(void *sha3_512_context);
/**
* Makes a copy of an existing SHA3-512 context.
*
* If sha3_512_context is NULL, then return false.
* If new_sha3_512_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha3_512_context Pointer to SHA3-512 context being copied.
* @param[out] new_sha3_512_context Pointer to new SHA3-512 context.
*
* @retval true SHA3-512 context copy succeeded.
* @retval false SHA3-512 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_512_duplicate(const void *sha3_512_context, void *new_sha3_512_context);
/**
* Digests the input data and updates SHA3-512 context.
*
* This function performs SHA3-512 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA3-512 context should be already correctly initialized by libspdm_sha3_512_init(), and must not
* have been finalized by libspdm_sha3_512_final(). Behavior with invalid context is undefined.
*
* If sha3_512_context is NULL, then return false.
*
* @param[in, out] sha3_512_context Pointer to the SHA3-512 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA3-512 data digest succeeded.
* @retval false SHA3-512 data digest failed.
**/
extern bool libspdm_sha3_512_update(void *sha3_512_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA3-512 digest value.
*
* This function completes SHA3-512 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA3-512 context cannot
* be used again. SHA3-512 context should be already correctly initialized by
* libspdm_sha3_512_init(), and must not have been finalized by libspdm_sha3_512_final().
* Behavior with invalid SHA3-512 context is undefined.
*
* If sha3_512_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha3_512_context Pointer to the SHA3-512 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-512 digest
* value (64 bytes).
*
* @retval true SHA3-512 digest computation succeeded.
* @retval false SHA3-512 digest computation failed.
**/
extern bool libspdm_sha3_512_final(void *sha3_512_context, uint8_t *hash_value);
/**
* Computes the SHA3-512 message digest of an input data buffer.
*
* This function performs the SHA3-512 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-512 digest value (64 bytes).
*
* @retval true SHA3-512 digest computation succeeded.
* @retval false SHA3-512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_512_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
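/**
 * Usage sketch (illustrative only): when the whole message is already in one
 * buffer, the one-shot interface replaces the new/init/update/final sequence.
 * The variable names are hypothetical.
 *
 * @code
 * uint8_t digest[64];
 *
 * if (libspdm_sha3_512_hash_all(message, message_size, digest)) {
 *     // digest now holds the 64-byte SHA3-512 value of message
 * }
 * @endcode
 **/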
#endif /* LIBSPDM_SHA3_512_SUPPORT */
/*=====================================================================================
* One-Way Cryptographic Hash SM3 Primitives
*=====================================================================================
*/
#if LIBSPDM_SM3_256_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SM3-256 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
* If the allocation fails, libspdm_sm3_256_new() returns NULL.
**/
extern void *libspdm_sm3_256_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sm3_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sm3_256_free(void *sm3_context);
/**
* Initializes user-supplied memory pointed to by sm3_context as an SM3 hash context for
* subsequent use.
*
* If sm3_context is NULL, then return false.
*
* @param[out] sm3_context Pointer to SM3 context being initialized.
*
* @retval true SM3 context initialization succeeded.
* @retval false SM3 context initialization failed.
**/
extern bool libspdm_sm3_256_init(void *sm3_context);
/**
* Makes a copy of an existing SM3 context.
*
* If sm3_context is NULL, then return false.
* If new_sm3_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sm3_context Pointer to SM3 context being copied.
* @param[out] new_sm3_context Pointer to new SM3 context.
*
* @retval true SM3 context copy succeeded.
* @retval false SM3 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sm3_256_duplicate(const void *sm3_context, void *new_sm3_context);
/**
* Digests the input data and updates SM3 context.
*
* This function performs SM3 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SM3 context should be already correctly initialized by libspdm_sm3_256_init(), and must not
* have been finalized by libspdm_sm3_256_final(). Behavior with invalid context is undefined.
*
* If sm3_context is NULL, then return false.
*
* @param[in, out] sm3_context Pointer to the SM3 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SM3 data digest succeeded.
* @retval false SM3 data digest failed.
**/
extern bool libspdm_sm3_256_update(void *sm3_context, const void *data, size_t data_size);
/**
* Completes computation of the SM3 digest value.
*
* This function completes SM3 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the SM3 context cannot
* be used again. SM3 context should be already correctly initialized by libspdm_sm3_256_init(),
* and must not have been finalized by libspdm_sm3_256_final(). Behavior with invalid SM3 context
* is undefined.
*
* If sm3_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sm3_context Pointer to the SM3 context.
* @param[out] hash_value Pointer to a buffer that receives the SM3 digest value (32 bytes).
*
* @retval true SM3 digest computation succeeded.
* @retval false SM3 digest computation failed.
**/
extern bool libspdm_sm3_256_final(void *sm3_context, uint8_t *hash_value);
/**
* Computes the SM3 message digest of an input data buffer.
*
* This function performs the SM3 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SM3 digest value (32 bytes).
*
* @retval true SM3 digest computation succeeded.
* @retval false SM3 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sm3_256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
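/**
 * Usage sketch (illustrative only): libspdm_sm3_256_duplicate() lets a caller
 * take an intermediate digest without disturbing a long-running context,
 * since finalization makes a context unusable. Variable names are
 * hypothetical and error handling is abbreviated.
 *
 * @code
 * uint8_t running_digest[32];
 * void *ctx = libspdm_sm3_256_new();
 * void *snapshot = libspdm_sm3_256_new();
 *
 * if ((ctx != NULL) && (snapshot != NULL) &&
 *     libspdm_sm3_256_init(ctx) &&
 *     libspdm_sm3_256_update(ctx, hdr, hdr_size) &&
 *     libspdm_sm3_256_duplicate(ctx, snapshot) &&
 *     libspdm_sm3_256_final(snapshot, running_digest)) {
 *     // running_digest covers hdr only; ctx can keep absorbing data
 *     libspdm_sm3_256_update(ctx, body, body_size);
 * }
 * if (snapshot != NULL) {
 *     libspdm_sm3_256_free(snapshot);
 * }
 * if (ctx != NULL) {
 *     libspdm_sm3_256_free(ctx);
 * }
 * @endcode
 **/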
#endif /* LIBSPDM_SM3_256_SUPPORT */
#endif /* CRYPTLIB_HASH_H */


@@ -0,0 +1,266 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_HKDF_H
#define CRYPTLIB_HKDF_H
/*=====================================================================================
* Key Derivation Function Primitives
*=====================================================================================*/
#if LIBSPDM_SHA256_SUPPORT
/**
* Performs HKDF-Extract using HMAC-SHA256.
*
* @param[in] key Pointer to the user-supplied key (input keying material).
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive the pseudorandom key (PRK).
* @param[in] prk_out_size Size of PRK bytes to generate.
*
* @retval true PRK generated successfully.
* @retval false PRK generation failed.
**/
extern bool libspdm_hkdf_sha256_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
* Performs HKDF-Expand using HMAC-SHA256.
*
* @param[in] prk Pointer to the user-supplied pseudorandom key (PRK).
* @param[in] prk_size PRK size in bytes.
* @param[in] info Pointer to the application-specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive the HKDF output.
* @param[in] out_size Size of HKDF output bytes to generate.
*
* @retval true HKDF output generated successfully.
* @retval false HKDF output generation failed.
**/
extern bool libspdm_hkdf_sha256_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
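/**
 * Usage sketch (illustrative only): the usual HKDF flow is Extract followed
 * by Expand (RFC 5869). Buffer names and the info label are hypothetical;
 * for HMAC-SHA256 the PRK is 32 bytes and the Expand output may be up to
 * 255 * 32 bytes long.
 *
 * @code
 * uint8_t prk[32];
 * uint8_t okm[42]; // output keying material
 *
 * if (libspdm_hkdf_sha256_extract(ikm, ikm_size, salt, salt_size,
 *                                 prk, sizeof(prk)) &&
 *     libspdm_hkdf_sha256_expand(prk, sizeof(prk), info, info_size,
 *                                okm, sizeof(okm))) {
 *     // okm now holds 42 bytes of derived keying material
 * }
 * @endcode
 **/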
#endif /* LIBSPDM_SHA256_SUPPORT */
#if LIBSPDM_SHA384_SUPPORT
/**
* Performs HKDF-Extract using HMAC-SHA384.
*
* @param[in] key Pointer to the user-supplied key (input keying material).
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive the pseudorandom key (PRK).
* @param[in] prk_out_size Size of PRK bytes to generate.
*
* @retval true PRK generated successfully.
* @retval false PRK generation failed.
**/
extern bool libspdm_hkdf_sha384_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
* Performs HKDF-Expand using HMAC-SHA384.
*
* @param[in] prk Pointer to the user-supplied pseudorandom key (PRK).
* @param[in] prk_size PRK size in bytes.
* @param[in] info Pointer to the application-specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive the HKDF output.
* @param[in] out_size Size of HKDF output bytes to generate.
*
* @retval true HKDF output generated successfully.
* @retval false HKDF output generation failed.
**/
extern bool libspdm_hkdf_sha384_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
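/**
 * Usage sketch (illustrative only): Expand may also be called on its own when
 * a PRK is already available, e.g. to derive several keys from one secret by
 * varying the info input. The "enc"/"mac" labels below are hypothetical, not
 * the SPDM key-schedule labels; for HMAC-SHA384 the PRK is 48 bytes.
 *
 * @code
 * uint8_t enc_key[32];
 * uint8_t mac_key[48];
 *
 * bool ok = libspdm_hkdf_sha384_expand(prk, 48, (const uint8_t *)"enc", 3,
 *                                      enc_key, sizeof(enc_key)) &&
 *           libspdm_hkdf_sha384_expand(prk, 48, (const uint8_t *)"mac", 3,
 *                                      mac_key, sizeof(mac_key));
 * @endcode
 **/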
#endif /* LIBSPDM_SHA384_SUPPORT */
#if LIBSPDM_SHA512_SUPPORT
/**
* Performs HKDF-Extract using HMAC-SHA512.
*
* @param[in] key Pointer to the user-supplied key (input keying material).
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive the pseudorandom key (PRK).
* @param[in] prk_out_size Size of PRK bytes to generate.
*
* @retval true PRK generated successfully.
* @retval false PRK generation failed.
**/
extern bool libspdm_hkdf_sha512_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
* Performs HKDF-Expand using HMAC-SHA512.
*
* @param[in] prk Pointer to the user-supplied pseudorandom key (PRK).
* @param[in] prk_size PRK size in bytes.
* @param[in] info Pointer to the application-specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive the HKDF output.
* @param[in] out_size Size of HKDF output bytes to generate.
*
* @retval true HKDF output generated successfully.
* @retval false HKDF output generation failed.
**/
extern bool libspdm_hkdf_sha512_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA512_SUPPORT */
#if LIBSPDM_SHA3_256_SUPPORT
/**
* Performs HKDF-Extract using HMAC-SHA3-256.
*
* @param[in] key Pointer to the user-supplied key (input keying material).
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive the pseudorandom key (PRK).
* @param[in] prk_out_size Size of PRK bytes to generate.
*
* @retval true PRK generated successfully.
* @retval false PRK generation failed.
**/
extern bool libspdm_hkdf_sha3_256_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
* Performs HKDF-Expand using HMAC-SHA3-256.
*
* @param[in] prk Pointer to the user-supplied pseudorandom key (PRK).
* @param[in] prk_size PRK size in bytes.
* @param[in] info Pointer to the application-specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive the HKDF output.
* @param[in] out_size Size of HKDF output bytes to generate.
*
* @retval true HKDF output generated successfully.
* @retval false HKDF output generation failed.
**/
extern bool libspdm_hkdf_sha3_256_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA3_256_SUPPORT */
#if LIBSPDM_SHA3_384_SUPPORT
/**
* Performs HKDF-Extract using HMAC-SHA3-384.
*
* @param[in] key Pointer to the user-supplied key (input keying material).
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive the pseudorandom key (PRK).
* @param[in] prk_out_size Size of PRK bytes to generate.
*
* @retval true PRK generated successfully.
* @retval false PRK generation failed.
**/
extern bool libspdm_hkdf_sha3_384_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
* Performs HKDF-Expand using HMAC-SHA3-384.
*
* @param[in] prk Pointer to the user-supplied pseudorandom key (PRK).
* @param[in] prk_size PRK size in bytes.
* @param[in] info Pointer to the application-specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive the HKDF output.
* @param[in] out_size Size of HKDF output bytes to generate.
*
* @retval true HKDF output generated successfully.
* @retval false HKDF output generation failed.
**/
extern bool libspdm_hkdf_sha3_384_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA3_384_SUPPORT */
#if LIBSPDM_SHA3_512_SUPPORT
/**
* Performs HKDF-Extract using HMAC-SHA3-512.
*
* @param[in] key Pointer to the user-supplied key (input keying material).
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive the pseudorandom key (PRK).
* @param[in] prk_out_size Size of PRK bytes to generate.
*
* @retval true PRK generated successfully.
* @retval false PRK generation failed.
**/
extern bool libspdm_hkdf_sha3_512_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
* Performs HKDF-Expand using HMAC-SHA3-512.
*
* @param[in] prk Pointer to the user-supplied pseudorandom key (PRK).
* @param[in] prk_size PRK size in bytes.
* @param[in] info Pointer to the application-specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive the HKDF output.
* @param[in] out_size Size of HKDF output bytes to generate.
*
* @retval true HKDF output generated successfully.
* @retval false HKDF output generation failed.
**/
extern bool libspdm_hkdf_sha3_512_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA3_512_SUPPORT */
#if LIBSPDM_SM3_256_SUPPORT
/**
* Performs HKDF-Extract using HMAC-SM3-256.
*
* @param[in] key Pointer to the user-supplied key (input keying material).
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive the pseudorandom key (PRK).
* @param[in] prk_out_size Size of PRK bytes to generate.
*
* @retval true PRK generated successfully.
* @retval false PRK generation failed.
**/
extern bool libspdm_hkdf_sm3_256_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
* Performs HKDF-Expand using HMAC-SM3-256.
*
* @param[in] prk Pointer to the user-supplied pseudorandom key (PRK).
* @param[in] prk_size PRK size in bytes.
* @param[in] info Pointer to the application-specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive the HKDF output.
* @param[in] out_size Size of HKDF output bytes to generate.
*
* @retval true HKDF output generated successfully.
* @retval false HKDF output generation failed.
**/
extern bool libspdm_hkdf_sm3_256_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SM3_256_SUPPORT */
#endif /* CRYPTLIB_HKDF_H */


@@ -0,0 +1,833 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_MAC_H
#define CRYPTLIB_MAC_H
/*=====================================================================================
* Message Authentication Code (MAC) Primitives
*=====================================================================================
*/
#if LIBSPDM_SHA256_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA256 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
* If the allocation fails, libspdm_hmac_sha256_new() returns NULL.
**/
extern void *libspdm_hmac_sha256_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha256_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha256_free(void *hmac_sha256_ctx);
/**
* Sets the user-supplied key for subsequent use. This must be done before any
* call to libspdm_hmac_sha256_update().
*
* If hmac_sha256_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha256_ctx Pointer to HMAC-SHA256 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key was set successfully.
* @retval false The key could not be set.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_set_key(void *hmac_sha256_ctx, const uint8_t *key, size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA256 context.
*
* If hmac_sha256_ctx is NULL, then return false.
* If new_hmac_sha256_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha256_ctx Pointer to HMAC-SHA256 context being copied.
* @param[out] new_hmac_sha256_ctx Pointer to new HMAC-SHA256 context.
*
* @retval true HMAC-SHA256 context copy succeeded.
* @retval false HMAC-SHA256 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_duplicate(const void *hmac_sha256_ctx, void *new_hmac_sha256_ctx);
/**
* Digests the input data and updates HMAC-SHA256 context.
*
* This function performs HMAC-SHA256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA256 context should be initialized by libspdm_hmac_sha256_new(), and should not be
* finalized by libspdm_hmac_sha256_final(). Behavior with invalid context is undefined.
*
* If hmac_sha256_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha256_ctx Pointer to the HMAC-SHA256 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA256 data digest succeeded.
* @retval false HMAC-SHA256 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_update(void *hmac_sha256_ctx, const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA256 digest value.
*
* This function completes HMAC-SHA256 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA256 context cannot
* be used again. HMAC-SHA256 context should be initialized by libspdm_hmac_sha256_new(), and should
* not be finalized by libspdm_hmac_sha256_final(). Behavior with invalid HMAC-SHA256 context is
* undefined.
*
* If hmac_sha256_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha256_ctx Pointer to the HMAC-SHA256 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA256 digest
* value (32 bytes).
*
* @retval true HMAC-SHA256 digest computation succeeded.
* @retval false HMAC-SHA256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_final(void *hmac_sha256_ctx, uint8_t *hmac_value);
/**
* Computes the HMAC-SHA256 digest of an input data buffer.
*
* This function performs the HMAC-SHA256 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA256 digest
* value (32 bytes).
*
* @retval true HMAC-SHA256 digest computation succeeded.
* @retval false HMAC-SHA256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
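/**
 * Usage sketch (illustrative only): the streaming MAC flow mirrors the hash
 * flow, with the key set before the first update. Variable names are
 * hypothetical and error handling is abbreviated.
 *
 * @code
 * uint8_t mac[32];
 * void *ctx = libspdm_hmac_sha256_new();
 *
 * if (ctx != NULL) {
 *     bool ok = libspdm_hmac_sha256_set_key(ctx, key, key_size) &&
 *               libspdm_hmac_sha256_update(ctx, msg, msg_size) &&
 *               libspdm_hmac_sha256_final(ctx, mac); // ctx unusable after final
 *     libspdm_hmac_sha256_free(ctx);
 * }
 * @endcode
 **/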
#endif /* LIBSPDM_SHA256_SUPPORT */
#if LIBSPDM_SHA384_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA384 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
* If the allocation fails, libspdm_hmac_sha384_new() returns NULL.
**/
extern void *libspdm_hmac_sha384_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha384_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha384_free(void *hmac_sha384_ctx);
/**
* Sets the user-supplied key for subsequent use. This must be done before any
* call to libspdm_hmac_sha384_update().
*
* If hmac_sha384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha384_ctx Pointer to HMAC-SHA384 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key was set successfully.
* @retval false The key could not be set.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_set_key(void *hmac_sha384_ctx, const uint8_t *key, size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA384 context.
*
* If hmac_sha384_ctx is NULL, then return false.
* If new_hmac_sha384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha384_ctx Pointer to HMAC-SHA384 context being copied.
* @param[out] new_hmac_sha384_ctx Pointer to new HMAC-SHA384 context.
*
* @retval true HMAC-SHA384 context copy succeeded.
* @retval false HMAC-SHA384 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_duplicate(const void *hmac_sha384_ctx, void *new_hmac_sha384_ctx);
/**
* Digests the input data and updates HMAC-SHA384 context.
*
* This function performs HMAC-SHA384 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA384 context should be initialized by libspdm_hmac_sha384_new(), and should not be
* finalized by libspdm_hmac_sha384_final(). Behavior with invalid context is undefined.
*
* If hmac_sha384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha384_ctx Pointer to the HMAC-SHA384 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA384 data digest succeeded.
* @retval false HMAC-SHA384 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_update(void *hmac_sha384_ctx, const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA384 digest value.
*
* This function completes HMAC-SHA384 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA384 context cannot
* be used again. HMAC-SHA384 context should be initialized by libspdm_hmac_sha384_new(), and should
* not be finalized by libspdm_hmac_sha384_final(). Behavior with invalid HMAC-SHA384 context is
* undefined.
*
* If hmac_sha384_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha384_ctx Pointer to the HMAC-SHA384 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA384 digest
* value (48 bytes).
*
* @retval true HMAC-SHA384 digest computation succeeded.
* @retval false HMAC-SHA384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_final(void *hmac_sha384_ctx, uint8_t *hmac_value);
/**
* Computes the HMAC-SHA384 digest of an input data buffer.
*
* This function performs the HMAC-SHA384 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA384 digest
* value (48 bytes).
*
* @retval true HMAC-SHA384 digest computation succeeded.
* @retval false HMAC-SHA384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA384_SUPPORT */
#if LIBSPDM_SHA512_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA512 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
* If the allocation fails, libspdm_hmac_sha512_new() returns NULL.
**/
extern void *libspdm_hmac_sha512_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha512_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha512_free(void *hmac_sha512_ctx);
/**
* Sets the user-supplied key for subsequent use. This must be done before any
* call to libspdm_hmac_sha512_update().
*
* If hmac_sha512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha512_ctx Pointer to HMAC-SHA512 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key was set successfully.
* @retval false The key could not be set.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha512_set_key(void *hmac_sha512_ctx, const uint8_t *key, size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA512 context.
*
* If hmac_sha512_ctx is NULL, then return false.
* If new_hmac_sha512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha512_ctx Pointer to HMAC-SHA512 context being copied.
* @param[out] new_hmac_sha512_ctx Pointer to new HMAC-SHA512 context.
*
* @retval true HMAC-SHA512 context copy succeeded.
* @retval false HMAC-SHA512 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha512_duplicate(const void *hmac_sha512_ctx, void *new_hmac_sha512_ctx);
/**
* Digests the input data and updates HMAC-SHA512 context.
*
* This function performs HMAC-SHA512 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA512 context should be initialized by libspdm_hmac_sha512_new(), and should not be
* finalized by libspdm_hmac_sha512_final(). Behavior with invalid context is undefined.
*
* If hmac_sha512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha512_ctx Pointer to the HMAC-SHA512 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA512 data digest succeeded.
* @retval false HMAC-SHA512 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha512_update(void *hmac_sha512_ctx, const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA512 digest value.
*
* This function completes HMAC-SHA512 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA512 context cannot
* be used again. HMAC-SHA512 context should be initialized by libspdm_hmac_sha512_new(), and should
* not be finalized by libspdm_hmac_sha512_final(). Behavior with invalid HMAC-SHA512 context is
* undefined.
*
* If hmac_sha512_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha512_ctx Pointer to the HMAC-SHA512 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA512 digest
* value (64 bytes).
*
* @retval true HMAC-SHA512 digest computation succeeded.
* @retval false HMAC-SHA512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha512_final(void *hmac_sha512_ctx, uint8_t *hmac_value);
/**
* Computes the HMAC-SHA512 digest of an input data buffer.
*
* This function performs the HMAC-SHA512 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA512 digest
* value (64 bytes).
*
* @retval true HMAC-SHA512 digest computation succeeded.
* @retval false HMAC-SHA512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha512_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA512_SUPPORT */
#if LIBSPDM_SHA3_256_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-256 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
* If the allocation fails, libspdm_hmac_sha3_256_new() returns NULL.
**/
extern void *libspdm_hmac_sha3_256_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha3_256_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha3_256_free(void *hmac_sha3_256_ctx);
/**
* Sets the user-supplied key for subsequent use. This must be done before any
* call to libspdm_hmac_sha3_256_update().
*
* If hmac_sha3_256_ctx is NULL, then return false.
*
* @param[out] hmac_sha3_256_ctx Pointer to HMAC-SHA3-256 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key was set successfully.
* @retval false The key could not be set.
**/
extern bool libspdm_hmac_sha3_256_set_key(void *hmac_sha3_256_ctx,
const uint8_t *key,
size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA3-256 context.
*
* If hmac_sha3_256_ctx is NULL, then return false.
* If new_hmac_sha3_256_ctx is NULL, then return false.
*
* @param[in] hmac_sha3_256_ctx Pointer to HMAC-SHA3-256 context being copied.
* @param[out] new_hmac_sha3_256_ctx Pointer to new HMAC-SHA3-256 context.
*
* @retval true HMAC-SHA3-256 context copy succeeded.
* @retval false HMAC-SHA3-256 context copy failed.
**/
extern bool libspdm_hmac_sha3_256_duplicate(const void *hmac_sha3_256_ctx,
void *new_hmac_sha3_256_ctx);
/**
* Digests the input data and updates HMAC-SHA3-256 context.
*
* This function performs HMAC-SHA3-256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA3-256 context should be initialized by libspdm_hmac_sha3_256_new(), and should not be
* finalized by libspdm_hmac_sha3_256_final(). Behavior with invalid context is undefined.
*
* If hmac_sha3_256_ctx is NULL, then return false.
*
* @param[in, out] hmac_sha3_256_ctx Pointer to the HMAC-SHA3-256 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA3-256 data digest succeeded.
* @retval false HMAC-SHA3-256 data digest failed.
**/
extern bool libspdm_hmac_sha3_256_update(void *hmac_sha3_256_ctx,
const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA3-256 digest value.
*
* This function completes HMAC-SHA3-256 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA3-256 context cannot
* be used again. HMAC-SHA3-256 context should be initialized by libspdm_hmac_sha3_256_new(), and
* should not be finalized by libspdm_hmac_sha3_256_final(). Behavior with invalid HMAC-SHA3-256
* context is undefined.
*
* If hmac_sha3_256_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
*
* @param[in, out] hmac_sha3_256_ctx Pointer to the HMAC-SHA3-256 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-256 digest
* value (32 bytes).
*
* @retval true HMAC-SHA3-256 digest computation succeeded.
* @retval false HMAC-SHA3-256 digest computation failed.
**/
extern bool libspdm_hmac_sha3_256_final(void *hmac_sha3_256_ctx, uint8_t *hmac_value);
/**
* Computes the HMAC-SHA3-256 digest of an input data buffer.
*
* This function performs the HMAC-SHA3-256 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-256 digest
* value (32 bytes).
*
* @retval true HMAC-SHA3-256 digest computation succeeded.
* @retval false HMAC-SHA3-256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_256_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA3_256_SUPPORT */
#if LIBSPDM_SHA3_384_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-384 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
* If the allocation fails, libspdm_hmac_sha3_384_new() returns NULL.
**/
extern void *libspdm_hmac_sha3_384_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha3_384_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha3_384_free(void *hmac_sha3_384_ctx);
/**
* Sets the user-supplied key for subsequent use. This must be done before any
* call to libspdm_hmac_sha3_384_update().
*
* If hmac_sha3_384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha3_384_ctx Pointer to HMAC-SHA3-384 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key was set successfully.
* @retval false The key could not be set.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_set_key(void *hmac_sha3_384_ctx,
const uint8_t *key,
size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA3-384 context.
*
* If hmac_sha3_384_ctx is NULL, then return false.
* If new_hmac_sha3_384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha3_384_ctx Pointer to HMAC-SHA3-384 context being copied.
* @param[out] new_hmac_sha3_384_ctx Pointer to new HMAC-SHA3-384 context.
*
* @retval true HMAC-SHA3-384 context copy succeeded.
* @retval false HMAC-SHA3-384 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_duplicate(const void *hmac_sha3_384_ctx,
void *new_hmac_sha3_384_ctx);
/**
* Digests the input data and updates HMAC-SHA3-384 context.
*
* This function performs HMAC-SHA3-384 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA3-384 context should be initialized by libspdm_hmac_sha3_384_new(), and should not be
* finalized by libspdm_hmac_sha3_384_final(). Behavior with invalid context is undefined.
*
* If hmac_sha3_384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha3_384_ctx Pointer to the HMAC-SHA3-384 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA3-384 data digest succeeded.
* @retval false HMAC-SHA3-384 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_update(void *hmac_sha3_384_ctx, const void *data,
size_t data_size);
/**
* Completes computation of the HMAC-SHA3-384 digest value.
*
* This function completes HMAC-SHA3-384 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA3-384 context cannot
* be used again. HMAC-SHA3-384 context should be initialized by libspdm_hmac_sha3_384_new(), and
* should not be finalized by libspdm_hmac_sha3_384_final(). Behavior with invalid HMAC-SHA3-384
* context is undefined.
*
* If hmac_sha3_384_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha3_384_ctx Pointer to the HMAC-SHA3-384 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-384 digest
* value (48 bytes).
*
* @retval true HMAC-SHA3-384 digest computation succeeded.
* @retval false HMAC-SHA3-384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_final(void *hmac_sha3_384_ctx, uint8_t *hmac_value);
/**
* Computes the HMAC-SHA3-384 digest of an input data buffer.
*
* This function performs the HMAC-SHA3-384 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-384 digest
* value (48 bytes).
*
* @retval true HMAC-SHA3-384 digest computation succeeded.
* @retval false HMAC-SHA3-384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA3_384_SUPPORT */
#if LIBSPDM_SHA3_512_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-512 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
* If the allocation fails, libspdm_hmac_sha3_512_new() returns NULL.
**/
extern void *libspdm_hmac_sha3_512_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha3_512_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha3_512_free(void *hmac_sha3_512_ctx);
/**
* Sets the user-supplied key for subsequent use. This must be done before any
* call to libspdm_hmac_sha3_512_update().
*
* If hmac_sha3_512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha3_512_ctx Pointer to HMAC-SHA3-512 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key was set successfully.
* @retval false The key could not be set.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_set_key(void *hmac_sha3_512_ctx,
const uint8_t *key,
size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA3-512 context.
*
* If hmac_sha3_512_ctx is NULL, then return false.
* If new_hmac_sha3_512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha3_512_ctx Pointer to HMAC-SHA3-512 context being copied.
* @param[out] new_hmac_sha3_512_ctx Pointer to new HMAC-SHA3-512 context.
*
* @retval true HMAC-SHA3-512 context copy succeeded.
* @retval false HMAC-SHA3-512 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_duplicate(const void *hmac_sha3_512_ctx,
void *new_hmac_sha3_512_ctx);
/**
* Digests the input data and updates HMAC-SHA3-512 context.
*
* This function performs HMAC-SHA3-512 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA3-512 context should be initialized by libspdm_hmac_sha3_512_new(), and should not be
* finalized by libspdm_hmac_sha3_512_final(). Behavior with invalid context is undefined.
*
* If hmac_sha3_512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha3_512_ctx Pointer to the HMAC-SHA3-512 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA3-512 data digest succeeded.
* @retval false HMAC-SHA3-512 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_update(void *hmac_sha3_512_ctx,
const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA3-512 digest value.
*
* This function completes HMAC-SHA3-512 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA3-512 context cannot
* be used again. HMAC-SHA3-512 context should be initialized by libspdm_hmac_sha3_512_new(), and
* should not be finalized by libspdm_hmac_sha3_512_final(). Behavior with invalid HMAC-SHA3-512
* context is undefined.
*
* If hmac_sha3_512_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha3_512_ctx Pointer to the HMAC-SHA3-512 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-512 digest
* value (64 bytes).
*
* @retval true HMAC-SHA3-512 digest computation succeeded.
* @retval false HMAC-SHA3-512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_final(void *hmac_sha3_512_ctx, uint8_t *hmac_value);
/**
* Computes the HMAC-SHA3-512 digest of an input data buffer.
*
* This function performs the HMAC-SHA3-512 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-512 digest
* value (64 bytes).
*
* @retval true HMAC-SHA3-512 digest computation succeeded.
* @retval false HMAC-SHA3-512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA3_512_SUPPORT */
#if LIBSPDM_SM3_256_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SM3-256 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
* If the allocation fails, libspdm_hmac_sm3_256_new() returns NULL.
**/
extern void *libspdm_hmac_sm3_256_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sm3_256_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sm3_256_free(void *hmac_sm3_256_ctx);
/**
* Sets the user-supplied key for subsequent use. This must be done before any
* call to libspdm_hmac_sm3_256_update().
*
* If hmac_sm3_256_ctx is NULL, then return false.
*
* @param[out] hmac_sm3_256_ctx Pointer to HMAC-SM3-256 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key was set successfully.
* @retval false The key could not be set.
**/
extern bool libspdm_hmac_sm3_256_set_key(void *hmac_sm3_256_ctx,
const uint8_t *key, size_t key_size);
/**
* Makes a copy of an existing HMAC-SM3-256 context.
*
* If hmac_sm3_256_ctx is NULL, then return false.
* If new_hmac_sm3_256_ctx is NULL, then return false.
*
* @param[in] hmac_sm3_256_ctx Pointer to HMAC-SM3-256 context being copied.
* @param[out] new_hmac_sm3_256_ctx Pointer to new HMAC-SM3-256 context.
*
* @retval true HMAC-SM3-256 context copy succeeded.
* @retval false HMAC-SM3-256 context copy failed.
**/
extern bool libspdm_hmac_sm3_256_duplicate(const void *hmac_sm3_256_ctx,
void *new_hmac_sm3_256_ctx);
/**
* Digests the input data and updates HMAC-SM3-256 context.
*
* This function performs HMAC-SM3-256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SM3-256 context should be initialized by libspdm_hmac_sm3_256_new(), and should not be
* finalized by libspdm_hmac_sm3_256_final(). Behavior with invalid context is undefined.
*
* If hmac_sm3_256_ctx is NULL, then return false.
*
* @param[in, out] hmac_sm3_256_ctx Pointer to the HMAC-SM3-256 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SM3-256 data digest succeeded.
* @retval false HMAC-SM3-256 data digest failed.
**/
extern bool libspdm_hmac_sm3_256_update(void *hmac_sm3_256_ctx, const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SM3-256 digest value.
*
* This function completes HMAC-SM3-256 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SM3-256 context cannot
* be used again. HMAC-SM3-256 context should be initialized by libspdm_hmac_sm3_256_new(), and
* should not be finalized by libspdm_hmac_sm3_256_final(). Behavior with invalid HMAC-SM3-256
* context is undefined.
*
* If hmac_sm3_256_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
*
* @param[in, out] hmac_sm3_256_ctx Pointer to the HMAC-SM3-256 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SM3-256 digest
* value (32 bytes).
*
* @retval true HMAC-SM3-256 digest computation succeeded.
* @retval false HMAC-SM3-256 digest computation failed.
**/
extern bool libspdm_hmac_sm3_256_final(void *hmac_sm3_256_ctx, uint8_t *hmac_value);
/**
* Computes the HMAC-SM3-256 digest of an input data buffer.
*
* This function performs the HMAC-SM3-256 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SM3-256 digest
* value (32 bytes).
*
* @retval true HMAC-SM3-256 digest computation succeeded.
* @retval false HMAC-SM3-256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sm3_256_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SM3_256_SUPPORT */
#endif /* CRYPTLIB_MAC_H */


@@ -0,0 +1,30 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_RNG_H
#define CRYPTLIB_RNG_H
/*=====================================================================================
* Random Number Generation Primitive
*=====================================================================================*/
/**
* Generates a random byte stream of the specified size. If initialization, testing, or seeding of
* the (pseudo)random number generator is required it should be done before this function is called.
*
* If output is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] output Pointer to buffer to receive random value.
* @param[in] size Size of random bytes to generate.
*
* @retval true Random byte stream generated successfully.
* @retval false Generation of random byte stream failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_random_bytes(uint8_t *output, size_t size);
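/**
 * Usage sketch (illustrative only): drawing a 32-byte nonce. Any required
 * initialization or seeding of the underlying (pseudo)random number
 * generator must already have happened, per the contract above.
 *
 * @code
 * uint8_t nonce[32];
 *
 * if (!libspdm_random_bytes(nonce, sizeof(nonce))) {
 *     // handle failure; nonce contents are unspecified on failure
 * }
 * @endcode
 **/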
#endif /* CRYPTLIB_RNG_H */


@@ -0,0 +1,264 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_RSA_H
#define CRYPTLIB_RSA_H
/*=====================================================================================
* RSA Cryptography Primitives
*=====================================================================================
*/
#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT)
/* RSA key Tags Definition used in libspdm_rsa_set_key() function for key component
* identification.
*/
typedef enum {
LIBSPDM_RSA_KEY_N, /*< RSA public modulus (N)*/
LIBSPDM_RSA_KEY_E, /*< RSA public exponent (e)*/
LIBSPDM_RSA_KEY_D, /*< RSA private exponent (d)*/
LIBSPDM_RSA_KEY_P, /*< RSA secret prime factor of modulus (p)*/
LIBSPDM_RSA_KEY_Q, /*< RSA secret prime factor of modulus (q)*/
LIBSPDM_RSA_KEY_DP, /*< p's CRT exponent (== d mod (p - 1))*/
LIBSPDM_RSA_KEY_DQ, /*< q's CRT exponent (== d mod (q - 1))*/
LIBSPDM_RSA_KEY_Q_INV /*< The CRT coefficient (== 1/q mod p)*/
} libspdm_rsa_key_tag_t;
/**
* Allocates and initializes one RSA context for subsequent use.
*
* @return Pointer to the RSA context that has been initialized.
* If the allocation fails, libspdm_rsa_new() returns NULL.
**/
extern void *libspdm_rsa_new(void);
/**
* Release the specified RSA context.
*
* If rsa_context is NULL, then do nothing.
*
* @param[in] rsa_context Pointer to the RSA context to be released.
**/
extern void libspdm_rsa_free(void *rsa_context);
/**
* Sets the tag-designated key component into the established RSA context.
*
* This function sets the tag-designated RSA key component into the established
* RSA context from the user-specified non-negative integer (octet string format
* represented in RSA PKCS#1).
* If big_number is NULL, then the specified key component in RSA context is cleared.
* If rsa_context is NULL, then return false.
*
* @param[in, out] rsa_context Pointer to RSA context being set.
* @param[in] key_tag tag of RSA key component being set.
* @param[in] big_number Pointer to octet integer buffer.
* If NULL, then the specified key component in RSA
* context is cleared.
* @param[in] bn_size Size of big number buffer in bytes.
* If big_number is NULL, then it is ignored.
*
* @retval true RSA key component was set successfully.
* @retval false Invalid RSA key component tag.
**/
extern bool libspdm_rsa_set_key(void *rsa_context, const libspdm_rsa_key_tag_t key_tag,
const uint8_t *big_number, size_t bn_size);
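/**
 * Usage sketch (illustrative only): loading an RSA public key by setting the
 * modulus and public exponent as PKCS#1 octet strings (big-endian). The
 * buffer names are hypothetical.
 *
 * @code
 * void *rsa = libspdm_rsa_new();
 *
 * if ((rsa != NULL) &&
 *     libspdm_rsa_set_key(rsa, LIBSPDM_RSA_KEY_N, modulus_n, n_size) &&
 *     libspdm_rsa_set_key(rsa, LIBSPDM_RSA_KEY_E, exponent_e, e_size)) {
 *     // rsa now holds a usable public key
 * }
 * @endcode
 **/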
/**
* Gets the tag-designated RSA key component from the established RSA context.
*
* This function retrieves the tag-designated RSA key component from the
* established RSA context as a non-negative integer (octet string format
* represented in RSA PKCS#1).
* If the specified key component has not been set or has been cleared, then the returned
* bn_size is set to 0.
* If the big_number buffer is too small to hold the contents of the key, false
* is returned and bn_size is set to the required buffer size to obtain the key.
*
* If rsa_context is NULL, then return false.
* If bn_size is NULL, then return false.
* If bn_size is large enough but big_number is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] rsa_context Pointer to RSA context being queried.
* @param[in] key_tag Tag of RSA key component being retrieved.
* @param[out] big_number Pointer to octet integer buffer.
* @param[in, out] bn_size On input, the size of big number buffer in bytes.
* On output, the size of data returned in big number buffer in bytes.
*
* @retval true RSA key component was retrieved successfully.
* @retval false Invalid RSA key component tag.
* @retval false bn_size is too small.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_get_key(void *rsa_context, const libspdm_rsa_key_tag_t key_tag,
uint8_t *big_number, size_t *bn_size);
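/**
 * Usage sketch (illustrative only): the two-call pattern for retrieving a
 * component whose size is not known in advance. A first call with a
 * zero-size buffer reports the required size via bn_size; allocate() is a
 * hypothetical allocator.
 *
 * @code
 * size_t n_size = 0;
 *
 * // First call fails but sets n_size to the required buffer size.
 * libspdm_rsa_get_key(rsa, LIBSPDM_RSA_KEY_N, NULL, &n_size);
 * uint8_t *n_buf = allocate(n_size);
 * if ((n_buf != NULL) &&
 *     libspdm_rsa_get_key(rsa, LIBSPDM_RSA_KEY_N, n_buf, &n_size)) {
 *     // n_buf holds the modulus, n_size its exact length
 * }
 * @endcode
 **/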
/**
* Generates RSA key components.
*
* This function generates RSA key components. It takes RSA public exponent E and
* length in bits of RSA modulus N as input, and generates all key components.
* If public_exponent is NULL, the default RSA public exponent (0x10001) will be used.
*
* If rsa_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] rsa_context Pointer to RSA context being set.
* @param[in] modulus_length Length of RSA modulus N in bits.
* @param[in] public_exponent Pointer to RSA public exponent.
* @param[in] public_exponent_size Size of RSA public exponent buffer in bytes.
*
* @retval true RSA key components were generated successfully.
* @retval false RSA key generation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_generate_key(void *rsa_context, size_t modulus_length,
const uint8_t *public_exponent,
size_t public_exponent_size);
/**
* Validates key components of RSA context.
* NOTE: This function performs integrity checks on all the RSA key material, so
* the RSA key structure must contain all the private key data.
*
* This function validates key components of RSA context in the following aspects:
* - Whether p is a prime
* - Whether q is a prime
* - Whether n = p * q
* - Whether d*e = 1 mod lcm(p-1,q-1)
*
* If rsa_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] rsa_context Pointer to RSA context to check.
*
* @retval true RSA key components are valid.
* @retval false RSA key components are not valid.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_check_key(void *rsa_context);
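/*
 * Illustrative sketch, not part of the upstream header: generating a fresh
 * 2048-bit key pair with the default public exponent and validating it before
 * use. libspdm_rsa_new() is assumed from earlier in this header family;
 * example_generate_checked_rsa is a hypothetical name.
 */
static bool example_generate_checked_rsa(void **out_ctx)
{
    void *ctx = libspdm_rsa_new();

    if (ctx == NULL) {
        return false;
    }
    /* A NULL public_exponent selects the default 0x10001 (see above). */
    if (!libspdm_rsa_generate_key(ctx, 2048, NULL, 0) ||
        !libspdm_rsa_check_key(ctx)) {
        libspdm_rsa_free(ctx);
        return false;
    }
    *out_ctx = ctx;
    return true;
}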
#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */
#if LIBSPDM_RSA_SSA_SUPPORT
/**
* Carries out the RSA-SSA signature generation with EMSA-PKCS1-v1_5 encoding scheme.
*
* This function carries out the RSA-SSA signature generation with EMSA-PKCS1-v1_5 encoding scheme
* defined in RSA PKCS#1. If the signature buffer is too small to hold the contents of signature,
* false is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
* hash_size must match the hash_nid. hash_nid could be SHA256, SHA384, SHA512, SHA3_256,
* SHA3_384, SHA3_512.
* If sig_size is large enough but signature is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] rsa_context Pointer to RSA context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be signed.
* @param[in] hash_size Size of the message hash in bytes.
* @param[out] signature Pointer to buffer to receive RSA PKCS1-v1_5 signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in PKCS1-v1_5.
* @retval false signature generation failed.
* @retval false sig_size is too small.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_pkcs1_sign_with_nid(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash,
size_t hash_size, uint8_t *signature,
size_t *sig_size);
/**
* Verifies the RSA-SSA signature with EMSA-PKCS1-v1_5 encoding scheme defined in RSA PKCS#1.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
* If signature is NULL, then return false.
* hash_size must match the hash_nid. hash_nid could be SHA256, SHA384, SHA512, SHA3_256,
* SHA3_384, SHA3_512.
*
* @param[in] rsa_context Pointer to RSA context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be checked.
* @param[in] hash_size Size of the message hash in bytes.
* @param[in] signature Pointer to RSA PKCS1-v1_5 signature to be verified.
* @param[in] sig_size Size of signature in bytes.
*
* @retval true Valid signature encoded in PKCS1-v1_5.
* @retval false Invalid signature or invalid RSA context.
**/
extern bool libspdm_rsa_pkcs1_verify_with_nid(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash,
size_t hash_size, const uint8_t *signature,
size_t sig_size);
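/*
 * Illustrative round trip, not part of the upstream header: signing a SHA-256
 * digest and verifying it with the two functions above. The 512-byte
 * signature buffer covers keys up to RSA-4096; example_pkcs1_sign_verify and
 * sha256_digest are hypothetical names.
 */
static bool example_pkcs1_sign_verify(void *rsa_context,
                                      const uint8_t sha256_digest[32])
{
    uint8_t signature[512];
    size_t sig_size = sizeof(signature);

    if (!libspdm_rsa_pkcs1_sign_with_nid(rsa_context, LIBSPDM_CRYPTO_NID_SHA256,
                                         sha256_digest, 32,
                                         signature, &sig_size)) {
        return false;
    }
    return libspdm_rsa_pkcs1_verify_with_nid(rsa_context, LIBSPDM_CRYPTO_NID_SHA256,
                                             sha256_digest, 32,
                                             signature, sig_size);
}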
#endif /* LIBSPDM_RSA_SSA_SUPPORT */
#if LIBSPDM_RSA_PSS_SUPPORT
/**
* Carries out the RSA-SSA signature generation with EMSA-PSS encoding scheme.
*
* This function carries out the RSA-SSA signature generation with EMSA-PSS encoding scheme defined
* in RSA PKCS#1 v2.2.
*
* The salt length is the same as the digest length.
*
* If the signature buffer is too small to hold the contents of signature, false
* is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
* hash_size must match the hash_nid. nid could be SHA256, SHA384, SHA512, SHA3_256, SHA3_384,
* SHA3_512.
* If sig_size is large enough but signature is NULL, then return false.
*
* @param[in] rsa_context Pointer to RSA context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be signed.
* @param[in] hash_size Size of the message hash in bytes.
* @param[out] signature Pointer to buffer to receive RSA-SSA PSS signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in RSA-SSA PSS.
* @retval false signature generation failed.
* @retval false sig_size is too small.
**/
extern bool libspdm_rsa_pss_sign(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size);
/**
* Verifies the RSA-SSA signature with EMSA-PSS encoding scheme defined in
* RSA PKCS#1 v2.2.
*
* The salt length is the same as the digest length.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
* If signature is NULL, then return false.
* hash_size must match the hash_nid. nid could be SHA256, SHA384, SHA512, SHA3_256, SHA3_384,
* SHA3_512.
*
* @param[in] rsa_context Pointer to RSA context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be checked.
* @param[in] hash_size Size of the message hash in bytes.
* @param[in] signature Pointer to RSA-SSA PSS signature to be verified.
* @param[in] sig_size Size of signature in bytes.
*
* @retval true Valid signature encoded in RSA-SSA PSS.
* @retval false Invalid signature or invalid RSA context.
**/
extern bool libspdm_rsa_pss_verify(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size);
#endif /* LIBSPDM_RSA_PSS_SUPPORT */
#endif /* CRYPTLIB_RSA_H */


@@ -0,0 +1,194 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_SM2_H
#define CRYPTLIB_SM2_H
/*=====================================================================================
* Shang-Mi2 Primitives
*=====================================================================================*/
#if LIBSPDM_SM2_DSA_SUPPORT
/**
* Allocates and initializes one Shang-Mi2 context for subsequent use.
*
* @param nid cipher NID
*
* @return Pointer to the Shang-Mi2 context that has been initialized.
* If the allocation fails, this function returns NULL.
**/
extern void *libspdm_sm2_dsa_new_by_nid(size_t nid);
/**
* Release the specified sm2 context.
*
* @param[in] sm2_context Pointer to the sm2 context to be released.
**/
extern void libspdm_sm2_dsa_free(void *sm2_context);
/**
* Carries out the SM2 signature, based upon GB/T 32918.2-2016: SM2 - Part2.
*
* This function carries out the SM2 signature.
* If the signature buffer is too small to hold the contents of signature, false
* is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If sm2_context is NULL, then return false.
* If message is NULL, then return false.
* hash_nid must be SM3_256.
* If sig_size is large enough but signature is NULL, then return false.
*
* The id_a_size must be smaller than 2^16-1.
* The sig_size is 64: the first 32 bytes are R, the second 32 bytes are S.
*
* @param[in] sm2_context Pointer to sm2 context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] id_a The ID-A of the signing context.
* @param[in] id_a_size Size of ID-A signing context.
* @param[in] message Pointer to octet message to be signed (before hash).
* @param[in] size Size of the message in bytes.
* @param[out] signature Pointer to buffer to receive SM2 signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in SM2.
* @retval false signature generation failed.
* @retval false sig_size is too small.
**/
extern bool libspdm_sm2_dsa_sign(const void *sm2_context, size_t hash_nid,
const uint8_t *id_a, size_t id_a_size,
const uint8_t *message, size_t size,
uint8_t *signature, size_t *sig_size);
/**
* Verifies the SM2 signature, based upon GB/T 32918.2-2016: SM2 - Part2.
*
* If sm2_context is NULL, then return false.
* If message is NULL, then return false.
* If signature is NULL, then return false.
* hash_nid must be SM3_256.
*
* The id_a_size must be smaller than 2^16-1.
* The sig_size is 64: the first 32 bytes are R, the second 32 bytes are S.
*
* @param[in] sm2_context Pointer to SM2 context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] id_a The ID-A of the signing context.
* @param[in] id_a_size Size of ID-A signing context.
* @param[in] message Pointer to octet message to be checked (before hash).
* @param[in] size Size of the message in bytes.
* @param[in] signature Pointer to SM2 signature to be verified.
* @param[in] sig_size Size of signature in bytes.
*
* @retval true Valid signature encoded in SM2.
* @retval false Invalid signature or invalid sm2 context.
*
**/
extern bool libspdm_sm2_dsa_verify(const void *sm2_context, size_t hash_nid,
const uint8_t *id_a, size_t id_a_size,
const uint8_t *message, size_t size,
const uint8_t *signature, size_t sig_size);
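/*
 * Illustrative sketch, not part of the upstream header: an SM2 sign/verify
 * round trip. Unlike the RSA helpers above, these functions take the raw
 * message (SM3 hashing happens internally) plus the signer ID. The default ID
 * "1234567812345678" from GB/T 32918 is shown purely as an example value;
 * example_sm2_sign_verify is a hypothetical name.
 */
static bool example_sm2_sign_verify(void *sm2_ctx,
                                    const uint8_t *msg, size_t msg_size)
{
    static const uint8_t id_a[] = "1234567812345678";
    uint8_t signature[64];  /* 32-byte R followed by 32-byte S */
    size_t sig_size = sizeof(signature);

    if (!libspdm_sm2_dsa_sign(sm2_ctx, LIBSPDM_CRYPTO_NID_SM3_256,
                              id_a, sizeof(id_a) - 1, msg, msg_size,
                              signature, &sig_size)) {
        return false;
    }
    return libspdm_sm2_dsa_verify(sm2_ctx, LIBSPDM_CRYPTO_NID_SM3_256,
                                  id_a, sizeof(id_a) - 1, msg, msg_size,
                                  signature, sig_size);
}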
#endif /* LIBSPDM_SM2_DSA_SUPPORT */
#if LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT
/**
* Allocates and initializes one Shang-Mi2 context for subsequent use.
*
* @param nid cipher NID
*
* @return Pointer to the Shang-Mi2 context that has been initialized.
* If the allocation fails, this function returns NULL.
**/
extern void *libspdm_sm2_key_exchange_new_by_nid(size_t nid);
/**
* Release the specified sm2 context.
*
* @param[in] sm2_context Pointer to the sm2 context to be released.
*
**/
extern void libspdm_sm2_key_exchange_free(void *sm2_context);
/**
* Initialize the specified sm2 context.
*
* @param[in] sm2_context Pointer to the sm2 context to be initialized.
* @param[in] hash_nid hash NID, only SM3 is valid.
* @param[in] id_a The ID-A of the key exchange context.
* @param[in] id_a_size Size of ID-A key exchange context.
* @param[in] id_b The ID-B of the key exchange context.
* @param[in] id_b_size Size of ID-B key exchange context.
* @param[in] is_initiator If the caller is initiator.
*
* @retval true sm2 context is initialized.
* @retval false sm2 context is not initialized.
**/
extern bool libspdm_sm2_key_exchange_init(const void *sm2_context, size_t hash_nid,
const uint8_t *id_a, size_t id_a_size,
const uint8_t *id_b, size_t id_b_size,
bool is_initiator);
/**
* Generates sm2 key and returns sm2 public key (X, Y), based upon GB/T 32918.3-2016: SM2 - Part3.
*
* This function generates a random secret, and computes the public key (X, Y), which is
* returned via the parameters public_data and public_size.
* X is the first half of public_data with size being public_size / 2,
* Y is the second half of public_data with size being public_size / 2.
* sm2 context is updated accordingly.
* If the public_data buffer is too small to hold the public X, Y, false is returned and
* public_size is set to the required buffer size to obtain the public X, Y.
*
* The public_size is 64: the first 32 bytes are X, the second 32 bytes are Y.
*
* If sm2_context is NULL, then return false.
* If public_size is NULL, then return false.
* If public_size is large enough but public_data is NULL, then return false.
*
* @param[in, out] sm2_context Pointer to the sm2 context.
* @param[out] public_data Pointer to the buffer to receive generated public X,Y.
* @param[in, out] public_size On input, the size of public buffer in bytes.
* On output, the size of data returned in public buffer in bytes.
*
* @retval true sm2 public X,Y generation succeeded.
* @retval false sm2 public X,Y generation failed.
* @retval false public_size is not large enough.
**/
extern bool libspdm_sm2_key_exchange_generate_key(void *sm2_context, uint8_t *public_data,
size_t *public_size);
/**
* Computes exchanged common key, based upon GB/T 32918.3-2016: SM2 - Part3.
*
* Given peer's public key (X, Y), this function computes the exchanged common key,
* based on its own context including value of curve parameter and random secret.
* X is the first half of peer_public with size being peer_public_size / 2,
* Y is the second half of peer_public with size being peer_public_size / 2.
*
* If sm2_context is NULL, then return false.
* If peer_public is NULL, then return false.
* If peer_public_size is 0, then return false.
* If key is NULL, then return false.
*
* The id_a_size and id_b_size must be smaller than 2^16-1.
* The peer_public_size is 64: the first 32 bytes are X, the second 32 bytes are Y.
* The key_size must be smaller than 2^32-1, limited by the KDF function.
*
* @param[in, out] sm2_context Pointer to the sm2 context.
* @param[in] peer_public Pointer to the peer's public X,Y.
* @param[in] peer_public_size Size of peer's public X,Y in bytes.
* @param[out] key Pointer to the buffer to receive generated key.
* @param[in, out] key_size On input, the size of key buffer in bytes.
* On output, the size of data returned in key buffer in bytes.
*
* @retval true sm2 exchanged key generation succeeded.
* @retval false sm2 exchanged key generation failed.
**/
extern bool libspdm_sm2_key_exchange_compute_key(void *sm2_context,
const uint8_t *peer_public,
size_t peer_public_size, uint8_t *key,
size_t *key_size);
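/*
 * Illustrative flow, not part of the upstream header: one side of the
 * GB/T 32918.3 key exchange using the three functions above. The peer's
 * 64-byte public value arrives over the transport; the 16-byte session key
 * size and example_sm2_key_exchange itself are hypothetical example choices.
 */
static bool example_sm2_key_exchange(void *sm2_ctx,
                                     const uint8_t *id_a, size_t id_a_size,
                                     const uint8_t *id_b, size_t id_b_size,
                                     const uint8_t peer_public[64],
                                     uint8_t session_key[16])
{
    uint8_t my_public[64];
    size_t my_public_size = sizeof(my_public);
    size_t key_size = 16;

    if (!libspdm_sm2_key_exchange_init(sm2_ctx, LIBSPDM_CRYPTO_NID_SM3_256,
                                       id_a, id_a_size, id_b, id_b_size,
                                       true /* initiator */)) {
        return false;
    }
    if (!libspdm_sm2_key_exchange_generate_key(sm2_ctx, my_public,
                                               &my_public_size)) {
        return false;
    }
    /* ... send my_public to the peer, receive peer_public ... */
    return libspdm_sm2_key_exchange_compute_key(sm2_ctx, peer_public, 64,
                                                session_key, &key_size);
}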
#endif /* LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT */
#endif /* CRYPTLIB_SM2_H */


@@ -0,0 +1,71 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef LIBSPDM_LIB_CONFIG_H
#define LIBSPDM_LIB_CONFIG_H
#ifndef LIBSPDM_CONFIG
#include "library/spdm_lib_config.h"
#else
#include LIBSPDM_CONFIG
#endif
#if defined(LIBSPDM_ENABLE_SET_CERTIFICATE_CAP) && \
!defined(LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP)
#ifdef _MSC_VER
#pragma message("LIBSPDM_ENABLE_SET_CERTIFICATE_CAP is deprecated. Use " \
"LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP instead. This warning will be removed in a " \
"future release.")
#else
#warning LIBSPDM_ENABLE_SET_CERTIFICATE_CAP is deprecated. Use \
LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP instead. This warning will be removed in a \
future release.
#endif /* _MSC_VER */
#endif /* defined(LIBSPDM_ENABLE_SET_CERTIFICATE_CAP) */
#if defined(LIBSPDM_ENABLE_CHUNK_CAP) && !defined(LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP)
#ifdef _MSC_VER
#pragma message("LIBSPDM_ENABLE_CHUNK_CAP is deprecated. Use LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP " \
"instead. This warning will be removed in a future release.")
#else
#warning LIBSPDM_ENABLE_CHUNK_CAP is deprecated. Use LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP \
instead. This warning will be removed in a future release.
#endif /* _MSC_VER */
#endif /* defined(LIBSPDM_ENABLE_CHUNK_CAP) */
#if defined(MDEPKG_NDEBUG) && !defined(LIBSPDM_DEBUG_ENABLE)
#ifdef _MSC_VER
#pragma message("MDEPKG_NDEBUG is deprecated. Use LIBSPDM_DEBUG_ENABLE " \
"instead. This warning will be removed in a future release.")
#else
#warning MDEPKG_NDEBUG is deprecated. Use LIBSPDM_DEBUG_ENABLE \
instead. This warning will be removed in a future release.
#endif /* _MSC_VER */
#endif /* defined(MDEPKG_NDEBUG) */
#if defined(LIBSPDM_DEBUG_ENABLE)
#undef LIBSPDM_DEBUG_ASSERT_ENABLE
#undef LIBSPDM_DEBUG_PRINT_ENABLE
#undef LIBSPDM_DEBUG_BLOCK_ENABLE
#define LIBSPDM_DEBUG_ASSERT_ENABLE (LIBSPDM_DEBUG_ENABLE)
#define LIBSPDM_DEBUG_PRINT_ENABLE (LIBSPDM_DEBUG_ENABLE)
#define LIBSPDM_DEBUG_BLOCK_ENABLE (LIBSPDM_DEBUG_ENABLE)
#elif defined(MDEPKG_NDEBUG)
#undef LIBSPDM_DEBUG_ASSERT_ENABLE
#undef LIBSPDM_DEBUG_PRINT_ENABLE
#undef LIBSPDM_DEBUG_BLOCK_ENABLE
#define LIBSPDM_DEBUG_ASSERT_ENABLE 0
#define LIBSPDM_DEBUG_PRINT_ENABLE 0
#define LIBSPDM_DEBUG_BLOCK_ENABLE 0
#endif /* defined(LIBSPDM_DEBUG_ENABLE) */
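/*
 * Worked example, not part of the upstream header: when LIBSPDM_DEBUG_ENABLE
 * is defined to 0, the block above makes all three macros evaluate to 0,
 * i.e. it behaves as if the following had been written:
 *
 *     #define LIBSPDM_DEBUG_ASSERT_ENABLE 0
 *     #define LIBSPDM_DEBUG_PRINT_ENABLE  0
 *     #define LIBSPDM_DEBUG_BLOCK_ENABLE  0
 *
 * so asserts, debug prints, and debug code blocks are compiled out together.
 */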
#if LIBSPDM_CHECK_MACRO
#include "internal/libspdm_macro_check.h"
#endif /* LIBSPDM_CHECK_MACRO */
#endif /* LIBSPDM_LIB_CONFIG_H */


@@ -0,0 +1,154 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __INTERNAL_CRYPT_LIB_H__
#define __INTERNAL_CRYPT_LIB_H__
/*
* This code uses the Linux Kernel Crypto API extensively. The web page written by
* Stephan Mueller and Marek Vasut is a good starting reference on how the Linux
* kernel provides its crypto API.
*/
#include "conftest.h"
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/random.h>
#include <linux/string.h>
// Check if ECDH/ECDSA are available; on some platforms they might not be.
#ifndef AUTOCONF_INCLUDED
#if defined(NV_GENERATED_AUTOCONF_H_PRESENT)
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
#endif
#endif
#if \
(defined(CONFIG_CRYPTO_AEAD) || defined(CONFIG_CRYPTO_AEAD_MODULE)) && \
(defined(CONFIG_CRYPTO_AKCIPHER) || defined(CONFIG_CRYPTO_AKCIPHER_MODULE)) && \
(defined(CONFIG_CRYPTO_SKCIPHER) || defined(CONFIG_CRYPTO_SKCIPHER_MODULE)) && \
(defined(CONFIG_CRYPTO_HASH) || defined(CONFIG_CRYPTO_HASH_MODULE)) && \
(defined(CONFIG_CRYPTO_HMAC) || defined(CONFIG_CRYPTO_HMAC_MODULE)) && \
(defined(CONFIG_CRYPTO_ECDH) || defined(CONFIG_CRYPTO_ECDH_MODULE)) && \
(defined(CONFIG_CRYPTO_ECDSA) || defined(CONFIG_CRYPTO_ECDSA_MODULE)) && \
(defined(CONFIG_X509_CERTIFICATE_PARSER) || defined(CONFIG_X509_CERTIFICATE_PARSER_MODULE))
#define NV_CONFIG_CRYPTO_PRESENT 1
#endif
/*
* It is possible that we don't have access to all the functions we need. This
* could be because we are running a non-GPL kernel, because the kernel is too
* old, or simply because the user disabled them. If we can use LKCA, include
* its headers; otherwise define stubs that return errors.
*/
#if defined(NV_CRYPTO_PRESENT) && defined(NV_CONFIG_CRYPTO_PRESENT)
#define USE_LKCA 1
#endif
#ifdef USE_LKCA
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sm3.h>
// HASH_MAX_DIGESTSIZE is available since 4.20.
// This value is accurate as of 6.1
#ifndef HASH_MAX_DIGESTSIZE
#define HASH_MAX_DIGESTSIZE 64
#endif
#else
// Just stub everything out
struct shash_desc;
struct crypto_shash;
#define crypto_shash_setkey(...) -ENOMEM
#define crypto_shash_init(...) -ENOMEM
#define crypto_shash_update(...) -ENOMEM
#define crypto_shash_final(...) -ENOMEM
#endif
#define CHAR_BIT 8U
#undef SIZE_MAX
#define SIZE_MAX 8
#include "library/cryptlib.h"
#define LIBSPDM_ASSERT(...)
struct lkca_aead_ctx;
int lkca_aead_alloc(struct lkca_aead_ctx **ctx, char const *alg);
void lkca_aead_free(struct lkca_aead_ctx *ctx);
int lkca_aead_ex(struct lkca_aead_ctx *ctx,
const uint8_t *key, size_t key_size,
uint8_t *iv, size_t iv_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size,
bool enc);
int libspdm_aead(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size,
bool enc, char const *alg);
void *lkca_hash_new(const char* alg_name);
void lkca_hash_free(struct shash_desc *ctx);
bool lkca_hash_duplicate(struct shash_desc *dst, struct shash_desc const *src);
bool lkca_hash_all(const char* alg_name, const void *data,
size_t data_size, uint8_t *hash_value);
bool lkca_hmac_duplicate(struct shash_desc *dst, struct shash_desc const *src);
bool lkca_hmac_set_key(struct shash_desc *ctx, const uint8_t *key, size_t key_size);
bool lkca_hmac_all(const char* alg_name, const uint8_t *key, size_t key_size,
const uint8_t *data, size_t data_size, uint8_t *hash_value);
bool lkca_hkdf_extract_and_expand(const char *alg_name,
const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
bool lkca_hkdf_expand(const char *alg_name,
const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
bool lkca_ecdsa_set_priv_key(void *context, uint8_t *key, size_t key_size);
bool lkca_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
size_t public_key_size);
bool lkca_ec_get_pub_key(void *ec_context, uint8_t *public_key,
size_t *public_key_size);
bool lkca_ec_generate_key(void *ec_context, uint8_t *public_data,
size_t *public_size);
bool lkca_ec_compute_key(void *ec_context, const uint8_t *peer_public,
size_t peer_public_size, uint8_t *key,
size_t *key_size);
bool lkca_ecdsa_verify(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size);
#endif


@@ -0,0 +1,109 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
/** @file
* Defines base cryptographic library APIs.
* The Base Cryptographic Library provides implementations of basic cryptography
* primitives (hash families, HMAC, AES, RSA, Diffie-Hellman, Elliptic Curve, etc.) needed to
* enable security functionality.
**/
#ifndef CRYPTLIB_H
#define CRYPTLIB_H
#include "internal/libspdm_lib_config.h"
#define LIBSPDM_CRYPTO_NID_NULL 0x0000
/* Hash */
#define LIBSPDM_CRYPTO_NID_SHA256 0x0001
#define LIBSPDM_CRYPTO_NID_SHA384 0x0002
#define LIBSPDM_CRYPTO_NID_SHA512 0x0003
#define LIBSPDM_CRYPTO_NID_SHA3_256 0x0004
#define LIBSPDM_CRYPTO_NID_SHA3_384 0x0005
#define LIBSPDM_CRYPTO_NID_SHA3_512 0x0006
#define LIBSPDM_CRYPTO_NID_SM3_256 0x0007
/* Signing */
#define LIBSPDM_CRYPTO_NID_RSASSA2048 0x0101
#define LIBSPDM_CRYPTO_NID_RSASSA3072 0x0102
#define LIBSPDM_CRYPTO_NID_RSASSA4096 0x0103
#define LIBSPDM_CRYPTO_NID_RSAPSS2048 0x0104
#define LIBSPDM_CRYPTO_NID_RSAPSS3072 0x0105
#define LIBSPDM_CRYPTO_NID_RSAPSS4096 0x0106
#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P256 0x0107
#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P384 0x0108
#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P521 0x0109
#define LIBSPDM_CRYPTO_NID_SM2_DSA_P256 0x010A
#define LIBSPDM_CRYPTO_NID_EDDSA_ED25519 0x010B
#define LIBSPDM_CRYPTO_NID_EDDSA_ED448 0x010C
/* Key Exchange */
#define LIBSPDM_CRYPTO_NID_FFDHE2048 0x0201
#define LIBSPDM_CRYPTO_NID_FFDHE3072 0x0202
#define LIBSPDM_CRYPTO_NID_FFDHE4096 0x0203
#define LIBSPDM_CRYPTO_NID_SECP256R1 0x0204
#define LIBSPDM_CRYPTO_NID_SECP384R1 0x0205
#define LIBSPDM_CRYPTO_NID_SECP521R1 0x0206
#define LIBSPDM_CRYPTO_NID_SM2_KEY_EXCHANGE_P256 0x0207
#define LIBSPDM_CRYPTO_NID_CURVE_X25519 0x0208
#define LIBSPDM_CRYPTO_NID_CURVE_X448 0x0209
/* AEAD */
#define LIBSPDM_CRYPTO_NID_AES_128_GCM 0x0301
#define LIBSPDM_CRYPTO_NID_AES_256_GCM 0x0302
#define LIBSPDM_CRYPTO_NID_CHACHA20_POLY1305 0x0303
#define LIBSPDM_CRYPTO_NID_SM4_128_GCM 0x0304
/* X.509 v3 key usage extension flags. */
#define LIBSPDM_CRYPTO_X509_KU_DIGITAL_SIGNATURE 0x80 /* bit 0 */
#define LIBSPDM_CRYPTO_X509_KU_NON_REPUDIATION 0x40 /* bit 1 */
#define LIBSPDM_CRYPTO_X509_KU_KEY_ENCIPHERMENT 0x20 /* bit 2 */
#define LIBSPDM_CRYPTO_X509_KU_DATA_ENCIPHERMENT 0x10 /* bit 3 */
#define LIBSPDM_CRYPTO_X509_KU_KEY_AGREEMENT 0x08 /* bit 4 */
#define LIBSPDM_CRYPTO_X509_KU_KEY_CERT_SIGN 0x04 /* bit 5 */
#define LIBSPDM_CRYPTO_X509_KU_CRL_SIGN 0x02 /* bit 6 */
#define LIBSPDM_CRYPTO_X509_KU_ENCIPHER_ONLY 0x01 /* bit 7 */
#define LIBSPDM_CRYPTO_X509_KU_DECIPHER_ONLY 0x8000 /* bit 8 */
/* These constants comply with the DER encoded ASN.1 type tags. */
#define LIBSPDM_CRYPTO_ASN1_BOOLEAN 0x01
#define LIBSPDM_CRYPTO_ASN1_INTEGER 0x02
#define LIBSPDM_CRYPTO_ASN1_BIT_STRING 0x03
#define LIBSPDM_CRYPTO_ASN1_OCTET_STRING 0x04
#define LIBSPDM_CRYPTO_ASN1_NULL 0x05
#define LIBSPDM_CRYPTO_ASN1_OID 0x06
#define LIBSPDM_CRYPTO_ASN1_UTF8_STRING 0x0C
#define LIBSPDM_CRYPTO_ASN1_SEQUENCE 0x10
#define LIBSPDM_CRYPTO_ASN1_SET 0x11
#define LIBSPDM_CRYPTO_ASN1_PRINTABLE_STRING 0x13
#define LIBSPDM_CRYPTO_ASN1_T61_STRING 0x14
#define LIBSPDM_CRYPTO_ASN1_IA5_STRING 0x16
#define LIBSPDM_CRYPTO_ASN1_UTC_TIME 0x17
#define LIBSPDM_CRYPTO_ASN1_GENERALIZED_TIME 0x18
#define LIBSPDM_CRYPTO_ASN1_UNIVERSAL_STRING 0x1C
#define LIBSPDM_CRYPTO_ASN1_BMP_STRING 0x1E
#define LIBSPDM_CRYPTO_ASN1_PRIMITIVE 0x00
#define LIBSPDM_CRYPTO_ASN1_CONSTRUCTED 0x20
#define LIBSPDM_CRYPTO_ASN1_CONTEXT_SPECIFIC 0x80
#define LIBSPDM_CRYPTO_ASN1_TAG_CLASS_MASK 0xC0
#define LIBSPDM_CRYPTO_ASN1_TAG_PC_MASK 0x20
#define LIBSPDM_CRYPTO_ASN1_TAG_VALUE_MASK 0x1F
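/*
 * Illustrative sketch, not part of the upstream header: splitting a DER tag
 * octet with the masks above. For 0x30 (a constructed SEQUENCE) this yields
 * class 0x00 (universal), constructed = true, and tag value
 * LIBSPDM_CRYPTO_ASN1_SEQUENCE (0x10). example_split_asn1_tag is a
 * hypothetical helper name.
 */
static inline void example_split_asn1_tag(uint8_t tag, uint8_t *tag_class,
                                          bool *constructed, uint8_t *value)
{
    *tag_class   = tag & LIBSPDM_CRYPTO_ASN1_TAG_CLASS_MASK;
    *constructed = (tag & LIBSPDM_CRYPTO_ASN1_TAG_PC_MASK) != 0;
    *value       = tag & LIBSPDM_CRYPTO_ASN1_TAG_VALUE_MASK;
}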
#include "hal/library/cryptlib/cryptlib_hash.h"
#include "hal/library/cryptlib/cryptlib_mac.h"
#include "hal/library/cryptlib/cryptlib_aead.h"
#include "hal/library/cryptlib/cryptlib_cert.h"
#include "hal/library/cryptlib/cryptlib_hkdf.h"
#include "hal/library/cryptlib/cryptlib_rsa.h"
#include "hal/library/cryptlib/cryptlib_ec.h"
#include "hal/library/cryptlib/cryptlib_dh.h"
#include "hal/library/cryptlib/cryptlib_ecd.h"
#include "hal/library/cryptlib/cryptlib_sm2.h"
#include "hal/library/cryptlib/cryptlib_rng.h"
#endif /* CRYPTLIB_H */


@@ -0,0 +1,415 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef SPDM_LIB_CONFIG_H
#define SPDM_LIB_CONFIG_H
/* Enables assertions and debug printing. When `LIBSPDM_DEBUG_ENABLE` is defined it overrides or
* sets the values of `LIBSPDM_DEBUG_PRINT_ENABLE`, `LIBSPDM_DEBUG_ASSERT_ENABLE`, and
* `LIBSPDM_DEBUG_BLOCK_ENABLE` to the value of `LIBSPDM_DEBUG_ENABLE`.
*
* Note that if this file is used with CMake and `-DTARGET=Release` is defined, then all debugging
* is disabled.
*/
#ifndef LIBSPDM_DEBUG_ENABLE
#define LIBSPDM_DEBUG_ENABLE 1
#endif
/* The SPDM specification allows a Responder to return up to 256 version entries in the `VERSION`
* response to the Requester, including duplicate entries. For a Requester this value specifies the
* maximum number of entries that libspdm will tolerate in a `VERSION` response before returning an
* error. A similar macro, `SPDM_MAX_VERSION_COUNT`, exists for the Responder. However, this macro
* is not meant to be configured by the Integrator.
*/
#ifndef LIBSPDM_MAX_VERSION_COUNT
#define LIBSPDM_MAX_VERSION_COUNT 5
#endif
/* This value specifies the maximum size, in bytes, of the `PSK_EXCHANGE.RequesterContext` and,
* if supported by the Responder, `PSK_EXCHANGE_RSP.ResponderContext` fields. The fields are
* typically random or monotonically increasing numbers.
*/
#ifndef LIBSPDM_PSK_CONTEXT_LENGTH
#define LIBSPDM_PSK_CONTEXT_LENGTH LIBSPDM_MAX_HASH_SIZE
#endif
/* This value specifies the maximum size, in bytes, of the `PSK_EXCHANGE.PSKHint` field.*/
#ifndef LIBSPDM_PSK_MAX_HINT_LENGTH
#define LIBSPDM_PSK_MAX_HINT_LENGTH 16
#endif
/* libspdm allows an Integrator to specify multiple root certificates as trust anchors when
* verifying certificate chains from an endpoint. This value specifies the maximum number of root
* certificates that libspdm can support.
*/
#ifndef LIBSPDM_MAX_ROOT_CERT_SUPPORT
#define LIBSPDM_MAX_ROOT_CERT_SUPPORT 10
#endif
/* If the Responder supports it, a Requester is allowed to establish multiple secure sessions with
* the Responder. This value specifies the maximum number of sessions libspdm can support.
*/
#ifndef LIBSPDM_MAX_SESSION_COUNT
#define LIBSPDM_MAX_SESSION_COUNT 4
#endif
/* This value specifies the maximum size, in bytes, of a certificate chain that can be stored in a
* libspdm context.
*/
#ifndef LIBSPDM_MAX_CERT_CHAIN_SIZE
#define LIBSPDM_MAX_CERT_CHAIN_SIZE 0x1000
#endif
#ifndef LIBSPDM_MAX_MEASUREMENT_RECORD_SIZE
#define LIBSPDM_MAX_MEASUREMENT_RECORD_SIZE 0x1000
#endif
/* Partial certificates can be retrieved from a Requester or Responder, and through multiple
* messages the complete certificate chain can be constructed. This value specifies the maximum size,
* in bytes, of a partial certificate that can be sent or received.
*/
#ifndef LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN
#define LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN 1024
#endif
#ifndef LIBSPDM_MAX_MESSAGE_BUFFER_SIZE
#define LIBSPDM_MAX_MESSAGE_BUFFER_SIZE 0x1200
#endif
#ifndef LIBSPDM_MAX_MESSAGE_SMALL_BUFFER_SIZE
#define LIBSPDM_MAX_MESSAGE_SMALL_BUFFER_SIZE 0x100 /* to hold message_a before negotiate*/
#endif
#ifndef LIBSPDM_MAX_MESSAGE_MEDIUM_BUFFER_SIZE
#define LIBSPDM_MAX_MESSAGE_MEDIUM_BUFFER_SIZE 0x300 /* to hold message_k before finished_key is ready*/
#endif
/* If the Responder replies with a Busy `ERROR` response to a request then the Requester is free to
* retry sending the request. This value specifies the maximum number of times libspdm will retry
* sending the request before returning an error. If its value is 0 then libspdm will not send any
* retry requests.
*/
#ifndef LIBSPDM_MAX_REQUEST_RETRY_TIMES
#define LIBSPDM_MAX_REQUEST_RETRY_TIMES 3
#endif
#ifndef LIBSPDM_MAX_SESSION_STATE_CALLBACK_NUM
#define LIBSPDM_MAX_SESSION_STATE_CALLBACK_NUM 4
#endif
#ifndef LIBSPDM_MAX_CONNECTION_STATE_CALLBACK_NUM
#define LIBSPDM_MAX_CONNECTION_STATE_CALLBACK_NUM 4
#endif
#ifndef LIBSPDM_MAX_KEY_UPDATE_CALLBACK_NUM
#define LIBSPDM_MAX_KEY_UPDATE_CALLBACK_NUM 4
#endif
#ifndef LIBSPDM_MAX_CSR_SIZE
#define LIBSPDM_MAX_CSR_SIZE 0x1000
#endif
/* To ensure integrity in communication between the Requester and the Responder libspdm calculates
* cryptographic digests and signatures over multiple requests and responses. This value specifies
* whether libspdm will use a running calculation over the transcript, where requests and responses
* are discarded as they are cryptographically consumed, or whether libspdm will buffer the entire
* transcript before calculating the digest or signature.
*/
#ifndef LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT
#define LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT 0
#endif
/* Cryptography Configuration
* In each category, at least one should be selected.
* NOTE: Not all combinations are supported, e.g. do not mix NIST algorithms with SMx. */
#ifndef LIBSPDM_RSA_SSA_SUPPORT
#define LIBSPDM_RSA_SSA_SUPPORT 1
#endif
#ifndef LIBSPDM_RSA_PSS_SUPPORT
#define LIBSPDM_RSA_PSS_SUPPORT 1
#endif
#ifndef LIBSPDM_ECDSA_SUPPORT
#define LIBSPDM_ECDSA_SUPPORT 1
#endif
#ifndef LIBSPDM_SM2_DSA_SUPPORT
#define LIBSPDM_SM2_DSA_SUPPORT 1
#endif
#ifndef LIBSPDM_EDDSA_ED25519_SUPPORT
#define LIBSPDM_EDDSA_ED25519_SUPPORT 1
#endif
#ifndef LIBSPDM_EDDSA_ED448_SUPPORT
#define LIBSPDM_EDDSA_ED448_SUPPORT 1
#endif
#ifndef LIBSPDM_FFDHE_SUPPORT
#define LIBSPDM_FFDHE_SUPPORT 1
#endif
#ifndef LIBSPDM_ECDHE_SUPPORT
#define LIBSPDM_ECDHE_SUPPORT 1
#endif
#ifndef LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT
#define LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT 1
#endif
#ifndef LIBSPDM_AEAD_GCM_SUPPORT
#define LIBSPDM_AEAD_GCM_SUPPORT 1
#endif
#ifndef LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT
#define LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT 1
#endif
#ifndef LIBSPDM_AEAD_SM4_SUPPORT
#define LIBSPDM_AEAD_SM4_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA256_SUPPORT
#define LIBSPDM_SHA256_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA384_SUPPORT
#define LIBSPDM_SHA384_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA512_SUPPORT
#define LIBSPDM_SHA512_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA3_256_SUPPORT
#define LIBSPDM_SHA3_256_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA3_384_SUPPORT
#define LIBSPDM_SHA3_384_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA3_512_SUPPORT
#define LIBSPDM_SHA3_512_SUPPORT 1
#endif
#ifndef LIBSPDM_SM3_256_SUPPORT
#define LIBSPDM_SM3_256_SUPPORT 1
#endif
/* Code space optimization for Optional request/response messages.*/
/* Consumers of libspdm may wish to not fully implement all of the optional
* SPDM request/response messages. Therefore we have provided these
* LIBSPDM_ENABLE_CAPABILITY_***_CAP compile-time switches as an optimization
* to disable the code (#if 0) related to said optional capability, thereby
* reducing the code space used in the image.*/
/* A single switch may enable/disable a single capability or group of related
* capabilities.*/
/* LIBSPDM_ENABLE_CAPABILITY_CERT_CAP - Enable/Disable single CERT capability.
* LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP - Enable/Disable single CHAL capability.
* LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP - Enable/Disable multiple MEAS capabilities:
* (MEAS_CAP_NO_SIG, MEAS_CAP_SIG, MEAS_FRESH_CAP)*/
/* LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP - Enable/Disable single Key Exchange capability.
* LIBSPDM_ENABLE_CAPABILITY_PSK_EX_CAP - Enable/Disable PSK_EX and PSK_FINISH.*/
/* LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP - Enable/Disable mutual authentication.
* LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP - Enable/Disable encapsulated message.*/
/* LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP - Enable/Disable get csr capability.
* LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP - Enable/Disable set certificate capability. */
#ifndef LIBSPDM_ENABLE_CAPABILITY_CERT_CAP
#define LIBSPDM_ENABLE_CAPABILITY_CERT_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP
#define LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP
#define LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP
#define LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_PSK_EX_CAP
#define LIBSPDM_ENABLE_CAPABILITY_PSK_EX_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP
#define LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP
#define LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
#define LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP
#define LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP
#define LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP
#define LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP 1
#endif
/*
* MinDataTransferSize = 42
*
* H = HashLen = HmacLen = [32, 64]
* S = SigLen = [64, 512]
* D = ExchangeDataLen = [64, 512]
* R = RequesterContextLen >= 32
* R = ResponderContextLen >= 0
* O = OpaqueDataLen <= 1024
*
* Max Chunk No = 1, if (message size <= 42)
* Max Chunk No = [(message size + 4) / 30] roundup, if (message size > 42)
*
* +==========================+==========================================+=========+
* | Command                  | Size                                     |MaxChunk |
* +==========================+==========================================+=========+
* | GET_VERSION              | 4                                        | 1       |
* | VERSION {1.0, 1.1, 1.2}  | 6 + 2 * 3 = 12                           | 1       |
* +--------------------------+------------------------------------------+---------+
* | GET_CAPABILITIES 1.2     | 20                                       | 1       |
* | CAPABILITIES 1.2         | 20                                       | 1       |
* +--------------------------+------------------------------------------+---------+
* | ERROR                    | 4                                        | 1       |
* | ERROR(ResponseTooLarge)  | 4 + 4 = 8                                | 1       |
* | ERROR(LargeResponse)     | 4 + 1 = 5                                | 1       |
* | ERROR(ResponseNotReady)  | 4 + 4 = 8                                | 1       |
* +--------------------------+------------------------------------------+---------+
* | CHUNK_SEND header        | 12 + L0 (0 or 4)                         | 1       |
* | CHUNK_RESPONSE header    | 12 + L0 (0 or 4)                         | 1       |
* +==========================+==========================================+=========+
* | NEGOTIATE_ALGORITHMS 1.2 | 32 + 4 * 4 = 48                          | 2       |
* | ALGORITHMS 1.2           | 36 + 4 * 4 = 52                          | 2       |
* +--------------------------+------------------------------------------+---------+
* | GET_DIGESTS 1.2          | 4                                        | 1       |
* | DIGESTS 1.2              | 4 + H * SlotNum = [36, 516]              | [1, 18] |
* +--------------------------+------------------------------------------+---------+
* | GET_CERTIFICATE 1.2      | 8                                        | 1       |
* | CERTIFICATE 1.2          | 8 + PortionLen                           | [1, ]   |
* +--------------------------+------------------------------------------+---------+
* | CHALLENGE 1.2            | 40                                       | 1       |
* | CHALLENGE_AUTH 1.2       | 38 + H * 2 + S [+ O] = [166, 678]        | [6, 23] |
* +--------------------------+------------------------------------------+---------+
* | GET_MEASUREMENTS 1.2     | 5 + Nonce (0 or 32)                      | 1       |
* | MEASUREMENTS 1.2         | 42 + MeasRecLen (+ S) [+ O] = [106, 554] | [4, 19] |
* +--------------------------+------------------------------------------+---------+
* | KEY_EXCHANGE 1.2         | 42 + D [+ O] = [106, 554]                | [4, 19] |
* | KEY_EXCHANGE_RSP 1.2     | 42 + D + H + S (+ H) [+ O] = [234, 1194] | [8, 40] |
* +--------------------------+------------------------------------------+---------+
* | FINISH 1.2               | 4 (+ S) + H = [100, 580]                 | [4, 20] |
* | FINISH_RSP 1.2           | 4 (+ H) = [36, 69]                       | [1, 3]  |
* +--------------------------+------------------------------------------+---------+
* | PSK_EXCHANGE 1.2         | 12 [+ PSKHint] + R [+ O] = 44            | 2       |
* | PSK_EXCHANGE_RSP 1.2     | 12 + R + H (+ H) [+ O] = [108, 172]      | [4, 6]  |
* +--------------------------+------------------------------------------+---------+
* | PSK_FINISH 1.2           | 4 + H = [36, 68]                         | [1, 3]  |
* | PSK_FINISH_RSP 1.2       | 4                                        | 1       |
* +--------------------------+------------------------------------------+---------+
* | GET_CSR 1.2              | 8 + RequesterInfoLen [+ O]               | [1, ]   |
* | CSR 1.2                  | 8 + CSRLength                            | [1, ]   |
* +--------------------------+------------------------------------------+---------+
* | SET_CERTIFICATE 1.2      | 4 + CertChainLen                         | [1, ]   |
* | SET_CERTIFICATE_RSP 1.2  | 4                                        | 1       |
* +==========================+==========================================+=========+
*/
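/*
 * Worked example, not part of the upstream header, of the chunking arithmetic
 * above: a chunk header consumes 12 bytes of each 42-byte transfer (plus a
 * 4-byte large-message size field overall), leaving 30 payload bytes per
 * chunk, hence the roundup of (message size + 4) / 30. example_max_chunks is
 * a hypothetical helper name.
 */
static inline size_t example_max_chunks(size_t message_size)
{
    if (message_size <= 42) {
        return 1;
    }
    return (message_size + 4 + 29) / 30; /* round up */
}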
/* Maximum size of a large SPDM message.
* If chunking is unsupported, it must be the same as LIBSPDM_DATA_TRANSFER_SIZE.
* If chunking is supported, it must be larger than LIBSPDM_DATA_TRANSFER_SIZE.
* It matches MaxSPDMmsgSize in SPDM specification. */
#ifndef LIBSPDM_MAX_SPDM_MSG_SIZE
#define LIBSPDM_MAX_SPDM_MSG_SIZE LIBSPDM_MAX_MESSAGE_BUFFER_SIZE
#endif
/* Maximum size of a single SPDM message.
* It matches DataTransferSize in SPDM specification. */
#ifndef LIBSPDM_DATA_TRANSFER_SIZE
#define LIBSPDM_DATA_TRANSFER_SIZE LIBSPDM_MAX_MESSAGE_BUFFER_SIZE
#endif
/* Required sender/receiver buffer size for device I/O.
* NOTE: This is transport specific. The configuration below is just an example.
* +-------+--------+---------------------------+------+--+------+---+--------+-----+
* | TYPE  |TransHdr|      EncryptionHeader     |AppHdr|  |Random|MAC|AlignPad|FINAL|
* |       |        |SessionId|SeqNum|Len|AppLen|      |  |      |   |        |     |
* +-------+--------+---------------------------+------+  +------+---+--------+-----+
* | MCTP  |    1   |    4    |   2  | 2 |   2  |   1  |  |  32  | 12|    0   |  56 |
* |PCI_DOE|    8   |    4    |   0  | 2 |   2  |   0  |  |   0  | 12|    3   |  31 |
* +-------+--------+---------------------------+------+--+------+---+--------+-----+
*/
#ifndef LIBSPDM_TRANSPORT_ADDITIONAL_SIZE
#define LIBSPDM_TRANSPORT_ADDITIONAL_SIZE 64
#endif
#ifndef LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE
#define LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE (LIBSPDM_DATA_TRANSFER_SIZE + \
LIBSPDM_TRANSPORT_ADDITIONAL_SIZE)
#endif
/* Required scratch buffer size for libspdm internal usage.
* It may be used to hold the encrypted/decrypted message and/or last sent/received message.
* It may be used to hold the large request/response and intermediate send/receive buffer
* in case of chunking.
*
* If chunking is not supported, it may be just LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE.
* If chunking is supported, it should be at least the layout shown below.
*
* +---------------+--------------+--------------------------+------------------------------+
* |SECURE_MESSAGE |LARGE_MESSAGE |     SENDER_RECEIVER      |    LARGE_SENDER_RECEIVER     |
* +---------------+--------------+--------------------------+------------------------------+
* |<-Secure msg ->|<-Large msg ->|<-Snd/Rcv buf for chunk ->|<-Snd/Rcv buf for large msg ->|
*
* The value is NOT configurable.
* The value MAY be changed in a different libspdm version.
* It is exposed here only so that the libspdm consumer can see how the size is derived at build time.
*/
#if LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP
/* first section */
#define LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_OFFSET 0
#define LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)
/* second section */
#define LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_OFFSET (LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY)
#define LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)
/* third section */
#define LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_OFFSET \
(LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY + \
LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY)
#define LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)
/* fourth section */
#define LIBSPDM_SCRATCH_BUFFER_LARGE_SENDER_RECEIVER_OFFSET \
(LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY + \
LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY + \
LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_CAPACITY)
#define LIBSPDM_SCRATCH_BUFFER_LARGE_SENDER_RECEIVER_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)
#define LIBSPDM_SCRATCH_BUFFER_SIZE (LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY + \
LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY + \
LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_CAPACITY + \
LIBSPDM_SCRATCH_BUFFER_LARGE_SENDER_RECEIVER_CAPACITY \
)
#else
#define LIBSPDM_SCRATCH_BUFFER_SIZE (LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE)
#endif
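/*
 * Worked example, not part of the upstream header: with the default
 * LIBSPDM_MAX_SPDM_MSG_SIZE of 0x1200, the four sections above start at
 * offsets 0x0000, 0x1200, 0x2400 and 0x3600, giving a 0x4800-byte scratch
 * buffer. The illustrative compile-time check below merely restates that
 * layout.
 */
#if LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP
_Static_assert(LIBSPDM_SCRATCH_BUFFER_SIZE == 4 * LIBSPDM_MAX_SPDM_MSG_SIZE,
               "scratch buffer must hold the four documented sections");
#endif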
/* Enable message logging.
* See https://github.com/DMTF/libspdm/blob/main/doc/user_guide.md#message-logging
* for more information */
#ifndef LIBSPDM_ENABLE_MSG_LOG
#define LIBSPDM_ENABLE_MSG_LOG 1
#endif
/* Enable macro checking during compilation. */
#ifndef LIBSPDM_CHECK_MACRO
#define LIBSPDM_CHECK_MACRO 0
#endif
#endif /* SPDM_LIB_CONFIG_H */


@@ -0,0 +1,470 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
#include "nvspdm_cryptlib_extensions.h"
#ifdef USE_LKCA
#define BUFFER_SIZE (2 * 1024 * 1024)
#define AUTH_TAG_SIZE 16
struct lkca_aead_ctx
{
struct crypto_aead *aead;
struct aead_request *req;
char *a_data_buffer;
char *in_buffer;
char *out_buffer;
char tag[AUTH_TAG_SIZE];
};
#endif
int libspdm_aead_prealloc(void **context, char const *alg)
{
#ifndef USE_LKCA
return -ENODEV;
#else
struct lkca_aead_ctx *ctx;
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL) {
return -ENOMEM;
}
memset(ctx, 0, sizeof(*ctx));
ctx->aead = crypto_alloc_aead(alg, CRYPTO_ALG_TYPE_AEAD, 0);
if (IS_ERR(ctx->aead)) {
pr_notice("could not allocate AEAD algorithm\n");
kfree(ctx);
return -ENODEV;
}
ctx->req = aead_request_alloc(ctx->aead, GFP_KERNEL);
if (ctx->req == NULL) {
pr_info("could not allocate aead request\n");
crypto_free_aead(ctx->aead);
kfree(ctx);
return -ENOMEM;
}
ctx->a_data_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
if (ctx->a_data_buffer == NULL) {
aead_request_free(ctx->req);
crypto_free_aead(ctx->aead);
kfree(ctx);
return -ENOMEM;
}
ctx->in_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
if (ctx->in_buffer == NULL) {
kfree(ctx->a_data_buffer);
aead_request_free(ctx->req);
crypto_free_aead(ctx->aead);
kfree(ctx);
return -ENOMEM;
}
ctx->out_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
if (ctx->out_buffer == NULL) {
kfree(ctx->a_data_buffer);
kfree(ctx->in_buffer);
aead_request_free(ctx->req);
crypto_free_aead(ctx->aead);
kfree(ctx);
return -ENOMEM;
}
*context = ctx;
return 0;
#endif
}
void libspdm_aead_free(void *context)
{
#ifdef USE_LKCA
struct lkca_aead_ctx *ctx = context;
aead_request_free(ctx->req);
crypto_free_aead(ctx->aead);
kfree(ctx->a_data_buffer);
kfree(ctx->in_buffer);
kfree(ctx->out_buffer);
kfree(ctx);
#endif
}
#define SG_AEAD_AAD 0
#define SG_AEAD_TEXT 1
#define SG_AEAD_SIG 2
// Number of fields in AEAD scatterlist
#define SG_AEAD_LEN 3
#ifdef USE_LKCA
// This function doesn't do any allocations; it uses the context's preallocated buffers instead
static int lkca_aead_internal(struct crypto_aead *aead,
struct aead_request *req,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
struct scatterlist sg_in[],
struct scatterlist sg_out[],
size_t a_data_size,
size_t data_in_size,
size_t *data_out_size,
size_t tag_size,
bool enc)
{
DECLARE_CRYPTO_WAIT(wait);
int rc = 0;
if (crypto_aead_setkey(aead, key, key_size)) {
pr_info("key could not be set\n");
return -EINVAL;
}
if (crypto_aead_ivsize(aead) != iv_size) {
pr_info("iv could not be set\n");
return -EINVAL;
}
aead_request_set_ad(req, a_data_size);
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait);
if (enc) {
aead_request_set_crypt(req, sg_in, sg_out, data_in_size, (u8 *) iv);
rc = crypto_wait_req(crypto_aead_encrypt(req), &wait);
} else {
aead_request_set_crypt(req, sg_in, sg_out, data_in_size + tag_size, (u8 *) iv);
rc = crypto_wait_req(crypto_aead_decrypt(req), &wait);
}
if (rc != 0) {
pr_info("AEAD operation FAILED: %d\n", rc);
}
*data_out_size = data_in_size;
return rc;
}
#endif
int libspdm_aead_prealloced(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size,
bool enc)
{
#ifndef USE_LKCA
return -ENODEV;
#else
int rc = 0;
struct scatterlist sg_in[SG_AEAD_LEN];
struct scatterlist sg_out[SG_AEAD_LEN];
struct lkca_aead_ctx *ctx = context;
sg_init_table(sg_in, SG_AEAD_LEN);
sg_init_table(sg_out, SG_AEAD_LEN);
if (!virt_addr_valid(a_data)) {
if (a_data_size > BUFFER_SIZE) {
return -ENOMEM;
}
sg_set_buf(&sg_in[SG_AEAD_AAD], ctx->a_data_buffer, a_data_size);
sg_set_buf(&sg_out[SG_AEAD_AAD], ctx->a_data_buffer, a_data_size);
memcpy(ctx->a_data_buffer, a_data, a_data_size);
} else {
sg_set_buf(&sg_in[SG_AEAD_AAD], a_data, a_data_size);
sg_set_buf(&sg_out[SG_AEAD_AAD], a_data, a_data_size);
}
if (!virt_addr_valid(data_in)) {
if (data_in_size > BUFFER_SIZE) {
return -ENOMEM;
}
sg_set_buf(&sg_in[SG_AEAD_TEXT], ctx->in_buffer, data_in_size);
memcpy(ctx->in_buffer, data_in, data_in_size);
} else {
sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in, data_in_size);
}
if (!virt_addr_valid(data_out)) {
if (data_in_size > BUFFER_SIZE) {
return -ENOMEM;
}
sg_set_buf(&sg_out[SG_AEAD_TEXT], ctx->out_buffer, data_in_size);
} else {
sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out, data_in_size);
}
// Tag is small enough that memcpy is cheaper than checking if page is virtual
if (tag_size > AUTH_TAG_SIZE) {
return -ENOMEM;
}
sg_set_buf(&sg_in[SG_AEAD_SIG], ctx->tag, tag_size);
sg_set_buf(&sg_out[SG_AEAD_SIG], ctx->tag, tag_size);
if (!enc)
memcpy(ctx->tag, tag, tag_size);
rc = lkca_aead_internal(ctx->aead, ctx->req, key, key_size, iv, iv_size,
sg_in, sg_out, a_data_size, data_in_size,
data_out_size, tag_size, enc);
if (enc) {
memcpy(tag, ctx->tag, tag_size);
}
if (!virt_addr_valid(data_out)) {
memcpy(data_out, ctx->out_buffer, data_in_size);
}
return rc;
#endif
}
int libspdm_aead(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size,
bool enc, char const *alg)
{
#ifndef USE_LKCA
return -ENODEV;
#else
struct crypto_aead *aead = NULL;
struct aead_request *req = NULL;
struct scatterlist sg_in[SG_AEAD_LEN];
struct scatterlist sg_out[SG_AEAD_LEN];
uint8_t *a_data_shadow = NULL;
uint8_t *data_in_shadow = NULL;
uint8_t *data_out_shadow = NULL;
uint8_t *tag_shadow = NULL;
int rc = 0;
aead = crypto_alloc_aead(alg, CRYPTO_ALG_TYPE_AEAD, 0);
if (IS_ERR(aead)) {
pr_notice("could not allocate AEAD algorithm\n");
return -ENODEV;
}
req = aead_request_alloc(aead, GFP_KERNEL);
if (req == NULL) {
pr_info("could not allocate aead request\n");
rc = -ENOMEM;
goto out;
}
sg_init_table(sg_in, SG_AEAD_LEN);
sg_init_table(sg_out, SG_AEAD_LEN);
if (!virt_addr_valid(a_data)) {
a_data_shadow = kmalloc(a_data_size, GFP_KERNEL);
if (a_data_shadow == NULL) {
rc = -ENOMEM;
goto out;
}
sg_set_buf(&sg_in[SG_AEAD_AAD], a_data_shadow, a_data_size);
sg_set_buf(&sg_out[SG_AEAD_AAD], a_data_shadow, a_data_size);
memcpy(a_data_shadow, a_data, a_data_size);
} else {
sg_set_buf(&sg_in[SG_AEAD_AAD], a_data, a_data_size);
sg_set_buf(&sg_out[SG_AEAD_AAD], a_data, a_data_size);
}
if (!virt_addr_valid(data_in)) {
data_in_shadow = kmalloc(data_in_size, GFP_KERNEL);
if (data_in_shadow == NULL) {
rc = -ENOMEM;
goto out;
}
sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in_shadow, data_in_size);
memcpy(data_in_shadow, data_in, data_in_size);
} else {
sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in, data_in_size);
}
if (!virt_addr_valid(data_out)) {
data_out_shadow = kmalloc(data_in_size, GFP_KERNEL);
if (data_out_shadow == NULL) {
rc = -ENOMEM;
goto out;
}
sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out_shadow, data_in_size);
} else {
sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out, data_in_size);
}
if (!virt_addr_valid(tag)) {
tag_shadow = kmalloc(tag_size, GFP_KERNEL);
if (tag_shadow == NULL) {
rc = -ENOMEM;
goto out;
}
sg_set_buf(&sg_in[SG_AEAD_SIG], tag_shadow, tag_size);
sg_set_buf(&sg_out[SG_AEAD_SIG], tag_shadow, tag_size);
if (!enc)
memcpy(tag_shadow, tag, tag_size);
} else {
sg_set_buf(&sg_in[SG_AEAD_SIG], tag, tag_size);
sg_set_buf(&sg_out[SG_AEAD_SIG], tag, tag_size);
}
rc = lkca_aead_internal(aead, req, key, key_size, iv, iv_size,
sg_in, sg_out, a_data_size, data_in_size,
data_out_size, tag_size, enc);
if (enc && (tag_shadow != NULL))
memcpy((uint8_t *) tag, tag_shadow, tag_size);
if (data_out_shadow != NULL)
memcpy(data_out, data_out_shadow, data_in_size);
out:
if (a_data_shadow != NULL)
kfree(a_data_shadow);
if (data_in_shadow != NULL)
kfree(data_in_shadow);
if (data_out_shadow != NULL)
kfree(data_out_shadow);
if (tag_shadow != NULL)
kfree(tag_shadow);
if (aead != NULL)
crypto_free_aead(aead);
if (req != NULL)
aead_request_free(req);
return rc;
#endif
}
// Wrappers to match the libspdm API
bool libspdm_aead_gcm_prealloc(void **context)
{
return libspdm_aead_prealloc(context, "gcm(aes)") == 0;
}
bool libspdm_aead_aes_gcm_encrypt_prealloc(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag_out, size_t tag_size,
uint8_t *data_out, size_t *data_out_size)
{
int32_t ret;
if (data_in_size > INT_MAX) {
return false;
}
if (a_data_size > INT_MAX) {
return false;
}
if (iv_size != 12) {
return false;
}
switch (key_size) {
case 16:
case 24:
case 32:
break;
default:
return false;
}
if ((tag_size < 12) || (tag_size > 16)) {
return false;
}
if (data_out_size != NULL) {
if ((*data_out_size > INT_MAX) ||
(*data_out_size < data_in_size)) {
return false;
}
}
ret = libspdm_aead_prealloced(context, key, key_size, iv, iv_size,
a_data, a_data_size, data_in, data_in_size,
tag_out, tag_size, data_out, data_out_size, true);
if (data_out_size != NULL) {
*data_out_size = data_in_size;
}
return ret == 0;
}
bool libspdm_aead_aes_gcm_decrypt_prealloc(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size)
{
int ret;
if (data_in_size > INT_MAX) {
return false;
}
if (a_data_size > INT_MAX) {
return false;
}
if (iv_size != 12) {
return false;
}
switch (key_size) {
case 16:
case 24:
case 32:
break;
default:
return false;
}
if ((tag_size < 12) || (tag_size > 16)) {
return false;
}
if (data_out_size != NULL) {
if ((*data_out_size > INT_MAX) ||
(*data_out_size < data_in_size)) {
return false;
}
}
ret = libspdm_aead_prealloced(context, key, key_size, iv, iv_size,
a_data, a_data_size, data_in, data_in_size,
(uint8_t *) tag, tag_size, data_out, data_out_size, false);
if (data_out_size != NULL) {
*data_out_size = data_in_size;
}
return ret == 0;
}
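/*
 * Illustrative usage, not part of the original source, of the
 * preallocated-context wrappers above: one context is allocated up front,
 * reused for an encrypt/decrypt round trip, and then released with
 * libspdm_aead_free(). example_gcm_round_trip and all buffer names are
 * hypothetical; real callers manage sizes more carefully.
 */
static bool example_gcm_round_trip(const uint8_t key[32], const uint8_t iv[12],
                                   const uint8_t *aad, size_t aad_size,
                                   const uint8_t *plain, size_t plain_size,
                                   uint8_t *cipher, uint8_t *decrypted)
{
    void *ctx;
    uint8_t tag[16];
    size_t out_size = plain_size;
    bool ok;

    if (!libspdm_aead_gcm_prealloc(&ctx)) {
        return false;
    }
    ok = libspdm_aead_aes_gcm_encrypt_prealloc(ctx, key, 32, iv, 12,
                                               aad, aad_size,
                                               plain, plain_size,
                                               tag, sizeof(tag),
                                               cipher, &out_size);
    if (ok) {
        out_size = plain_size;
        ok = libspdm_aead_aes_gcm_decrypt_prealloc(ctx, key, 32, iv, 12,
                                                   aad, aad_size,
                                                   cipher, plain_size,
                                                   tag, sizeof(tag),
                                                   decrypted, &out_size);
    }
    libspdm_aead_free(ctx);
    return ok;
}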


@@ -0,0 +1,117 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
bool libspdm_aead_aes_gcm_encrypt(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag_out, size_t tag_size,
uint8_t *data_out, size_t *data_out_size)
{
int32_t ret;
if (data_in_size > INT_MAX) {
return false;
}
if (a_data_size > INT_MAX) {
return false;
}
if (iv_size != 12) {
return false;
}
switch (key_size) {
case 16:
case 24:
case 32:
break;
default:
return false;
}
if ((tag_size < 12) || (tag_size > 16)) {
return false;
}
if (data_out_size != NULL) {
if ((*data_out_size > INT_MAX) ||
(*data_out_size < data_in_size)) {
return false;
}
}
ret = libspdm_aead(key, key_size, iv, iv_size, a_data, a_data_size,
data_in, data_in_size, tag_out, tag_size,
data_out, data_out_size, true, "gcm(aes)");
if (data_out_size != NULL) {
*data_out_size = data_in_size;
}
return ret == 0;
}
bool libspdm_aead_aes_gcm_decrypt(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size)
{
int ret;
if (data_in_size > INT_MAX) {
return false;
}
if (a_data_size > INT_MAX) {
return false;
}
if (iv_size != 12) {
return false;
}
switch (key_size) {
case 16:
case 24:
case 32:
break;
default:
return false;
}
if ((tag_size < 12) || (tag_size > 16)) {
return false;
}
if (data_out_size != NULL) {
if ((*data_out_size > INT_MAX) ||
(*data_out_size < data_in_size)) {
return false;
}
}
ret = libspdm_aead(key, key_size, iv, iv_size, a_data, a_data_size,
data_in, data_in_size, tag, tag_size,
data_out, data_out_size, false, "gcm(aes)");
if (data_out_size != NULL) {
*data_out_size = data_in_size;
}
return ret == 0;
}


@@ -0,0 +1,172 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
static bool lkca_ecdsa_sign(void *ec_context,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size)
{
return false;
}
bool libspdm_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
size_t public_key_size)
{
if (ec_context == NULL || public_key == NULL) {
return false;
}
return lkca_ec_set_pub_key(ec_context, public_key, public_key_size);
}
bool libspdm_ec_get_pub_key(void *ec_context, uint8_t *public_key,
size_t *public_key_size)
{
if (ec_context == NULL || public_key_size == NULL) {
return false;
}
if (public_key == NULL && *public_key_size != 0) {
return false;
}
return lkca_ec_get_pub_key(ec_context, public_key, public_key_size);
}
bool libspdm_ec_check_key(const void *ec_context)
{
/* TBD: key validation not implemented yet; report the key as valid */
return true;
}
bool libspdm_ec_generate_key(void *ec_context, uint8_t *public_data,
size_t *public_size)
{
if (ec_context == NULL || public_size == NULL) {
return false;
}
if (public_data == NULL && *public_size != 0) {
return false;
}
return lkca_ec_generate_key(ec_context, public_data, public_size);
}
bool libspdm_ec_compute_key(void *ec_context, const uint8_t *peer_public,
size_t peer_public_size, uint8_t *key,
size_t *key_size)
{
if (ec_context == NULL || peer_public == NULL || key_size == NULL ||
key == NULL) {
return false;
}
if (peer_public_size > INT_MAX) {
return false;
}
return lkca_ec_compute_key(ec_context, peer_public, peer_public_size, key,
key_size);
}
bool libspdm_ecdsa_sign(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size)
{
if (ec_context == NULL || message_hash == NULL) {
return false;
}
if (signature == NULL) {
return false;
}
switch (hash_nid) {
case LIBSPDM_CRYPTO_NID_SHA256:
if (hash_size != LIBSPDM_SHA256_DIGEST_SIZE) {
return false;
}
break;
case LIBSPDM_CRYPTO_NID_SHA384:
if (hash_size != LIBSPDM_SHA384_DIGEST_SIZE) {
return false;
}
break;
case LIBSPDM_CRYPTO_NID_SHA512:
if (hash_size != LIBSPDM_SHA512_DIGEST_SIZE) {
return false;
}
break;
default:
return false;
}
return lkca_ecdsa_sign(ec_context, message_hash, hash_size, signature, sig_size);
}
bool libspdm_ecdsa_verify(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size)
{
if (ec_context == NULL || message_hash == NULL || signature == NULL) {
return false;
}
if (sig_size > INT_MAX || sig_size == 0) {
return false;
}
switch (hash_nid) {
case LIBSPDM_CRYPTO_NID_SHA256:
if (hash_size != LIBSPDM_SHA256_DIGEST_SIZE) {
return false;
}
break;
case LIBSPDM_CRYPTO_NID_SHA384:
if (hash_size != LIBSPDM_SHA384_DIGEST_SIZE) {
return false;
}
break;
case LIBSPDM_CRYPTO_NID_SHA512:
if (hash_size != LIBSPDM_SHA512_DIGEST_SIZE) {
return false;
}
break;
default:
return false;
}
return lkca_ecdsa_verify(ec_context, hash_nid, message_hash, hash_size,
signature, sig_size);
}
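A short illustrative verify call, assuming a P-384 context with its public key already set; the raw signature layout is r || s, 48 bytes each for P-384:

void *ec_ctx = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP384R1);
uint8_t digest[LIBSPDM_SHA384_DIGEST_SIZE]; /* SHA-384 of the signed message */
uint8_t sig[96];                            /* r || s, big-endian, fixed width */
bool ok;

/* ... populate digest and sig, set the public key via libspdm_ec_set_pub_key ... */
ok = libspdm_ecdsa_verify(ec_ctx, LIBSPDM_CRYPTO_NID_SHA384,
                          digest, sizeof(digest), sig, sizeof(sig));
libspdm_ec_free(ec_ctx);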

View File

@@ -0,0 +1,326 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
#ifdef USE_LKCA
#include <linux/module.h>
MODULE_SOFTDEP("pre: ecdh_generic,ecdsa_generic");
#include <crypto/akcipher.h>
#include <crypto/ecdh.h>
#include <crypto/internal/ecc.h>
struct ecc_ctx {
unsigned int curve_id;
u64 priv_key[ECC_MAX_DIGITS]; // In big endian
struct {
// ecdsa wants byte preceding pub_key to be set to '4'
u64 pub_key_prefix;
u64 pub_key[2 * ECC_MAX_DIGITS];
};
bool pub_key_set;
bool priv_key_set;
char const *name;
int size;
};
#endif
void *libspdm_ec_new_by_nid(size_t nid)
{
#ifndef USE_LKCA
return NULL;
#else
struct ecc_ctx *ctx;
if ((nid != LIBSPDM_CRYPTO_NID_SECP256R1) && (nid != LIBSPDM_CRYPTO_NID_SECP384R1)){
return NULL;
}
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
return NULL;
}
if (nid == LIBSPDM_CRYPTO_NID_SECP256R1) {
ctx->curve_id = ECC_CURVE_NIST_P256;
ctx->size = 64;
ctx->name = "ecdsa-nist-p256";
} else {
ctx->curve_id = ECC_CURVE_NIST_P384;
ctx->size = 96;
ctx->name = "ecdsa-nist-p384";
}
ctx->pub_key_set = false;
ctx->priv_key_set = false;
return ctx;
#endif
}
void libspdm_ec_free(void *ec_context)
{
#ifdef USE_LKCA
kfree(ec_context);
#endif
}
bool lkca_ecdsa_set_priv_key(void *context, uint8_t *key, size_t key_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = context;
unsigned int ndigits = ctx->size / 16;
if (key_size != (ctx->size / 2)) {
return false;
}
memcpy(ctx->priv_key, key, key_size);
// XXX: if this fails, do we want to retry generating new key?
if(ecc_make_pub_key(ctx->curve_id, ndigits, ctx->priv_key, ctx->pub_key)) {
return false;
}
ctx->pub_key_set = true;
ctx->priv_key_set = true;
return true;
#endif
}
bool lkca_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
size_t public_key_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = ec_context;
struct ecc_point pub_key;
unsigned int ndigits;
if (public_key_size != ctx->size) {
return false;
}
// We can reuse pub_key for now
ndigits = ctx->size / 16;
pub_key = ECC_POINT_INIT(ctx->pub_key, ctx->pub_key + ndigits, ndigits);
ecc_swap_digits(public_key, ctx->pub_key, ndigits);
ecc_swap_digits(((u64 *)public_key) + ndigits, ctx->pub_key + ndigits, ndigits);
if(ecc_is_pubkey_valid_full(ecc_get_curve(ctx->curve_id), &pub_key)) {
return false;
}
memcpy(ctx->pub_key, public_key, public_key_size);
ctx->pub_key_set = true;
return true;
#endif
}
bool lkca_ec_get_pub_key(void *ec_context, uint8_t *public_key,
size_t *public_key_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = ec_context;
if (*public_key_size < ctx->size) {
*public_key_size = ctx->size;
return false;
}
*public_key_size = ctx->size;
memcpy(public_key, ctx->pub_key, ctx->size);
return true;
#endif
}
bool lkca_ec_generate_key(void *ec_context, uint8_t *public_data,
size_t *public_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = ec_context;
unsigned int ndigits = ctx->size / 16;
if(ecc_gen_privkey(ctx->curve_id, ndigits, ctx->priv_key)) {
return false;
}
// XXX: if this fails, do we want to retry generating new key?
if(ecc_make_pub_key(ctx->curve_id, ndigits, ctx->priv_key, ctx->pub_key)) {
return false;
}
memcpy(public_data, ctx->pub_key, ctx->size);
*public_size = ctx->size;
ctx->priv_key_set = true;
ctx->pub_key_set = true;
return true;
#endif
}
bool lkca_ec_compute_key(void *ec_context, const uint8_t *peer_public,
size_t peer_public_size, uint8_t *key,
size_t *key_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = ec_context;
if (peer_public_size != ctx->size) {
return false;
}
if (!ctx->priv_key_set) {
return false;
}
if ((ctx->size / 2) > *key_size) {
return false;
}
if (crypto_ecdh_shared_secret(ctx->curve_id, ctx->size / 16,
(const u64 *) ctx->priv_key,
(const u64 *) peer_public,
(u64 *) key)) {
return false;
}
*key_size = ctx->size / 2;
return true;
#endif
}
bool lkca_ecdsa_verify(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = ec_context;
// Roundabout way: the kernel's akcipher verify expects a BER-encoded
// signature, so one is built by hand below
u64 ber_max_len = 3 + 2 * (4 + (ECC_MAX_BYTES));
u64 ber_len = 0;
u8 *ber = NULL;
u8 *pub_key;
struct akcipher_request *req = NULL;
struct crypto_akcipher *tfm = NULL;
struct scatterlist sg;
DECLARE_CRYPTO_WAIT(wait);
int err = -ENOMEM; /* covers the early goto paths before err is assigned */
if (sig_size != ctx->size) {
return false;
}
if (!ctx->pub_key_set) {
return false;
}
tfm = crypto_alloc_akcipher(ctx->name, CRYPTO_ALG_TYPE_AKCIPHER, 0);
if (IS_ERR(tfm)) {
pr_info("ALLOC FAILED\n");
return false;
}
pub_key = (u8 *) ctx->pub_key;
pub_key--; // Go back into byte of pub_key_prefix
*pub_key = 4; // And set it to 4 to placate kernel
if ((err = crypto_akcipher_set_pub_key(tfm, pub_key, ctx->size + 1)) != 0) {
pr_info("SET PUB KEY FAILED: %d\n", -err);
goto failTfm;
}
req = akcipher_request_alloc(tfm, GFP_KERNEL);
if (req == NULL) { // akcipher_request_alloc returns NULL, not an ERR_PTR, on failure
pr_info("REQUEST ALLOC FAILED\n");
goto failTfm;
}
// We concatenate signature and hash and ship it to kernel
ber = kmalloc(ber_max_len + hash_size, GFP_KERNEL);
if (ber == NULL) {
goto failReq;
}
// XXX: NOTE: THIS WORKS ONLY FOR 256- AND 384-BIT CURVES. For larger keys
// the length field exceeds 1 byte, which is not handled here!
// Signature
ber[ber_len++] = 0x30;
ber[ber_len++] = 2 * (2 + ctx->size / 2);
ber[ber_len++] = 0x02;
if (signature[0] > 127) {
ber[ber_len++] = ctx->size / 2 + 1;
ber[1]++;
ber[ber_len++] = 0;
} else {
ber[ber_len++] = ctx->size / 2;
}
memcpy(ber + ber_len, signature, sig_size / 2);
ber_len += sig_size / 2;
ber[ber_len++] = 0x02;
if (signature[sig_size / 2] > 127) {
ber[ber_len++] = ctx->size / 2 + 1;
ber[1]++;
ber[ber_len++] = 0;
} else {
ber[ber_len++] = ctx->size / 2;
}
memcpy(ber + ber_len, signature + sig_size / 2, sig_size / 2);
ber_len += sig_size / 2;
// Just append the hash; scatterlist data can't live on the stack anyway
memcpy(ber + ber_len, message_hash, hash_size);
sg_init_one(&sg, ber, ber_len + hash_size);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait);
akcipher_request_set_crypt(req, &sg, NULL, ber_len, hash_size);
err = crypto_wait_req(crypto_akcipher_verify(req), &wait);
if (err != 0){
pr_info("Verify FAILED %d\n", -err);
}
kfree(ber);
failReq:
akcipher_request_free(req);
failTfm:
crypto_free_akcipher(tfm);
return err == 0;
#endif
}
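To make the hand-rolled encoding above easier to audit, this is the byte layout it produces for a P-256 signature whose r and s both have the top bit clear (illustrative, not new code):

/*
 *   30 44        SEQUENCE, length 0x44 (68 bytes)
 *     02 20      INTEGER, length 0x20 (r)
 *     <32 bytes of r>
 *     02 20      INTEGER, length 0x20 (s)
 *     <32 bytes of s>
 *   <hash_size bytes of digest appended for the scatterlist>
 *
 * When the leading byte of r or s is > 0x7f, a 0x00 pad byte is inserted so
 * the INTEGER stays positive, and both the INTEGER length and the SEQUENCE
 * length (ber[1]) grow by one; that is what the signature[0] > 127 and
 * signature[sig_size / 2] > 127 branches handle.
 */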

View File

@@ -0,0 +1,158 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
// RFC 5869 has some very non-intuitive points, reading it is advised
static bool lkca_hkdf_expand_only(struct crypto_shash *alg,
const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
#ifndef USE_LKCA
return false;
#else
int ret;
int i;
uint8_t ctr = 1;
uint8_t tmp[HASH_MAX_DIGESTSIZE];
SHASH_DESC_ON_STACK(desc, alg);
desc->tfm = alg;
ret = crypto_shash_setkey(desc->tfm, prk, prk_size);
if (ret != 0) {
pr_info("key size mismatch %ld\n", prk_size);
return false;
}
for (i = 0, ctr = 1; i < out_size; i += prk_size, ctr++) {
ret = crypto_shash_init(desc);
if (ret) {
return false;
}
if (i != 0) {
ret = crypto_shash_update(desc, out + i - prk_size, prk_size);
if (ret) {
return false;
}
}
if (info_size > 0) {
ret = crypto_shash_update(desc, info, info_size);
if (ret) {
return false;
}
}
ret = crypto_shash_update(desc, &ctr, 1);
if (ret)
return false;
if ((out_size - i) < prk_size) {
ret = crypto_shash_final(desc, tmp);
if (ret) {
return false;
}
memcpy(out + i, tmp, out_size - i);
memzero_explicit(tmp, sizeof(tmp));
} else {
ret = crypto_shash_final(desc, out + i);
if (ret) {
return false;
}
}
}
return true;
#endif
}
bool lkca_hkdf_extract_and_expand(const char *alg_name,
const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
#ifndef USE_LKCA
return false;
#else
int ret = 0;
struct crypto_shash *alg;
uint8_t prk[HASH_MAX_DIGESTSIZE];
if (key == NULL || salt == NULL || info == NULL || out == NULL ||
key_size > sizeof(prk) || salt_size > INT_MAX || info_size > INT_MAX ||
out_size > (sizeof(prk) * 255)) {
return false;
}
alg = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(alg)) {
return false;
}
ret = crypto_shash_setkey(alg, salt, salt_size);
if (ret != 0) {
goto out;
}
ret = crypto_shash_tfm_digest(alg, key, key_size, prk);
if (ret != 0) {
goto out;
}
ret = !lkca_hkdf_expand_only(alg, prk, crypto_shash_digestsize(alg), info, info_size, out, out_size);
out:
crypto_free_shash(alg);
return ret == 0;
#endif
}
bool lkca_hkdf_expand(const char *alg_name,
const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
#ifndef USE_LKCA
return false;
#else
bool ret = false;
struct crypto_shash *alg;
if (prk == NULL || info == NULL || out == NULL || prk_size > (512 / 8) ||
info_size > INT_MAX || (out_size > (prk_size * 255))) {
return false;
}
alg = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(alg)) {
return false;
}
ret = lkca_hkdf_expand_only(alg, prk, prk_size, info, info_size, out, out_size);
crypto_free_shash(alg);
return ret;
#endif
}
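For reference, the expand loop in lkca_hkdf_expand_only implements the RFC 5869 chaining, with prk_size doubling as the hash output length:

/*
 *   T(1) = HMAC(PRK, info || 0x01)
 *   T(n) = HMAC(PRK, T(n-1) || info || n)
 *   OKM  = first out_size bytes of T(1) || T(2) || ...
 *
 * Hypothetical caller, deriving 42 bytes with HMAC-SHA-256 (the PRK must be
 * exactly the digest size, 32 bytes here; buffers are illustrative):
 */
uint8_t prk[32], info[10], okm[42];
bool ok = lkca_hkdf_expand("hmac(sha256)", prk, sizeof(prk),
                           info, sizeof(info), okm, sizeof(okm));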

View File

@@ -0,0 +1,111 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
bool libspdm_hkdf_sha256_extract_and_expand(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
return lkca_hkdf_extract_and_expand("hmac(sha256)", key, key_size,
salt, salt_size, info, info_size,
out, out_size);
}
bool libspdm_hkdf_sha256_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size)
{
if (prk_out_size != (256 / 8))
return false;
return libspdm_hmac_sha256_all(key, key_size, salt, salt_size, prk_out);
}
bool libspdm_hkdf_sha256_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
return lkca_hkdf_expand("hmac(sha256)", prk, prk_size, info, info_size,
out, out_size);
}
bool libspdm_hkdf_sha384_extract_and_expand(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
return lkca_hkdf_extract_and_expand("hmac(sha384)", key, key_size,
salt, salt_size, info, info_size,
out, out_size);
}
bool libspdm_hkdf_sha384_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size)
{
if (prk_out_size != (384 / 8))
return false;
return libspdm_hmac_sha384_all(key, key_size, salt, salt_size, prk_out);
}
bool libspdm_hkdf_sha384_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
return lkca_hkdf_expand("hmac(sha384)", prk, prk_size, info, info_size,
out, out_size);
}
bool libspdm_hkdf_sha512_extract_and_expand(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
return lkca_hkdf_extract_and_expand("hmac(sha512)", key, key_size,
salt, salt_size, info, info_size, out,
out_size);
}
bool libspdm_hkdf_sha512_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size)
{
if (prk_out_size != (512 / 8))
return false;
return libspdm_hmac_sha512_all(key, key_size, salt, salt_size, prk_out);
}
bool libspdm_hkdf_sha512_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
return lkca_hkdf_expand("hmac(sha512)", prk, prk_size, info, info_size,
out, out_size);
}
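Taken together, extract and expand form the usual two-step derivation; a minimal sketch with illustrative buffers (the PRK is always exactly the digest size):

uint8_t ikm[48], salt[32], info[8]; /* illustrative inputs, provisioned elsewhere */
uint8_t prk[32], okm[64];

if (libspdm_hkdf_sha256_extract(ikm, sizeof(ikm), salt, sizeof(salt),
                                prk, sizeof(prk)) &&
    libspdm_hkdf_sha256_expand(prk, sizeof(prk), info, sizeof(info),
                               okm, sizeof(okm))) {
    /* okm now holds the derived key material */
}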

View File

@@ -0,0 +1,282 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
void *libspdm_hmac_sha256_new(void)
{
return lkca_hash_new("hmac(sha256)");
}
void libspdm_hmac_sha256_free(void *hmac_sha256_ctx)
{
lkca_hash_free(hmac_sha256_ctx);
}
bool libspdm_hmac_sha256_set_key(void *hmac_sha256_ctx, const uint8_t *key,
size_t key_size)
{
if (hmac_sha256_ctx == NULL)
return false;
return lkca_hmac_set_key(hmac_sha256_ctx, key, key_size);
}
bool libspdm_hmac_sha256_duplicate(const void *hmac_sha256_ctx,
void *new_hmac_sha256_ctx)
{
if (hmac_sha256_ctx == NULL || new_hmac_sha256_ctx == NULL) {
return false;
}
return lkca_hmac_duplicate(new_hmac_sha256_ctx, hmac_sha256_ctx);
}
bool libspdm_hmac_sha256_update(void *hmac_sha256_ctx, const void *data,
size_t data_size)
{
int32_t ret;
if (hmac_sha256_ctx == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
ret = crypto_shash_update(hmac_sha256_ctx, data, data_size);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_hmac_sha256_final(void *hmac_sha256_ctx, uint8_t *hmac_value)
{
int32_t ret;
if (hmac_sha256_ctx == NULL || hmac_value == NULL) {
return false;
}
ret = crypto_shash_final(hmac_sha256_ctx, hmac_value);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_hmac_sha256_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value)
{
if (hmac_value == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
return lkca_hmac_all("hmac(sha256)", key, key_size, data, data_size, hmac_value);
}
void *libspdm_hmac_sha384_new(void)
{
return lkca_hash_new("hmac(sha384)");
}
void libspdm_hmac_sha384_free(void *hmac_sha384_ctx)
{
lkca_hash_free(hmac_sha384_ctx);
}
bool libspdm_hmac_sha384_set_key(void *hmac_sha384_ctx, const uint8_t *key,
size_t key_size)
{
if (hmac_sha384_ctx == NULL)
return false;
return lkca_hmac_set_key(hmac_sha384_ctx, key, key_size);
}
bool libspdm_hmac_sha384_duplicate(const void *hmac_sha384_ctx,
void *new_hmac_sha384_ctx)
{
if (hmac_sha384_ctx == NULL || new_hmac_sha384_ctx == NULL) {
return false;
}
return lkca_hmac_duplicate(new_hmac_sha384_ctx, hmac_sha384_ctx);
}
bool libspdm_hmac_sha384_update(void *hmac_sha384_ctx, const void *data,
size_t data_size)
{
int32_t ret;
if (hmac_sha384_ctx == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
ret = crypto_shash_update(hmac_sha384_ctx, data, data_size);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_hmac_sha384_final(void *hmac_sha384_ctx, uint8_t *hmac_value)
{
int32_t ret;
if (hmac_sha384_ctx == NULL || hmac_value == NULL) {
return false;
}
ret = crypto_shash_final(hmac_sha384_ctx, hmac_value);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_hmac_sha384_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value)
{
if (hmac_value == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
return lkca_hmac_all("hmac(sha384)", key, key_size, data, data_size, hmac_value);
}
void *libspdm_hmac_sha512_new(void)
{
return lkca_hash_new("hmac(sha512)");
}
void libspdm_hmac_sha512_free(void *hmac_sha512_ctx)
{
lkca_hash_free(hmac_sha512_ctx);
}
bool libspdm_hmac_sha512_set_key(void *hmac_sha512_ctx, const uint8_t *key,
size_t key_size)
{
if (hmac_sha512_ctx == NULL)
return false;
return lkca_hmac_set_key(hmac_sha512_ctx, key, key_size);
}
bool libspdm_hmac_sha512_duplicate(const void *hmac_sha512_ctx,
void *new_hmac_sha512_ctx)
{
if (hmac_sha512_ctx == NULL || new_hmac_sha512_ctx == NULL) {
return false;
}
return lkca_hmac_duplicate(new_hmac_sha512_ctx, hmac_sha512_ctx);
}
bool libspdm_hmac_sha512_update(void *hmac_sha512_ctx, const void *data,
size_t data_size)
{
int32_t ret;
if (hmac_sha512_ctx == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
ret = crypto_shash_update(hmac_sha512_ctx, data, data_size);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_hmac_sha512_final(void *hmac_sha512_ctx, uint8_t *hmac_value)
{
int32_t ret;
if (hmac_sha512_ctx == NULL || hmac_value == NULL) {
return false;
}
ret = crypto_shash_final(hmac_sha512_ctx, hmac_value);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_hmac_sha512_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value)
{
if (hmac_value == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
return lkca_hmac_all("hmac(sha512)", key, key_size, data, data_size, hmac_value);
}
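The wrappers above expose the usual new/set_key/update/final lifecycle; a minimal sketch, with illustrative buffers and intermediate error handling elided:

uint8_t key[32], msg[64], mac[32]; /* mac is digest-sized for SHA-256 */
void *ctx = libspdm_hmac_sha256_new();

if (ctx != NULL &&
    libspdm_hmac_sha256_set_key(ctx, key, sizeof(key)) &&
    libspdm_hmac_sha256_update(ctx, msg, sizeof(msg)) &&
    libspdm_hmac_sha256_final(ctx, mac)) {
    /* mac holds HMAC-SHA-256(key, msg) */
}
libspdm_hmac_sha256_free(ctx);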

View File

@@ -0,0 +1,37 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
// This is a non-GPL symbol and not part of LKCA, so there is no need to stub it out
bool libspdm_random_bytes(uint8_t *output, size_t size)
{
get_random_bytes(output, size);
return true;
}
// Ignoring the seed is specifically allowed by SPDM
bool libspdm_random_seed(const uint8_t *seed, size_t seed_size)
{
return true;
}

View File

@@ -0,0 +1,264 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
void *libspdm_sha256_new(void)
{
return lkca_hash_new("sha256");
}
void libspdm_sha256_free(void *sha256_ctx)
{
lkca_hash_free(sha256_ctx);
}
bool libspdm_sha256_init(void *sha256_context)
{
return crypto_shash_init(sha256_context) == 0;
}
bool libspdm_sha256_duplicate(const void *sha256_context,
void *new_sha256_context)
{
if (sha256_context == NULL || new_sha256_context == NULL) {
return false;
}
return lkca_hash_duplicate(new_sha256_context, sha256_context);
}
bool libspdm_sha256_update(void *sha256_context, const void *data,
size_t data_size)
{
int32_t ret;
if (sha256_context == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
ret = crypto_shash_update(sha256_context, data, data_size);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_sha256_final(void *sha256_context, uint8_t *hash_value)
{
int32_t ret;
if (sha256_context == NULL || hash_value == NULL) {
return false;
}
ret = crypto_shash_final(sha256_context, hash_value);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_sha256_hash_all(const void *data, size_t data_size,
uint8_t *hash_value)
{
if (hash_value == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
return lkca_hash_all("sha256", data, data_size, hash_value);
}
void *libspdm_sha384_new(void)
{
return lkca_hash_new("sha384");
}
void libspdm_sha384_free(void *sha384_ctx)
{
lkca_hash_free(sha384_ctx);
}
bool libspdm_sha384_init(void *sha384_context)
{
return crypto_shash_init(sha384_context) == 0;
}
bool libspdm_sha384_duplicate(const void *sha384_context,
void *new_sha384_context)
{
if (sha384_context == NULL || new_sha384_context == NULL) {
return false;
}
return lkca_hash_duplicate(new_sha384_context, sha384_context);
}
bool libspdm_sha384_update(void *sha384_context, const void *data,
size_t data_size)
{
int32_t ret;
if (sha384_context == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
ret = crypto_shash_update(sha384_context, data, data_size);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_sha384_final(void *sha384_context, uint8_t *hash_value)
{
int32_t ret;
if (sha384_context == NULL || hash_value == NULL) {
return false;
}
ret = crypto_shash_final(sha384_context, hash_value);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_sha384_hash_all(const void *data, size_t data_size,
uint8_t *hash_value)
{
if (hash_value == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
return lkca_hash_all("sha384", data, data_size, hash_value);
}
void *libspdm_sha512_new(void)
{
return lkca_hash_new("sha512");
}
void libspdm_sha512_free(void *sha512_ctx)
{
lkca_hash_free(sha512_ctx);
}
bool libspdm_sha512_init(void *sha512_context)
{
return crypto_shash_init(sha512_context) == 0;
}
bool libspdm_sha512_duplicate(const void *sha512_context,
void *new_sha512_context)
{
if (sha512_context == NULL || new_sha512_context == NULL) {
return false;
}
return lkca_hash_duplicate(new_sha512_context, sha512_context);
}
bool libspdm_sha512_update(void *sha512_context, const void *data,
size_t data_size)
{
int32_t ret;
if (sha512_context == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
ret = crypto_shash_update(sha512_context, data, data_size);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_sha512_final(void *sha512_context, uint8_t *hash_value)
{
int32_t ret;
if (sha512_context == NULL || hash_value == NULL) {
return false;
}
ret = crypto_shash_final(sha512_context, hash_value);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_sha512_hash_all(const void *data, size_t data_size,
uint8_t *hash_value)
{
if (hash_value == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
return lkca_hash_all("sha512", data, data_size, hash_value);
}

View File

@@ -0,0 +1,160 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
void *lkca_hash_new(const char* alg_name)
{
#ifndef USE_LKCA
return NULL;
#else
// XXX: could we reuse the crypto_shash part and allocate only the desc?
struct crypto_shash *alg;
struct shash_desc *desc;
alg = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(alg)) {
printk (KERN_INFO "Failed to alloc %s\n", alg_name);
return NULL;
}
desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(alg), GFP_KERNEL);
if (desc == NULL) {
printk(KERN_INFO "Kernel out of mem\n");
crypto_free_shash(alg);
return NULL;
}
desc->tfm = alg;
return desc;
#endif
}
void lkca_hash_free(struct shash_desc *ctx)
{
#ifndef USE_LKCA
#else
crypto_free_shash(ctx->tfm);
kfree(ctx);
#endif
}
bool lkca_hash_duplicate(struct shash_desc *dst, struct shash_desc const *src)
{
#ifndef USE_LKCA
return false;
#else
SHASH_DESC_ON_STACK(tmp, src);
if (crypto_shash_export((struct shash_desc *) src, tmp)) {
return false;
}
if (crypto_shash_import(dst, tmp)) {
return false;
}
return true;
#endif
}
bool lkca_hmac_duplicate(struct shash_desc *dst, struct shash_desc const *src)
{
#ifndef USE_LKCA
return false;
#else
// In LKCA, hmac export doesn't include the ipad/opad state, so work around that here
struct crypto_shash *src_tfm = src->tfm;
struct crypto_shash *dst_tfm = dst->tfm;
char *src_ipad = crypto_tfm_ctx_aligned(&src_tfm->base);
char *dst_ipad = crypto_tfm_ctx_aligned(&dst_tfm->base);
int ss = crypto_shash_statesize(dst_tfm);
memcpy(dst_ipad, src_ipad, crypto_shash_blocksize(src->tfm));
memcpy(dst_ipad + ss, src_ipad + ss, crypto_shash_blocksize(src->tfm));
crypto_shash_clear_flags(dst->tfm, CRYPTO_TFM_NEED_KEY);
return lkca_hash_duplicate(dst, src);
#endif
}
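A note on the workaround above, under the assumption that the kernel's hmac implementation lays out its transform context as described (this layout is not guaranteed by any kernel API):

/*
 * Assumed hmac tfm context layout: the keyed ipad hash state sits at
 * offset 0 and the opad state at offset statesize, each carrying
 * blocksize bytes derived from the key. crypto_shash_export() only
 * captures the running digest state, so the two pads are copied by hand
 * and CRYPTO_TFM_NEED_KEY is cleared so the clone is usable without
 * another setkey.
 */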
bool lkca_hash_all(const char* alg_name, const void *data,
size_t data_size, uint8_t *hash_value)
{
#ifndef USE_LKCA
return false;
#else
int ret;
struct crypto_shash *alg;
alg = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(alg)) {
return false;
}
ret = crypto_shash_tfm_digest(alg, data, data_size, hash_value);
crypto_free_shash(alg);
return (ret == 0);
#endif
}
bool lkca_hmac_set_key(struct shash_desc *desc, const uint8_t *key, size_t key_size)
{
#ifndef USE_LKCA
return false;
#else
int ret;
ret = crypto_shash_setkey(desc->tfm, key, key_size);
if (ret == 0) {
ret = crypto_shash_init(desc);
}
return ret == 0;
#endif
}
bool lkca_hmac_all(const char* alg_name, const uint8_t *key, size_t key_size,
const uint8_t *data, size_t data_size, uint8_t *hash_value)
{
#ifndef USE_LKCA
return false;
#else
int ret;
struct crypto_shash *alg;
alg = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(alg)) {
return false;
}
ret = crypto_shash_setkey(alg, key, key_size);
if (ret == 0){
ret = crypto_shash_tfm_digest(alg, data, data_size, hash_value);
}
crypto_free_shash(alg);
return (ret == 0);
#endif
}

View File

@@ -0,0 +1,456 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* libspdm_x509_verify_cert_chain, libspdm_x509_get_cert_from_cert_chain, check
* and prototypes taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
#ifdef USE_LKCA
#include <crypto/public_key.h>
#include <keys/asymmetric-type.h>
#endif
bool libspdm_x509_construct_certificate(const uint8_t *cert, size_t cert_size,
uint8_t **single_x509_cert)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_construct_certificate_stack(uint8_t **x509_stack, ...)
{
LIBSPDM_ASSERT(false);
return false;
}
void libspdm_x509_free(void *x509_cert)
{
LIBSPDM_ASSERT(false);
}
void libspdm_x509_stack_free(void *x509_stack)
{
LIBSPDM_ASSERT(false);
}
static bool lkca_asn1_get_tag(uint8_t const *ptr, uint8_t const *end,
size_t *length, uint32_t tag)
{
uint64_t max_len = end - ptr;
// Chain must be less than 1 GB
if ((max_len < 2) || (max_len > (1024 * 1024 * 1024))) {
return false;
}
// We only deal with universal and application tags
if (ptr[0] != tag) {
return false;
}
if (ptr[1] < 0x80) {
*length = ptr[1] + 2;
} else if (ptr[1] == 0x81) {
if (max_len < 3) {
return false;
}
*length = ptr[2] + 3;
} else if (ptr[1] == 0x82) {
if (max_len < 4) {
return false;
}
*length = (ptr[2] << 8) + ptr[3] + 4;
} else {
// In theory it could be bigger than 64KB
return false;
}
if (*length > max_len) {
return false;
}
return true;
}
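For reference, the three DER length forms handled above (illustrative restatement, not new logic):

/*
 *   ptr[1] <  0x80 : short form, length = ptr[1]              (2-byte header)
 *   ptr[1] == 0x81 : long form,  length = ptr[2]              (3-byte header)
 *   ptr[1] == 0x82 : long form,  length = (ptr[2]<<8)+ptr[3]  (4-byte header)
 *
 * The *length returned above includes the header bytes, hence the +2/+3/+4.
 */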
bool libspdm_asn1_get_tag(uint8_t **ptr, const uint8_t *end, size_t *length,
uint32_t tag)
{
return lkca_asn1_get_tag(*ptr, end, length, tag);
}
bool libspdm_x509_get_subject_name(const uint8_t *cert, size_t cert_size,
uint8_t *cert_subject,
size_t *subject_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_common_name(const uint8_t *cert, size_t cert_size,
char *common_name,
size_t *common_name_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool
libspdm_x509_get_organization_name(const uint8_t *cert, size_t cert_size,
char *name_buffer,
size_t *name_buffer_size)
{
LIBSPDM_ASSERT(false);
return false;
}
#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT)
bool libspdm_rsa_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **rsa_context)
{
LIBSPDM_ASSERT(false);
return false;
}
#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */
bool libspdm_ec_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **ec_context)
{
#ifdef USE_LKCA
bool ret = false;
uint32_t key_size = 0;
struct key_preparsed_payload lkca_cert;
struct public_key *pub;
lkca_cert.data = cert;
lkca_cert.datalen = cert_size;
if (cert == NULL) {
return false;
}
if(key_type_asymmetric.preparse(&lkca_cert)) {
return false;
}
pub = lkca_cert.payload.data[asym_crypto];
// Subtract 1 because LKCA prepends a leading '4' (uncompressed-point marker) to public keys
key_size = pub->keylen - 1;
if (key_size == (2 * 256 / 8)) {
*ec_context = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP256R1);
} else if (key_size == (2 * 384 / 8)) {
*ec_context = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP384R1);
} else {
goto err;
}
if (*ec_context == NULL) {
goto err;
}
// Again skip the leading '4' in the key to match the SPDM wire format; it is
// added back in lkca_ecdsa_verify
if (!lkca_ec_set_pub_key(*ec_context, (char *) pub->key + 1, key_size)) {
libspdm_ec_free(*ec_context);
goto err;
}
ret = true;
err:
key_type_asymmetric.free_preparse(&lkca_cert);
return ret;
#else
return false;
#endif
}
bool libspdm_ecd_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **ecd_context)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_sm2_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **sm2_context)
{
LIBSPDM_ASSERT(false);
return false;
}
static int lkca_x509_verify_cert(const uint8_t *cert, size_t cert_size,
const uint8_t *ca_cert, size_t ca_cert_size)
{
#ifdef USE_LKCA
int ret;
struct key_preparsed_payload lkca_cert;
struct key_preparsed_payload lkca_ca_cert;
lkca_cert.data = cert;
lkca_cert.datalen = cert_size;
lkca_ca_cert.data = ca_cert;
lkca_ca_cert.datalen = ca_cert_size;
ret = key_type_asymmetric.preparse(&lkca_cert);
if (ret) {
return ret;
}
ret = key_type_asymmetric.preparse(&lkca_ca_cert);
if (ret) {
key_type_asymmetric.free_preparse(&lkca_cert);
return ret;
}
ret = public_key_verify_signature(lkca_ca_cert.payload.data[asym_crypto],
lkca_cert.payload.data[asym_auth]);
key_type_asymmetric.free_preparse(&lkca_cert);
key_type_asymmetric.free_preparse(&lkca_ca_cert);
return ret;
#else
return -1; // fail closed: without LKCA the certificate cannot be verified
#endif
}
bool libspdm_x509_verify_cert(const uint8_t *cert, size_t cert_size,
const uint8_t *ca_cert, size_t ca_cert_size)
{
return lkca_x509_verify_cert(cert, cert_size, ca_cert, ca_cert_size) == 0;
}
bool libspdm_x509_verify_cert_chain(const uint8_t *root_cert, size_t root_cert_length,
const uint8_t *cert_chain, size_t cert_chain_length)
{
size_t preceding_cert_len;
const uint8_t *preceding_cert;
size_t current_cert_len;
const uint8_t *current_cert;
bool verify_flag;
int ret;
verify_flag = false;
preceding_cert = root_cert;
preceding_cert_len = root_cert_length;
current_cert = cert_chain;
/* Get Current certificate from certificates buffer and Verify with preceding cert*/
do {
if (!lkca_asn1_get_tag(
current_cert, cert_chain + cert_chain_length, &current_cert_len,
LIBSPDM_CRYPTO_ASN1_CONSTRUCTED | LIBSPDM_CRYPTO_ASN1_SEQUENCE)) {
break;
}
ret = lkca_x509_verify_cert(current_cert, current_cert_len,
preceding_cert, preceding_cert_len);
if (ret != 0) {
verify_flag = false;
break;
} else {
verify_flag = true;
}
preceding_cert = current_cert;
preceding_cert_len = current_cert_len;
current_cert = current_cert + current_cert_len;
} while (true);
return verify_flag;
}
bool libspdm_x509_get_cert_from_cert_chain(const uint8_t *cert_chain,
size_t cert_chain_length,
const int32_t cert_index, const uint8_t **cert,
size_t *cert_length)
{
size_t asn1_len;
int32_t current_index;
size_t current_cert_len;
const uint8_t *current_cert;
current_cert_len = 0;
/* Check input parameters.*/
if ((cert_chain == NULL) || (cert == NULL) || (cert_index < -1) ||
(cert_length == NULL)) {
return false;
}
current_cert = cert_chain;
current_index = -1;
/* Traverse the certificate chain*/
while (true) {
/* Get asn1 tag len*/
if (!lkca_asn1_get_tag(
current_cert, cert_chain + cert_chain_length, &asn1_len,
LIBSPDM_CRYPTO_ASN1_CONSTRUCTED | LIBSPDM_CRYPTO_ASN1_SEQUENCE)) {
break;
}
current_cert_len = asn1_len;
current_index++;
if (current_index == cert_index) {
*cert = current_cert;
*cert_length = current_cert_len;
return true;
}
current_cert = current_cert + current_cert_len;
}
/* If cert_index is -1, Return the last certificate*/
if (cert_index == -1 && current_index >= 0) {
*cert = current_cert - current_cert_len;
*cert_length = current_cert_len;
return true;
}
return false;
}
bool libspdm_x509_get_tbs_cert(const uint8_t *cert, size_t cert_size,
uint8_t **tbs_cert, size_t *tbs_cert_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_version(const uint8_t *cert, size_t cert_size,
size_t *version)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_serial_number(const uint8_t *cert, size_t cert_size,
uint8_t *serial_number,
size_t *serial_number_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_issuer_name(const uint8_t *cert, size_t cert_size,
uint8_t *cert_issuer,
size_t *issuer_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool
libspdm_x509_get_issuer_common_name(const uint8_t *cert, size_t cert_size,
char *common_name,
size_t *common_name_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool
libspdm_x509_get_issuer_orgnization_name(const uint8_t *cert, size_t cert_size,
char *name_buffer,
size_t *name_buffer_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_signature_algorithm(const uint8_t *cert,
size_t cert_size, uint8_t *oid,
size_t *oid_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_extension_data(const uint8_t *cert, size_t cert_size,
const uint8_t *oid, size_t oid_size,
uint8_t *extension_data,
size_t *extension_data_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_validity(const uint8_t *cert, size_t cert_size,
uint8_t *from, size_t *from_size, uint8_t *to,
size_t *to_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size,
size_t *usage)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_extended_key_usage(const uint8_t *cert,
size_t cert_size, uint8_t *usage,
size_t *usage_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_extended_basic_constraints(const uint8_t *cert,
size_t cert_size,
uint8_t *basic_constraints,
size_t *basic_constraints_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_set_date_time(char const *date_time_str, void *date_time, size_t *date_time_size)
{
LIBSPDM_ASSERT(false);
return false;
}
int32_t libspdm_x509_compare_date_time(const void *date_time1, const void *date_time2)
{
LIBSPDM_ASSERT(false);
return -3;
}
bool libspdm_gen_x509_csr(size_t hash_nid, size_t asym_nid,
uint8_t *requester_info, size_t requester_info_length,
void *context, char *subject_name,
size_t *csr_len, uint8_t **csr_pointer)
{
LIBSPDM_ASSERT(false);
return false;
}
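An illustrative walk over a chain, assuming chain_der is a concatenation of DER certificates ordered root-signed first, leaf last (index -1 requests the last certificate):

const uint8_t *root_der, *chain_der; /* DER buffers, provisioned elsewhere */
size_t root_der_len, chain_der_len;
const uint8_t *leaf;
size_t leaf_len;

if (libspdm_x509_verify_cert_chain(root_der, root_der_len,
                                   chain_der, chain_der_len) &&
    libspdm_x509_get_cert_from_cert_chain(chain_der, chain_der_len,
                                          -1, &leaf, &leaf_len)) {
    /* leaf/leaf_len now reference the final certificate in the chain */
}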

kernel-open/nvidia/nv-acpi.c (new file, 1493 lines)

View File

File diff suppressed because it is too large

View File

@@ -0,0 +1,81 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include <linux/backlight.h>
#include "os-interface.h"
#include "nv-linux.h"
NV_STATUS NV_API_CALL nv_get_tegra_brightness_level
(
nv_state_t *nv,
NvU32 *brightness
)
{
#ifdef NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct backlight_device *bd;
bd = get_backlight_device_by_name(nvl->backlight.device_name);
if (bd == NULL)
{
nv_printf(NV_DBG_ERRORS, "Unable to get backlight device\n");
return NV_ERR_GENERIC;
}
*brightness = bd->props.brightness;
return NV_OK;
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}
NV_STATUS NV_API_CALL nv_set_tegra_brightness_level
(
nv_state_t *nv,
NvU32 brightness
)
{
#ifdef NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct backlight_device *bd;
bd = get_backlight_device_by_name(nvl->backlight.device_name);
if (bd == NULL)
{
nv_printf(NV_DBG_ERRORS, "Unable to get backlight device\n");
return NV_ERR_GENERIC;
}
bd->props.brightness = brightness;
backlight_update_status(bd);
return NV_OK;
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}

View File

@@ -0,0 +1,108 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/bpmp.h>
#endif // IS_ENABLED(CONFIG_TEGRA_BPMP)
/*!
* @brief Sends an MRQ (message-request) to BPMP
*
* The request, response, and ret parameters of this function correspond to the
* components of the tegra_bpmp_message struct, which BPMP uses to receive
* MRQs.
*
* @param[in] nv Per GPU Linux state
* @param[in] mrq MRQ_xxx ID specifying what is requested
* @param[in] request_data Pointer to request input data
* @param[in] request_data_size Size of structure pointed to by request_data
* @param[out] response_data Pointer to response output data
* @param[in] response_data_size Size of structure pointed to by response_data
* @param[out] ret MRQ return code (from "ret" element of
* tegra_bpmp_message struct)
* @param[out] api_ret Return code from tegra_bpmp_transfer call
*
* @returns NV_OK if successful,
* NV_ERR_NOT_SUPPORTED if the functionality is not available,
* NV_ERR_INVALID_POINTER if the tegra_bpmp struct pointer could not
* be obtained from nv, or
* NV_ERR_GENERIC if the tegra_bpmp_transfer call failed (see api_ret
* for the Linux error code).
*/
NV_STATUS NV_API_CALL
nv_bpmp_send_mrq
(
nv_state_t *nv,
NvU32 mrq,
const void *request_data,
NvU32 request_data_size,
void *response_data,
NvU32 response_data_size,
NvS32 *ret,
NvS32 *api_ret
)
{
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct tegra_bpmp *bpmp;
struct tegra_bpmp_message msg;
bpmp = tegra_bpmp_get(nvl->dev);
if (IS_ERR(bpmp))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: Error getting bpmp struct: %ld\n",
PTR_ERR(bpmp));
return NV_ERR_INVALID_POINTER;
}
// Send the MRQ request to BPMP.
memset(&msg, 0, sizeof(msg));
msg.mrq = mrq;
msg.tx.data = request_data;
msg.tx.size = (size_t) request_data_size;
msg.rx.data = response_data;
msg.rx.size = (size_t) response_data_size;
*api_ret = (NvS32) tegra_bpmp_transfer(bpmp, &msg);
if (*api_ret == 0)
{
*ret = (NvS32) msg.rx.ret;
return NV_OK;
}
else
{
return NV_ERR_GENERIC;
}
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}
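A hypothetical caller sketch; the MRQ ID and payload structs come from soc/tegra/bpmp-abi.h and are used purely for illustration (nv is the per-GPU state passed in by the caller):

struct mrq_ping_request req = { .challenge = 1 };
struct mrq_ping_response rsp;
NvS32 mrq_ret, api_ret;

NV_STATUS status = nv_bpmp_send_mrq(nv, MRQ_PING, &req, sizeof(req),
                                    &rsp, sizeof(rsp), &mrq_ret, &api_ret);
if (status == NV_OK && mrq_ret == 0) {
    /* rsp.reply holds BPMP's answer to the challenge */
}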

View File

@@ -0,0 +1,852 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-linux.h"
#include "nv-caps.h"
#include "nv-procfs.h"
#include "nv-hash.h"
extern int NVreg_ModifyDeviceFiles;
/* sys_close() or __close_fd() */
#include <linux/syscalls.h>
#define NV_CAP_DRV_MINOR_COUNT 8192
/* Hash table with 512 buckets */
#define NV_CAP_HASH_BITS 9
NV_DECLARE_HASHTABLE(g_nv_cap_hash_table, NV_CAP_HASH_BITS);
#define NV_CAP_HASH_SIZE NV_HASH_SIZE(g_nv_cap_hash_table)
#define nv_cap_hash_key(path) (nv_string_hash(path) % NV_CAP_HASH_SIZE)
typedef struct nv_cap_table_entry
{
/* name must be the first element */
const char *name;
int minor;
struct hlist_node hlist;
} nv_cap_table_entry_t;
#define NV_CAP_NUM_ENTRIES(_table) (sizeof(_table) / sizeof(_table[0]))
static nv_cap_table_entry_t g_nv_cap_nvlink_table[] =
{
{"/driver/nvidia-nvlink/capabilities/fabric-mgmt"}
};
static nv_cap_table_entry_t g_nv_cap_mig_table[] =
{
{"/driver/nvidia/capabilities/mig/config"},
{"/driver/nvidia/capabilities/mig/monitor"}
};
static nv_cap_table_entry_t g_nv_cap_sys_table[] =
{
};
#define NV_CAP_MIG_CI_ENTRIES(_gi) \
{_gi "/ci0/access"}, \
{_gi "/ci1/access"}, \
{_gi "/ci2/access"}, \
{_gi "/ci3/access"}, \
{_gi "/ci4/access"}, \
{_gi "/ci5/access"}, \
{_gi "/ci6/access"}, \
{_gi "/ci7/access"}
#define NV_CAP_MIG_GI_ENTRIES(_gpu) \
{_gpu "/gi0/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi0"), \
{_gpu "/gi1/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi1"), \
{_gpu "/gi2/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi2"), \
{_gpu "/gi3/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi3"), \
{_gpu "/gi4/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi4"), \
{_gpu "/gi5/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi5"), \
{_gpu "/gi6/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi6"), \
{_gpu "/gi7/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi7"), \
{_gpu "/gi8/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi8"), \
{_gpu "/gi9/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi9"), \
{_gpu "/gi10/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi10"), \
{_gpu "/gi11/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi11"), \
{_gpu "/gi12/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi12"), \
{_gpu "/gi13/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi13"), \
{_gpu "/gi14/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi14")
static nv_cap_table_entry_t g_nv_cap_mig_gpu_table[] =
{
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu0/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu1/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu2/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu3/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu4/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu5/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu6/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu7/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu8/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu9/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu10/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu11/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu12/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu13/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu14/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu15/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu16/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu17/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu18/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu19/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu20/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu21/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu22/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu23/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu24/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu25/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu26/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu27/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu28/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu29/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu30/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu31/mig")
};
struct nv_cap
{
char *path;
char *name;
int minor;
int permissions;
int modify;
struct proc_dir_entry *parent;
struct proc_dir_entry *entry;
};
#define NV_CAP_PROCFS_WRITE_BUF_SIZE 128
typedef struct nv_cap_file_private
{
int minor;
int permissions;
int modify;
char buffer[NV_CAP_PROCFS_WRITE_BUF_SIZE];
off_t offset;
} nv_cap_file_private_t;
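/*
 * Note: writes to a capability proc file are accumulated in 'buffer'
 * and only parsed when the file is released; nv_cap_procfs_release()
 * below currently recognizes a "DeviceFileModify: <n>" string.
 */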
struct
{
NvBool initialized;
struct cdev cdev;
dev_t devno;
} g_nv_cap_drv;
#define NV_CAP_PROCFS_DIR "driver/nvidia-caps"
#define NV_CAP_NAME_BUF_SIZE 128
static struct proc_dir_entry *nv_cap_procfs_dir;
static int nv_procfs_read_nvlink_minors(struct seq_file *s, void *v)
{
int i, count;
char name[NV_CAP_NAME_BUF_SIZE];
count = NV_CAP_NUM_ENTRIES(g_nv_cap_nvlink_table);
for (i = 0; i < count; i++)
{
if (sscanf(g_nv_cap_nvlink_table[i].name,
"/driver/nvidia-nvlink/capabilities/%s", name) == 1)
{
name[sizeof(name) - 1] = '\0';
seq_printf(s, "%s %d\n", name, g_nv_cap_nvlink_table[i].minor);
}
}
return 0;
}
static int nv_procfs_read_sys_minors(struct seq_file *s, void *v)
{
int i, count;
char name[NV_CAP_NAME_BUF_SIZE];
count = NV_CAP_NUM_ENTRIES(g_nv_cap_sys_table);
for (i = 0; i < count; i++)
{
if (sscanf(g_nv_cap_sys_table[i].name,
"/driver/nvidia/capabilities/%s", name) == 1)
{
name[sizeof(name) - 1] = '\0';
seq_printf(s, "%s %d\n", name, g_nv_cap_sys_table[i].minor);
}
}
return 0;
}
static int nv_procfs_read_mig_minors(struct seq_file *s, void *v)
{
int i, count, gpu;
char name[NV_CAP_NAME_BUF_SIZE];
count = NV_CAP_NUM_ENTRIES(g_nv_cap_mig_table);
for (i = 0; i < count; i++)
{
if (sscanf(g_nv_cap_mig_table[i].name,
"/driver/nvidia/capabilities/mig/%s", name) == 1)
{
name[sizeof(name) - 1] = '\0';
seq_printf(s, "%s %d\n", name, g_nv_cap_mig_table[i].minor);
}
}
count = NV_CAP_NUM_ENTRIES(g_nv_cap_mig_gpu_table);
for (i = 0; i < count; i++)
{
if (sscanf(g_nv_cap_mig_gpu_table[i].name,
"/driver/nvidia/capabilities/gpu%d/mig/%s", &gpu, name) == 2)
{
name[sizeof(name) - 1] = '\0';
seq_printf(s, "gpu%d/%s %d\n",
gpu, name, g_nv_cap_mig_gpu_table[i].minor);
}
}
return 0;
}
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(nvlink_minors, nv_system_pm_lock);
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(mig_minors, nv_system_pm_lock);
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(sys_minors, nv_system_pm_lock);
static void nv_cap_procfs_exit(void)
{
if (!nv_cap_procfs_dir)
{
return;
}
#if defined(CONFIG_PROC_FS)
proc_remove(nv_cap_procfs_dir);
#endif
nv_cap_procfs_dir = NULL;
}
int nv_cap_procfs_init(void)
{
static struct proc_dir_entry *file_entry;
nv_cap_procfs_dir = NV_CREATE_PROC_DIR(NV_CAP_PROCFS_DIR, NULL);
if (nv_cap_procfs_dir == NULL)
{
return -EACCES;
}
file_entry = NV_CREATE_PROC_FILE("mig-minors", nv_cap_procfs_dir,
mig_minors, NULL);
if (file_entry == NULL)
{
goto cleanup;
}
file_entry = NV_CREATE_PROC_FILE("nvlink-minors", nv_cap_procfs_dir,
nvlink_minors, NULL);
if (file_entry == NULL)
{
goto cleanup;
}
file_entry = NV_CREATE_PROC_FILE("sys-minors", nv_cap_procfs_dir,
sys_minors, NULL);
if (file_entry == NULL)
{
goto cleanup;
}
return 0;
cleanup:
nv_cap_procfs_exit();
return -EACCES;
}
static int nv_cap_find_minor(char *path)
{
unsigned int key = nv_cap_hash_key(path);
nv_cap_table_entry_t *entry;
nv_hash_for_each_possible(g_nv_cap_hash_table, entry, hlist, key)
{
if (strcmp(path, entry->name) == 0)
{
return entry->minor;
}
}
return -1;
}
static void _nv_cap_table_init(nv_cap_table_entry_t *table, int count)
{
int i;
unsigned int key;
static int minor = 0;
for (i = 0; i < count; i++)
{
table[i].minor = minor++;
INIT_HLIST_NODE(&table[i].hlist);
key = nv_cap_hash_key(table[i].name);
nv_hash_add(g_nv_cap_hash_table, &table[i].hlist, key);
}
WARN_ON(minor > NV_CAP_DRV_MINOR_COUNT);
}
#define nv_cap_table_init(table) \
_nv_cap_table_init(table, NV_CAP_NUM_ENTRIES(table))
static void nv_cap_tables_init(void)
{
BUILD_BUG_ON(offsetof(nv_cap_table_entry_t, name) != 0);
nv_hash_init(g_nv_cap_hash_table);
nv_cap_table_init(g_nv_cap_nvlink_table);
nv_cap_table_init(g_nv_cap_mig_table);
nv_cap_table_init(g_nv_cap_mig_gpu_table);
nv_cap_table_init(g_nv_cap_sys_table);
}
static ssize_t nv_cap_procfs_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *pos)
{
nv_cap_file_private_t *private = NULL;
unsigned long bytes_left;
char *proc_buffer;
private = ((struct seq_file *)file->private_data)->private;
bytes_left = (sizeof(private->buffer) - private->offset - 1);
if (count == 0)
{
return -EINVAL;
}
if ((bytes_left == 0) || (count > bytes_left))
{
return -ENOSPC;
}
proc_buffer = &private->buffer[private->offset];
if (copy_from_user(proc_buffer, buffer, count))
{
nv_printf(NV_DBG_ERRORS, "nv-caps: failed to copy in proc data!\n");
return -EFAULT;
}
private->offset += count;
proc_buffer[count] = '\0';
*pos = private->offset;
return count;
}
static int nv_cap_procfs_read(struct seq_file *s, void *v)
{
nv_cap_file_private_t *private = s->private;
seq_printf(s, "%s: %d\n", "DeviceFileMinor", private->minor);
seq_printf(s, "%s: %d\n", "DeviceFileMode", private->permissions);
seq_printf(s, "%s: %d\n", "DeviceFileModify", private->modify);
return 0;
}
static int nv_cap_procfs_open(struct inode *inode, struct file *file)
{
nv_cap_file_private_t *private = NULL;
int rc;
nv_cap_t *cap = NV_PDE_DATA(inode);
NV_KMALLOC(private, sizeof(nv_cap_file_private_t));
if (private == NULL)
{
return -ENOMEM;
}
private->minor = cap->minor;
private->permissions = cap->permissions;
private->offset = 0;
private->modify = cap->modify;
rc = single_open(file, nv_cap_procfs_read, private);
if (rc < 0)
{
NV_KFREE(private, sizeof(nv_cap_file_private_t));
return rc;
}
rc = nv_down_read_interruptible(&nv_system_pm_lock);
if (rc < 0)
{
single_release(inode, file);
NV_KFREE(private, sizeof(nv_cap_file_private_t));
}
return rc;
}
static int nv_cap_procfs_release(struct inode *inode, struct file *file)
{
struct seq_file *s = file->private_data;
nv_cap_file_private_t *private = NULL;
char *buffer;
int modify;
nv_cap_t *cap = NV_PDE_DATA(inode);
if (s != NULL)
{
private = s->private;
}
up_read(&nv_system_pm_lock);
single_release(inode, file);
if (private != NULL)
{
buffer = private->buffer;
if (private->offset != 0)
{
if (sscanf(buffer, "DeviceFileModify: %d", &modify) == 1)
{
cap->modify = modify;
}
}
NV_KFREE(private, sizeof(nv_cap_file_private_t));
}
/*
* All open files using the proc entry will be invalidated
* if the entry is removed.
*/
file->private_data = NULL;
return 0;
}
static nv_proc_ops_t g_nv_cap_procfs_fops = {
NV_PROC_OPS_SET_OWNER()
.NV_PROC_OPS_OPEN = nv_cap_procfs_open,
.NV_PROC_OPS_RELEASE = nv_cap_procfs_release,
.NV_PROC_OPS_WRITE = nv_cap_procfs_write,
.NV_PROC_OPS_READ = seq_read,
.NV_PROC_OPS_LSEEK = seq_lseek,
};
/* forward declaration of g_nv_cap_drv_fops */
static struct file_operations g_nv_cap_drv_fops;
int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd)
{
struct file *file;
int dup_fd;
struct inode *inode = NULL;
dev_t rdev = 0;
struct files_struct *files = current->files;
struct fdtable *fdt;
if (cap == NULL)
{
return -1;
}
file = fget(fd);
if (file == NULL)
{
return -1;
}
inode = NV_FILE_INODE(file);
if (inode == NULL)
{
goto err;
}
/* Make sure the fd belongs to the nv-cap-drv */
if (file->f_op != &g_nv_cap_drv_fops)
{
goto err;
}
/* Make sure the fd has the expected capability */
rdev = inode->i_rdev;
if (MINOR(rdev) != cap->minor)
{
goto err;
}
dup_fd = NV_GET_UNUSED_FD_FLAGS(O_CLOEXEC);
if (dup_fd < 0)
{
dup_fd = NV_GET_UNUSED_FD();
if (dup_fd < 0)
{
goto err;
}
/*
* Set CLOEXEC before installing the FD.
*
* If fork() happens in between, the opened unused FD will have
* a NULL struct file associated with it, which is okay.
*
* The only well known bug here is the race with dup(2), which is
* already documented in the kernel, see fd_install()'s description.
*/
spin_lock(&files->file_lock);
fdt = files_fdtable(files);
__set_bit(dup_fd, fdt->close_on_exec);
spin_unlock(&files->file_lock);
}
fd_install(dup_fd, file);
return dup_fd;
err:
fput(file);
return -1;
}
void NV_API_CALL nv_cap_close_fd(int fd)
{
if (fd == -1)
{
return;
}
/*
* Acquire task_lock as we access current->files explicitly (__close_fd)
* and implicitly (sys_close), and it will race with the exit path.
*/
task_lock(current);
/* Nothing to do, we are in exit path */
if (current->files == NULL)
{
task_unlock(current);
return;
}
/*
* From v4.17-rc1 (to v5.10.8) kernels have stopped exporting sys_close(fd)
* and started exporting __close_fd, as of this commit:
* 2018-04-02 2ca2a09d6215 ("fs: add ksys_close() wrapper; remove in-kernel
* calls to sys_close()")
* Kernels v5.11-rc1 onwards have stopped exporting __close_fd, and started
* exporting close_fd, as of this commit:
* 2020-12-20 8760c909f54a ("file: Rename __close_fd to close_fd and remove
* the files parameter")
*/
#if NV_IS_EXPORT_SYMBOL_PRESENT_close_fd
close_fd(fd);
#elif NV_IS_EXPORT_SYMBOL_PRESENT___close_fd
__close_fd(current->files, fd);
#else
sys_close(fd);
#endif
task_unlock(current);
}
static nv_cap_t* nv_cap_alloc(nv_cap_t *parent_cap, const char *name)
{
nv_cap_t *cap;
int len;
if (parent_cap == NULL || name == NULL)
{
return NULL;
}
NV_KMALLOC(cap, sizeof(nv_cap_t));
if (cap == NULL)
{
return NULL;
}
len = strlen(name) + strlen(parent_cap->path) + 2;
NV_KMALLOC(cap->path, len);
if (cap->path == NULL)
{
NV_KFREE(cap, sizeof(nv_cap_t));
return NULL;
}
strcpy(cap->path, parent_cap->path);
strcat(cap->path, "/");
strcat(cap->path, name);
len = strlen(name) + 1;
NV_KMALLOC(cap->name, len);
if (cap->name == NULL)
{
NV_KFREE(cap->path, strlen(cap->path) + 1);
NV_KFREE(cap, sizeof(nv_cap_t));
return NULL;
}
strcpy(cap->name, name);
cap->minor = -1;
cap->modify = NVreg_ModifyDeviceFiles;
return cap;
}
static void nv_cap_free(nv_cap_t *cap)
{
if (cap == NULL)
{
return;
}
NV_KFREE(cap->path, strlen(cap->path) + 1);
NV_KFREE(cap->name, strlen(cap->name) + 1);
NV_KFREE(cap, sizeof(nv_cap_t));
}
nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap,
const char *name, int mode)
{
nv_cap_t *cap = NULL;
int minor;
cap = nv_cap_alloc(parent_cap, name);
if (cap == NULL)
{
return NULL;
}
cap->parent = parent_cap->entry;
cap->permissions = mode;
mode = (S_IFREG | S_IRUGO);
minor = nv_cap_find_minor(cap->path);
if (minor < 0)
{
nv_cap_free(cap);
return NULL;
}
cap->minor = minor;
cap->entry = proc_create_data(name, mode, parent_cap->entry,
&g_nv_cap_procfs_fops, (void*)cap);
if (cap->entry == NULL)
{
nv_cap_free(cap);
return NULL;
}
return cap;
}
nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap,
const char *name, int mode)
{
nv_cap_t *cap = NULL;
cap = nv_cap_alloc(parent_cap, name);
if (cap == NULL)
{
return NULL;
}
cap->parent = parent_cap->entry;
cap->permissions = mode;
cap->minor = -1;
mode = (S_IFDIR | S_IRUGO | S_IXUGO);
cap->entry = NV_PROC_MKDIR_MODE(name, mode, parent_cap->entry);
if (cap->entry == NULL)
{
nv_cap_free(cap);
return NULL;
}
return cap;
}
nv_cap_t* NV_API_CALL nv_cap_init(const char *path)
{
nv_cap_t parent_cap;
nv_cap_t *cap;
int mode;
char *name = NULL;
char dir[] = "/capabilities";
if (path == NULL)
{
return NULL;
}
NV_KMALLOC(name, (strlen(path) + strlen(dir)) + 1);
if (name == NULL)
{
return NULL;
}
strcpy(name, path);
strcat(name, dir);
parent_cap.entry = NULL;
parent_cap.path = "";
parent_cap.name = "";
mode = S_IRUGO | S_IXUGO;
cap = nv_cap_create_dir_entry(&parent_cap, name, mode);
NV_KFREE(name, strlen(name) + 1);
return cap;
}
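/*
 * Illustrative flow (a sketch, not a call made in this file): with a
 * hypothetical argument, nv_cap_init("driver/nvidia-nvlink") creates
 * the procfs directory "driver/nvidia-nvlink/capabilities" and returns
 * a cap whose path is "/driver/nvidia-nvlink/capabilities"; a later
 * nv_cap_create_file_entry(cap, "fabric-mgmt", mode) composes the path
 * "/driver/nvidia-nvlink/capabilities/fabric-mgmt", which is what
 * nv_cap_find_minor() matches against g_nv_cap_nvlink_table.
 */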
void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap)
{
if (WARN_ON(cap == NULL))
{
return;
}
remove_proc_entry(cap->name, cap->parent);
nv_cap_free(cap);
}
static int nv_cap_drv_open(struct inode *inode, struct file *file)
{
return 0;
}
static int nv_cap_drv_release(struct inode *inode, struct file *file)
{
return 0;
}
static struct file_operations g_nv_cap_drv_fops =
{
.owner = THIS_MODULE,
.open = nv_cap_drv_open,
.release = nv_cap_drv_release
};
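/*
 * The capability character device deliberately implements no read,
 * write, or ioctl: an fd opened on one of its device nodes serves
 * purely as an access token, and nv_cap_validate_and_dup_fd() above
 * verifies that a presented fd carries these file_operations and the
 * expected capability minor number.
 */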
int NV_API_CALL nv_cap_drv_init(void)
{
int rc;
nv_cap_tables_init();
if (g_nv_cap_drv.initialized)
{
nv_printf(NV_DBG_ERRORS, "nv-caps-drv is already initialized.\n");
return -EBUSY;
}
rc = alloc_chrdev_region(&g_nv_cap_drv.devno,
0,
NV_CAP_DRV_MINOR_COUNT,
"nvidia-caps");
if (rc < 0)
{
nv_printf(NV_DBG_ERRORS, "nv-caps-drv failed to create cdev region.\n");
return rc;
}
cdev_init(&g_nv_cap_drv.cdev, &g_nv_cap_drv_fops);
g_nv_cap_drv.cdev.owner = THIS_MODULE;
rc = cdev_add(&g_nv_cap_drv.cdev, g_nv_cap_drv.devno,
NV_CAP_DRV_MINOR_COUNT);
if (rc < 0)
{
nv_printf(NV_DBG_ERRORS, "nv-caps-drv failed to create cdev.\n");
goto cdev_add_fail;
}
rc = nv_cap_procfs_init();
if (rc < 0)
{
nv_printf(NV_DBG_ERRORS, "nv-caps-drv: unable to init proc\n");
goto proc_init_fail;
}
g_nv_cap_drv.initialized = NV_TRUE;
return 0;
proc_init_fail:
cdev_del(&g_nv_cap_drv.cdev);
cdev_add_fail:
unregister_chrdev_region(g_nv_cap_drv.devno, NV_CAP_DRV_MINOR_COUNT);
return rc;
}
void NV_API_CALL nv_cap_drv_exit(void)
{
if (!g_nv_cap_drv.initialized)
{
return;
}
nv_cap_procfs_exit();
cdev_del(&g_nv_cap_drv.cdev);
unregister_chrdev_region(g_nv_cap_drv.devno, NV_CAP_DRV_MINOR_COUNT);
g_nv_cap_drv.initialized = NV_FALSE;
}

kernel-open/nvidia/nv-clk.c Normal file

@@ -0,0 +1,738 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#if defined(NV_SOC_TEGRA_BPMP_ABI_H_PRESENT)
#include <soc/tegra/bpmp-abi.h>
#endif
#if defined(NV_SOC_TEGRA_BPMP_H_PRESENT)
#include <soc/tegra/bpmp.h>
#endif
/*!
* @brief Maps each clock in enum TEGRASOC_WHICH_CLK (defined in
* arch/nvalloc/unix/include/nv.h) to its clock name string.
*
* The enum and this array must be kept in sync: any change to one
* requires a matching update to the other.
*/
static const char *osMapClk[] = {
[TEGRASOC_WHICH_CLK_NVDISPLAYHUB] = "nvdisplayhub_clk",
[TEGRASOC_WHICH_CLK_NVDISPLAY_DISP] = "nvdisplay_disp_clk",
[TEGRASOC_WHICH_CLK_NVDISPLAY_P0] = "nvdisplay_p0_clk",
[TEGRASOC_WHICH_CLK_NVDISPLAY_P1] = "nvdisplay_p1_clk",
[TEGRASOC_WHICH_CLK_DPAUX0] = "dpaux0_clk",
[TEGRASOC_WHICH_CLK_FUSE] = "fuse_clk",
[TEGRASOC_WHICH_CLK_DSIPLL_VCO] = "dsipll_vco_clk",
[TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN] = "dsipll_clkoutpn_clk",
[TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA] = "dsipll_clkouta_clk",
[TEGRASOC_WHICH_CLK_SPPLL0_VCO] = "sppll0_vco_clk",
[TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN] = "sppll0_clkoutpn_clk",
[TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA] = "sppll0_clkouta_clk",
[TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB] = "sppll0_clkoutb_clk",
[TEGRASOC_WHICH_CLK_SPPLL0_DIV10] = "sppll0_div10_clk",
[TEGRASOC_WHICH_CLK_SPPLL0_DIV25] = "sppll0_div25_clk",
[TEGRASOC_WHICH_CLK_SPPLL0_DIV27] = "sppll0_div27_clk",
[TEGRASOC_WHICH_CLK_SPPLL1_VCO] = "sppll1_vco_clk",
[TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN] = "sppll1_clkoutpn_clk",
[TEGRASOC_WHICH_CLK_SPPLL1_DIV27] = "sppll1_div27_clk",
[TEGRASOC_WHICH_CLK_VPLL0_REF] = "vpll0_ref_clk",
[TEGRASOC_WHICH_CLK_VPLL0] = "vpll0_clk",
[TEGRASOC_WHICH_CLK_VPLL1] = "vpll1_clk",
[TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF] = "nvdisplay_p0_ref_clk",
[TEGRASOC_WHICH_CLK_RG0] = "rg0_clk",
[TEGRASOC_WHICH_CLK_RG1] = "rg1_clk",
[TEGRASOC_WHICH_CLK_DISPPLL] = "disppll_clk",
[TEGRASOC_WHICH_CLK_DISPHUBPLL] = "disphubpll_clk",
[TEGRASOC_WHICH_CLK_DSI_LP] = "dsi_lp_clk",
[TEGRASOC_WHICH_CLK_DSI_CORE] = "dsi_core_clk",
[TEGRASOC_WHICH_CLK_DSI_PIXEL] = "dsi_pixel_clk",
[TEGRASOC_WHICH_CLK_PRE_SOR0] = "pre_sor0_clk",
[TEGRASOC_WHICH_CLK_PRE_SOR1] = "pre_sor1_clk",
[TEGRASOC_WHICH_CLK_DP_LINK_REF] = "dp_link_ref_clk",
[TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT] = "sor_linka_input_clk",
[TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO] = "sor_linka_afifo_clk",
[TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M] = "sor_linka_afifo_m_clk",
[TEGRASOC_WHICH_CLK_RG0_M] = "rg0_m_clk",
[TEGRASOC_WHICH_CLK_RG1_M] = "rg1_m_clk",
[TEGRASOC_WHICH_CLK_SOR0_M] = "sor0_m_clk",
[TEGRASOC_WHICH_CLK_SOR1_M] = "sor1_m_clk",
[TEGRASOC_WHICH_CLK_PLLHUB] = "pllhub_clk",
[TEGRASOC_WHICH_CLK_SOR0] = "sor0_clk",
[TEGRASOC_WHICH_CLK_SOR1] = "sor1_clk",
[TEGRASOC_WHICH_CLK_SOR_PAD_INPUT] = "sor_pad_input_clk",
[TEGRASOC_WHICH_CLK_PRE_SF0] = "pre_sf0_clk",
[TEGRASOC_WHICH_CLK_SF0] = "sf0_clk",
[TEGRASOC_WHICH_CLK_SF1] = "sf1_clk",
[TEGRASOC_WHICH_CLK_PRE_SOR0_REF] = "pre_sor0_ref_clk",
[TEGRASOC_WHICH_CLK_PRE_SOR1_REF] = "pre_sor1_ref_clk",
[TEGRASOC_WHICH_CLK_SOR0_PLL_REF] = "sor0_ref_pll_clk",
[TEGRASOC_WHICH_CLK_SOR1_PLL_REF] = "sor1_ref_pll_clk",
[TEGRASOC_WHICH_CLK_SOR0_REF] = "sor0_ref_clk",
[TEGRASOC_WHICH_CLK_SOR1_REF] = "sor1_ref_clk",
[TEGRASOC_WHICH_CLK_DSI_PAD_INPUT] = "dsi_pad_input_clk",
[TEGRASOC_WHICH_CLK_OSC] = "osc_clk",
[TEGRASOC_WHICH_CLK_DSC] = "dsc_clk",
[TEGRASOC_WHICH_CLK_MAUD] = "maud_clk",
[TEGRASOC_WHICH_CLK_AZA_2XBIT] = "aza_2xbit_clk",
[TEGRASOC_WHICH_CLK_AZA_BIT] = "aza_bit_clk",
[TEGRASOC_WHICH_CLK_MIPI_CAL] = "mipi_cal_clk",
[TEGRASOC_WHICH_CLK_UART_FST_MIPI_CAL] = "uart_fst_mipi_cal_clk",
[TEGRASOC_WHICH_CLK_SOR0_DIV] = "sor0_div_clk",
[TEGRASOC_WHICH_CLK_DISP_ROOT] = "disp_root",
[TEGRASOC_WHICH_CLK_HUB_ROOT] = "hub_root",
[TEGRASOC_WHICH_CLK_PLLA_DISP] = "plla_disp",
[TEGRASOC_WHICH_CLK_PLLA_DISPHUB] = "plla_disphub",
[TEGRASOC_WHICH_CLK_PLLA] = "plla",
};
/*!
* @brief Get the clock handles.
*
* Look up the clock handle for each display clock once at boot time;
* all later operations (enabling/disabling clocks, querying current
* and maximum clock frequencies, and so on) reuse those handles.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
*
* @returns NV_STATUS
*/
NV_STATUS NV_API_CALL nv_clk_get_handles(
nv_state_t *nv)
{
NV_STATUS status = NV_OK;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NvU32 i, j, clk_count;
#if defined(NV_DEVM_CLK_BULK_GET_ALL_PRESENT)
struct clk_bulk_data *clks;
int ret;
/* devm_clk_bulk_get_all() returns the clock count or a negative errno */
ret = devm_clk_bulk_get_all(nvl->dev, &clks);
if (ret <= 0)
{
nv_printf(NV_DBG_ERRORS, "NVRM: nv_clk_get_handles, failed to get clk handles from devm_clk_bulk_get_all\n");
return NV_ERR_OBJECT_NOT_FOUND;
}
clk_count = ret;
//
// TEGRASOC_WHICH_CLK_MAX is maximum clock defined in below enum
// arch/nvalloc/unix/include/nv.h
// enum TEGRASOC_WHICH_CLK
//
for (i = 0U; i < clk_count; i++)
{
for (j = 0U; j < TEGRASOC_WHICH_CLK_MAX; j++)
{
if (!strcmp(osMapClk[j], clks[i].id))
{
nvl->disp_clk_handles.clk[j].handles = clks[i].clk;
nvl->disp_clk_handles.clk[j].clkName = __clk_get_name(clks[i].clk);
break;
}
}
if (j == TEGRASOC_WHICH_CLK_MAX)
{
nv_printf(NV_DBG_ERRORS,"NVRM: nv_clk_get_handles, failed to find TEGRA_SOC_WHICH_CLK for %s\n", clks[i].id);
return NV_ERR_OBJECT_NOT_FOUND;
}
}
#else
nv_printf(NV_DBG_ERRORS, "NVRM: devm_clk_bulk_get_all API is not present\n");
status = NV_ERR_OBJECT_NOT_FOUND;
#endif
return status;
}
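/*
 * Illustrative call sequence (a sketch, not code from this file): the
 * handles are fetched once, after which individual clocks are driven
 * through the wrappers below, e.g.:
 *
 *     NvU32 khz;
 *     if (nv_clk_get_handles(nv) == NV_OK)
 *     {
 *         nv_enable_clk(nv, TEGRASOC_WHICH_CLK_NVDISPLAY_DISP);
 *         nv_get_curr_freq(nv, TEGRASOC_WHICH_CLK_NVDISPLAY_DISP, &khz);
 *         nv_disable_clk(nv, TEGRASOC_WHICH_CLK_NVDISPLAY_DISP);
 *     }
 */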
/*!
* @brief Clear the clock handles assigned by nv_clk_get_handles()
*
* Clear the clock handle for each display clock at shutdown time.
* Since the handles were obtained through the devm-managed
* devm_clk_bulk_get_all() API, devm_clk_bulk_release_all() is invoked
* on all enumerated handles automatically when the module is unloaded,
* so there is no need to free them explicitly here.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
*/
void NV_API_CALL nv_clk_clear_handles(
nv_state_t *nv)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NvU32 i;
//
// TEGRASOC_WHICH_CLK_MAX is maximum clock defined in below enum
// arch/nvalloc/unix/include/nv.h
// enum TEGRASOC_WHICH_CLK
//
for (i = 0U; i < TEGRASOC_WHICH_CLK_MAX; i++)
{
if (nvl->disp_clk_handles.clk[i].handles != NULL)
{
nvl->disp_clk_handles.clk[i].handles = NULL;
}
}
}
/*!
* @brief Enable the clock.
*
* A clock must be enabled before any operation is performed on it.
* This function prepares the clock for use and enables it.
*
* for more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
*
* @returns NV_STATUS
*/
NV_STATUS NV_API_CALL nv_enable_clk(
nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NV_STATUS status = NV_ERR_GENERIC;
int ret;
if (nvl->disp_clk_handles.clk[whichClkOS].handles != NULL)
{
ret = clk_prepare_enable(nvl->disp_clk_handles.clk[whichClkOS].handles);
if (ret == 0)
{
status = NV_OK;
}
else
{
status = NV_ERR_FEATURE_NOT_ENABLED;
nv_printf(NV_DBG_ERRORS, "NVRM: clk_prepare_enable failed with error: %d\n", ret);
}
}
else
{
status = NV_ERR_OBJECT_NOT_FOUND;
}
return status;
}
/*!
* @brief Check whether the clock is enabled.
*
* Query the clock's enabled/disabled status before enabling or
* disabling it.
*
* for more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
*
* @returns clock status.
*/
NvBool NV_API_CALL nv_is_clk_enabled(
nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
bool ret;
if (nvl->disp_clk_handles.clk[whichClkOS].handles == NULL)
{
nv_printf(NV_DBG_ERRORS, "NVRM: clock handle requested not found.\n");
return NV_FALSE;
}
ret = __clk_is_enabled(nvl->disp_clk_handles.clk[whichClkOS].handles);
return ret == true;
}
/*!
* @brief Disable the clock.
*
* Disable the clock once all required work with it is done.
* This function disables the clock and unprepares it for further use.
*
* Note: make sure to disable clock before clk_put is called.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
*/
void NV_API_CALL nv_disable_clk(
nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
clk_disable_unprepare(nvl->disp_clk_handles.clk[whichClkOS].handles);
}
/*!
* @brief Get current clock frequency.
*
* Obtain the current clock rate for a clock source.
* This is only valid once the clock source has been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
* @param[out] pCurrFreqKHz Current clock frequency
*/
NV_STATUS NV_API_CALL nv_get_curr_freq(
nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pCurrFreqKHz)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NV_STATUS status = NV_ERR_GENERIC;
unsigned long currFreqHz;
if (nvl->disp_clk_handles.clk[whichClkOS].handles != NULL)
{
currFreqHz = clk_get_rate(nvl->disp_clk_handles.clk[whichClkOS].handles);
*pCurrFreqKHz = currFreqHz / 1000U;
if (*pCurrFreqKHz > 0U)
{
status = NV_OK;
}
else
{
status = NV_ERR_FEATURE_NOT_ENABLED;
}
}
else
{
status = NV_ERR_OBJECT_NOT_FOUND;
}
return status;
}
/*!
* @brief Get maximum clock frequency.
*
* Obtain the maximum clock rate a clock source can provide.
* This is only valid once the clock source has been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
* @param[out] pMaxFreqKHz Maximum clock frequency
*/
NV_STATUS NV_API_CALL nv_get_max_freq(
nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pMaxFreqKHz)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NV_STATUS status = NV_ERR_GENERIC;
long ret;
if (nvl->disp_clk_handles.clk[whichClkOS].handles != NULL)
{
//
// clk_round_rate(clk, rate) returns the rate (in Hz) the clock would
// actually run at for the requested rate, or a negative errno.
// Since 0 < currFreq < maxFreq < NV_U32_MAX, requesting NV_U32_MAX
// makes clk_round_rate() return the highest rate the clock can
// provide, i.e. maxFreq.
//
ret = clk_round_rate(nvl->disp_clk_handles.clk[whichClkOS].handles, NV_U32_MAX);
if (ret >= 0)
{
*pMaxFreqKHz = (NvU32) (ret / 1000);
status = NV_OK;
}
else
{
status = NV_ERR_FEATURE_NOT_ENABLED;
nv_printf(NV_DBG_ERRORS, "NVRM: clk_round_rate failed with error: %ld\n", ret);
}
}
else
{
status = NV_ERR_OBJECT_NOT_FOUND;
}
return status;
}
/*!
* @brief Get minimum clock frequency.
*
* Obtain the minimum clock rate a clock source can provide.
* This is only valid once the clock source has been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
* @param[out] pMinFreqKHz Minimum clock frequency
*/
NV_STATUS NV_API_CALL nv_get_min_freq(
nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pMinFreqKHz)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NV_STATUS status = NV_ERR_GENERIC;
long ret;
if (nvl->disp_clk_handles.clk[whichClkOS].handles != NULL)
{
//
// clk_round_rate(clk, rate) returns the rate (in Hz) the clock would
// actually run at for the requested rate, or a negative errno.
// Since 0 < minFreq < currFreq < maxFreq, requesting a rate of 0
// makes clk_round_rate() return the lowest rate the clock can
// provide, i.e. minFreq.
//
ret = clk_round_rate(nvl->disp_clk_handles.clk[whichClkOS].handles, 0);
if (ret >= 0)
{
*pMinFreqKHz = (NvU32) (ret / 1000);
status = NV_OK;
}
else
{
status = NV_ERR_FEATURE_NOT_ENABLED;
nv_printf(NV_DBG_ERRORS, "NVRM: clk_round_rate failed with error: %ld\n", ret);
}
}
else
{
status = NV_ERR_OBJECT_NOT_FOUND;
}
return status;
}
/*!
* @brief Set the clock frequency.
*
* Set the frequency of the clock source.
* This is only valid once the clock source has been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
* @param[in] reqFreqKHz Requested frequency in KHz
*/
NV_STATUS NV_API_CALL nv_set_freq(
nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 reqFreqKHz)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NV_STATUS status = NV_ERR_GENERIC;
int ret;
if (nvl->disp_clk_handles.clk[whichClkOS].handles != NULL)
{
ret = clk_set_rate(nvl->disp_clk_handles.clk[whichClkOS].handles,
reqFreqKHz * 1000U);
if (ret == 0)
{
status = NV_OK;
}
else
{
status = NV_ERR_INVALID_REQUEST;
nv_printf(NV_DBG_ERRORS, "NVRM: clk_set_rate failed with error: %d\n", ret);
}
}
else
{
status = NV_ERR_OBJECT_NOT_FOUND;
}
return status;
}
/*!
* @brief Set the parent clock.
*
* Set the parent of a clock source.
* This is only valid once the clock source and the parent
* clock have been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOSsource Enum value of the source clock
* @param[in] whichClkOSparent Enum value of the parent clock
*/
NV_STATUS NV_API_CALL nv_set_parent
(
nv_state_t *nv,
TEGRASOC_WHICH_CLK whichClkOSsource,
TEGRASOC_WHICH_CLK whichClkOSparent
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NV_STATUS status = NV_ERR_GENERIC;
int ret;
if ((nvl->disp_clk_handles.clk[whichClkOSsource].handles != NULL) &&
(nvl->disp_clk_handles.clk[whichClkOSparent].handles != NULL))
{
ret = clk_set_parent(nvl->disp_clk_handles.clk[whichClkOSsource].handles,
nvl->disp_clk_handles.clk[whichClkOSparent].handles);
if (ret == 0)
{
status = NV_OK;
}
else
{
status = NV_ERR_INVALID_REQUEST;
nv_printf(NV_DBG_ERRORS, "NVRM: clk_set_parent failed with error: %d\n", ret);
}
}
else
{
status = NV_ERR_OBJECT_NOT_FOUND;
}
return status;
}
/*!
* @brief Get the parent clock.
*
* Get the parent of a clock source.
* This is only valid once the clock source and the parent
* clock have been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOSsource Enum value of the source clock
* @param[out] pWhichClkOSparent Returned enum value of the parent clock
*/
NV_STATUS NV_API_CALL nv_get_parent
(
nv_state_t *nv,
TEGRASOC_WHICH_CLK whichClkOSsource,
TEGRASOC_WHICH_CLK *pWhichClkOSparent
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct clk *ret;
NvU32 i;
if (nvl->disp_clk_handles.clk[whichClkOSsource].handles != NULL)
{
ret = clk_get_parent(nvl->disp_clk_handles.clk[whichClkOSsource].handles);
if (!IS_ERR(ret))
{
const char *parentClkName = __clk_get_name(ret);
//
// TEGRASOC_WHICH_CLK_MAX is maximum clock defined in below enum
// arch/nvalloc/unix/include/nv.h
// enum TEGRASOC_WHICH_CLK
//
for (i = 0U; i < TEGRASOC_WHICH_CLK_MAX; i++)
{
if (!strcmp(nvl->disp_clk_handles.clk[i].clkName, parentClkName))
{
*pWhichClkOSparent = i;
return NV_OK;
}
}
nv_printf(NV_DBG_ERRORS, "NVRM: unexpected parent clock ref addr: %p\n", ret);
return NV_ERR_INVALID_OBJECT_PARENT;
}
else
{
nv_printf(NV_DBG_ERRORS, "NVRM: clk_get_parent failed with error: %ld\n", PTR_ERR(ret));
return NV_ERR_INVALID_POINTER;
}
}
nv_printf(NV_DBG_ERRORS, "NVRM: invalid source clock requested\n");
return NV_ERR_OBJECT_NOT_FOUND;
}
NV_STATUS NV_API_CALL nv_dp_uphy_pll_init
(
nv_state_t *nv,
NvU32 link_rate,
NvU32 lanes_bitmap
)
{
#if defined(NV_SOC_TEGRA_BPMP_ABI_H_PRESENT) && defined(NV_CMD_UPHY_DISPLAY_PORT_INIT_PRESENT)
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct tegra_bpmp *bpmp;
struct tegra_bpmp_message msg;
struct mrq_uphy_request req;
struct mrq_uphy_response resp;
int rc;
NV_STATUS status = NV_OK;
bpmp = tegra_bpmp_get(nvl->dev);
if (IS_ERR(bpmp))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: Error getting bpmp struct: %s\n",
PTR_ERR(bpmp));
return NV_ERR_GENERIC;
}
req.cmd = CMD_UPHY_DISPLAY_PORT_INIT;
req.display_port_init.link_rate = link_rate;
req.display_port_init.lanes_bitmap = lanes_bitmap;
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_UPHY;
msg.tx.data = &req;
msg.tx.size = sizeof(req);
msg.rx.data = &resp;
msg.rx.size = sizeof(resp);
rc = tegra_bpmp_transfer(bpmp, &msg);
if (rc)
{
nv_printf(NV_DBG_ERRORS, "DP UPHY pll initialization failed, rc - %d\n", rc);
status = NV_ERR_GENERIC;
}
tegra_bpmp_put(bpmp);
return status;
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}
NV_STATUS NV_API_CALL nv_dp_uphy_pll_deinit(nv_state_t *nv)
{
#if defined(NV_SOC_TEGRA_BPMP_ABI_H_PRESENT) && defined(NV_CMD_UPHY_DISPLAY_PORT_OFF_PRESENT)
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct tegra_bpmp *bpmp;
struct tegra_bpmp_message msg;
struct mrq_uphy_request req;
struct mrq_uphy_response resp;
int rc;
NV_STATUS status = NV_OK;
bpmp = tegra_bpmp_get(nvl->dev);
if (IS_ERR(bpmp))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: Error getting bpmp struct: %s\n",
PTR_ERR(bpmp));
return NV_ERR_GENERIC;
}
req.cmd = CMD_UPHY_DISPLAY_PORT_OFF;
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_UPHY;
msg.tx.data = &req;
msg.tx.size = sizeof(req);
msg.rx.data = &resp;
msg.rx.size = sizeof(resp);
rc = tegra_bpmp_transfer(bpmp, &msg);
if (rc)
{
nv_printf(NV_DBG_ERRORS, "DP UPHY pll de-initialization failed, rc - %d\n", rc);
status = NV_ERR_GENERIC;
}
tegra_bpmp_put(bpmp);
return status;
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}


@@ -0,0 +1,217 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#if defined(CONFIG_CRAY_XT)
enum {
NV_FORMAT_STATE_ORDINARY,
NV_FORMAT_STATE_INTRODUCTION,
NV_FORMAT_STATE_FLAGS,
NV_FORMAT_STATE_FIELD_WIDTH,
NV_FORMAT_STATE_PRECISION,
NV_FORMAT_STATE_LENGTH_MODIFIER,
NV_FORMAT_STATE_CONVERSION_SPECIFIER
};
enum {
NV_LENGTH_MODIFIER_NONE,
NV_LENGTH_MODIFIER_CHAR,
NV_LENGTH_MODIFIER_SHORT_INT,
NV_LENGTH_MODIFIER_LONG_INT,
NV_LENGTH_MODIFIER_LONG_LONG_INT
};
#define NV_IS_FLAG(c) \
((c) == '#' || (c) == '0' || (c) == '-' || (c) == ' ' || (c) == '+')
#define NV_IS_LENGTH_MODIFIER(c) \
((c) == 'h' || (c) == 'l' || (c) == 'L' || (c) == 'q' || (c) == 'j' || \
(c) == 'z' || (c) == 't')
#define NV_IS_CONVERSION_SPECIFIER(c) \
((c) == 'd' || (c) == 'i' || (c) == 'o' || (c) == 'u' || (c) == 'x' || \
(c) == 'X' || (c) == 'e' || (c) == 'E' || (c) == 'f' || (c) == 'F' || \
(c) == 'g' || (c) == 'G' || (c) == 'a' || (c) == 'A' || (c) == 'c' || \
(c) == 's' || (c) == 'p')
#define NV_MAX_NUM_INFO_MMRS 6
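/*
 * A sketch of the packing performed below, for a hypothetical call such
 * as nvos_forward_error_to_cray(dev, err, "addr %llx code %d", ...):
 * each vararg is consumed according to its conversion specifier and
 * packed 32 bits at a time into info_mmrs[], two 32-bit words per
 * 64-bit MMR. A long long therefore fills one whole MMR, while two
 * plain ints share one.
 */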
NV_STATUS nvos_forward_error_to_cray(
struct pci_dev *dev,
NvU32 error_number,
const char *format,
va_list ap
)
{
NvU32 num_info_mmrs;
NvU64 x = 0, info_mmrs[NV_MAX_NUM_INFO_MMRS];
int state = NV_FORMAT_STATE_ORDINARY;
int modifier = NV_LENGTH_MODIFIER_NONE;
NvU32 i, n = 0, m = 0;
memset(info_mmrs, 0, sizeof(info_mmrs));
while (*format != '\0')
{
switch (state)
{
case NV_FORMAT_STATE_ORDINARY:
if (*format == '%')
state = NV_FORMAT_STATE_INTRODUCTION;
break;
case NV_FORMAT_STATE_INTRODUCTION:
if (*format == '%')
{
state = NV_FORMAT_STATE_ORDINARY;
break;
}
case NV_FORMAT_STATE_FLAGS:
if (NV_IS_FLAG(*format))
{
state = NV_FORMAT_STATE_FLAGS;
break;
}
else if (*format == '*')
{
state = NV_FORMAT_STATE_FIELD_WIDTH;
break;
}
case NV_FORMAT_STATE_FIELD_WIDTH:
if ((*format >= '0') && (*format <= '9'))
{
state = NV_FORMAT_STATE_FIELD_WIDTH;
break;
}
else if (*format == '.')
{
state = NV_FORMAT_STATE_PRECISION;
break;
}
case NV_FORMAT_STATE_PRECISION:
if ((*format >= '0') && (*format <= '9'))
{
state = NV_FORMAT_STATE_PRECISION;
break;
}
else if (NV_IS_LENGTH_MODIFIER(*format))
{
state = NV_FORMAT_STATE_LENGTH_MODIFIER;
break;
}
else if (NV_IS_CONVERSION_SPECIFIER(*format))
{
state = NV_FORMAT_STATE_CONVERSION_SPECIFIER;
break;
}
case NV_FORMAT_STATE_LENGTH_MODIFIER:
if ((*format == 'h') || (*format == 'l'))
{
state = NV_FORMAT_STATE_LENGTH_MODIFIER;
break;
}
else if (NV_IS_CONVERSION_SPECIFIER(*format))
{
state = NV_FORMAT_STATE_CONVERSION_SPECIFIER;
break;
}
}
switch (state)
{
case NV_FORMAT_STATE_INTRODUCTION:
modifier = NV_LENGTH_MODIFIER_NONE;
break;
case NV_FORMAT_STATE_LENGTH_MODIFIER:
switch (*format)
{
case 'h':
modifier = (modifier == NV_LENGTH_MODIFIER_NONE)
? NV_LENGTH_MODIFIER_SHORT_INT
: NV_LENGTH_MODIFIER_CHAR;
break;
case 'l':
modifier = (modifier == NV_LENGTH_MODIFIER_NONE)
? NV_LENGTH_MODIFIER_LONG_INT
: NV_LENGTH_MODIFIER_LONG_LONG_INT;
break;
case 'q':
modifier = NV_LENGTH_MODIFIER_LONG_LONG_INT;
break;
default:
return NV_ERR_INVALID_ARGUMENT;
}
break;
case NV_FORMAT_STATE_CONVERSION_SPECIFIER:
switch (*format)
{
case 'c':
case 'd':
case 'i':
x = (unsigned int)va_arg(ap, int);
break;
case 'o':
case 'u':
case 'x':
case 'X':
switch (modifier)
{
case NV_LENGTH_MODIFIER_LONG_LONG_INT:
x = va_arg(ap, unsigned long long int);
break;
case NV_LENGTH_MODIFIER_LONG_INT:
x = va_arg(ap, unsigned long int);
break;
case NV_LENGTH_MODIFIER_CHAR:
case NV_LENGTH_MODIFIER_SHORT_INT:
case NV_LENGTH_MODIFIER_NONE:
x = va_arg(ap, unsigned int);
break;
}
break;
default:
return NV_ERR_INVALID_ARGUMENT;
}
state = NV_FORMAT_STATE_ORDINARY;
for (i = 0; i < ((modifier == NV_LENGTH_MODIFIER_LONG_LONG_INT)
? 2 : 1); i++)
{
if (m == NV_MAX_NUM_INFO_MMRS)
return NV_ERR_INSUFFICIENT_RESOURCES;
info_mmrs[m] = ((info_mmrs[m] << 32) | (x & 0xffffffff));
x >>= 32;
if (++n == 2)
{
m++;
n = 0;
}
}
}
format++;
}
num_info_mmrs = (m + (n != 0));
if (num_info_mmrs > 0)
cray_nvidia_report_error(dev, error_number, num_info_mmrs, info_mmrs);
return NV_OK;
}
#endif

kernel-open/nvidia/nv-dma.c Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,992 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "os_dsi_panel_props.h"
int bl_name_len;
static u32 *dsi_read_prop_array
(
const struct device_node *np,
struct property *prop,
u32 *array_size
)
{
u32 *val_array = NULL;
u32 count = 0;
u32 i = 0;
int ret = 0;
if (!prop)
return NULL;
#if defined(NV_OF_PROPERTY_COUNT_ELEMS_OF_SIZE_PRESENT)
count = of_property_count_elems_of_size(np, prop->name, sizeof(u32));
#else
nv_printf(NV_DBG_ERRORS, "NVRM: dsi_read_prop_array, of_property_count_elems_of_size not present\n");
return ERR_PTR(-ENOSYS);
#endif
if (count > 0)
{
NV_KMALLOC(val_array, sizeof(u32) * count);
if (val_array == NULL)
{
nv_printf(NV_DBG_ERRORS, "NVRM: dsi_read_prop_array, failed to allocate memory for values of DSI property %s", prop->name);
return ERR_PTR(-ENOMEM);
}
}
else
{
nv_printf(NV_DBG_ERRORS, "NVRM: dsi_read_prop_array, failed to get elements count in property %s\n", prop->name);
return ERR_PTR(-ENOSYS);
}
#if defined(NV_OF_PROPERTY_READ_VARIABLE_U32_ARRAY_PRESENT)
ret = of_property_read_variable_u32_array(np, prop->name,
val_array, 0, count);
#else
nv_printf(NV_DBG_ERRORS, "NVRM: dsi_read_prop_array, of_property_read_variable_u32_array not present\n");
ret = -ENOSYS;
#endif
if (IS_ERR(&ret))
{
nv_printf(NV_DBG_ERRORS, "NVRM: dsi_read_prop_array, failed to read property %s", prop->name);
NV_KFREE(val_array, sizeof(u32) * count);
val_array = NULL;
return ERR_PTR(ret);
}
*array_size = count;
return val_array;
}
static int dsi_get_panel_timings(struct device_node *np_panel, DSI_PANEL_INFO *panelInfo)
{
struct device_node *np = NULL;
NvU32 temp;
DSITIMINGS *modes = &panelInfo->dsiTimings;
// Get Panel Node from active-panel phandle
np = of_parse_phandle(np_panel, "nvidia,panel-timings", 0);
if (!np) {
nv_printf(NV_DBG_ERRORS, "NVRM: could not find panel timings node for DSI Panel\n");
return -ENOENT;
}
if (!of_property_read_u32(np, "clock-frequency", &temp)) {
modes->pixelClkRate = temp;
} else {
goto parse_mode_timings_fail;
}
if (!of_property_read_u32(np, "hsync-len", &temp)) {
modes->hSyncWidth = temp;
} else {
goto parse_mode_timings_fail;
}
if (!of_property_read_u32(np, "vsync-len", &temp)) {
modes->vSyncWidth = temp;
} else {
goto parse_mode_timings_fail;
}
if (!of_property_read_u32(np, "hback-porch", &temp)) {
modes->hBackPorch = temp;
} else {
goto parse_mode_timings_fail;
}
if (!of_property_read_u32(np, "vback-porch", &temp)) {
modes->vBackPorch = temp;
} else {
goto parse_mode_timings_fail;
}
if (!of_property_read_u32(np, "hactive", &temp)) {
modes->hActive = temp;
} else {
goto parse_mode_timings_fail;
}
if (!of_property_read_u32(np, "vactive", &temp)) {
modes->vActive = temp;
} else {
goto parse_mode_timings_fail;
}
if (!of_property_read_u32(np, "hfront-porch", &temp)) {
modes->hFrontPorch = temp;
} else {
goto parse_mode_timings_fail;
}
if (!of_property_read_u32(np, "vfront-porch", &temp)) {
modes->vFrontPorch = temp;
} else {
goto parse_mode_timings_fail;
}
of_node_put(np);
return 0;
parse_mode_timings_fail:
nv_printf(NV_DBG_ERRORS, "NVRM: One of the mode timings is missing in DSI Panel mode-timings!\n");
of_node_put(np);
return -ENOENT;
}
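/*
 * Illustrative device tree fragment matched by the parser above; the
 * property names are the ones read here, the values are made up:
 *
 *     panel-timings {
 *         clock-frequency = <154000000>;
 *         hactive = <1920>;      vactive = <1080>;
 *         hsync-len = <16>;      vsync-len = <2>;
 *         hback-porch = <32>;    vback-porch = <16>;
 *         hfront-porch = <120>;  vfront-porch = <3>;
 *     };
 */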
static int dsi_get_panel_gpio(struct device_node *node, DSI_PANEL_INFO *panel)
{
int count;
char *label = NULL;
// If gpios are already populated, just return
if (panel->panel_gpio_populated)
return 0;
if (!node) {
nv_printf(NV_DBG_ERRORS, "NVRM: DSI Panel node not available\n");
return -ENOENT;
}
#if defined(NV_OF_GET_NAME_GPIO_PRESENT)
panel->panel_gpio[DSI_GPIO_LCD_RESET] =
of_get_named_gpio(node, "nvidia,panel-rst-gpio", 0);
panel->panel_gpio[DSI_GPIO_PANEL_EN] =
of_get_named_gpio(node, "nvidia,panel-en-gpio", 0);
panel->panel_gpio[DSI_GPIO_PANEL_EN_1] =
of_get_named_gpio(node, "nvidia,panel-en-1-gpio", 0);
panel->panel_gpio[DSI_GPIO_BL_ENABLE] =
of_get_named_gpio(node, "nvidia,panel-bl-en-gpio", 0);
panel->panel_gpio[DSI_GPIO_BL_PWM] =
of_get_named_gpio(node, "nvidia,panel-bl-pwm-gpio", 0);
panel->panel_gpio[DSI_GPIO_TE] =
of_get_named_gpio(node, "nvidia,te-gpio", 0);
panel->panel_gpio[DSI_GPIO_AVDD_AVEE_EN] =
of_get_named_gpio(node, "nvidia,avdd-avee-en-gpio", 0);
panel->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN] =
of_get_named_gpio(node, "nvidia,vdd-1v8-lcd-en-gpio", 0);
panel->panel_gpio[DSI_GPIO_BRIDGE_EN_0] =
of_get_named_gpio(node, "nvidia,panel-bridge-en-0-gpio", 0);
panel->panel_gpio[DSI_GPIO_BRIDGE_EN_1] =
of_get_named_gpio(node, "nvidia,panel-bridge-en-1-gpio", 0);
panel->panel_gpio[DSI_GPIO_BRIDGE_REFCLK_EN] =
of_get_named_gpio(node, "nvidia,panel-bridge-refclk-en-gpio", 0);
for (count = 0; count < DSI_N_GPIO_PANEL; count++) {
if (gpio_is_valid(panel->panel_gpio[count])) {
switch (count) {
case DSI_GPIO_LCD_RESET:
label = "dsi-panel-reset";
break;
case DSI_GPIO_PANEL_EN:
label = "dsi-panel-en";
break;
case DSI_GPIO_PANEL_EN_1:
label = "dsi-panel-en-1";
break;
case DSI_GPIO_BL_ENABLE:
label = "dsi-panel-bl-enable";
break;
case DSI_GPIO_BL_PWM:
label = "dsi-panel-pwm";
break;
case DSI_GPIO_TE:
if (panel->dsiEnVRR != NV_TRUE) {
panel->panel_gpio[count] = -1;
} else {
label = "dsi-panel-te";
panel->dsiVrrPanelSupportsTe = NV_TRUE;
}
break;
case DSI_GPIO_AVDD_AVEE_EN:
label = "dsi-panel-avdd-avee-en";
break;
case DSI_GPIO_VDD_1V8_LCD_EN:
label = "dsi-panel-vdd-1v8-lcd-en";
break;
case DSI_GPIO_BRIDGE_EN_0:
label = "dsi-panel-bridge-en-0";
break;
case DSI_GPIO_BRIDGE_EN_1:
label = "dsi-panel-bridge-en-1";
break;
case DSI_GPIO_BRIDGE_REFCLK_EN:
label = "dsi-panel-bridge-refclk-en";
break;
default:
nv_printf(NV_DBG_INFO, "NVRM: DSI Panel invalid gpio entry at index %d\n", count);
}
if (label) {
gpio_request(panel->panel_gpio[count], label);
label = NULL;
}
}
}
panel->panel_gpio_populated = true;
return 0;
#else
return -EINVAL;
#endif
}
static int dsi_parse_pps_data
(
const struct device_node *node,
struct property *prop,
NvU32 *pps
)
{
__be32 *prop_val_ptr;
u32 count = 0;
if (!prop)
return -ENOENT;
prop_val_ptr = prop->value;
#define PPS_COUNT 32
for (count = 0; count < PPS_COUNT; count++) {
pps[count] = be32_to_cpu(*prop_val_ptr++);
}
#undef PPS_COUNT
return 0;
}
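/*
 * Note: the custom PPS (DSC Picture Parameter Set) is read as a fixed
 * array of 32 big-endian u32 cells; the "nvidia,custom-pps-data"
 * property consumed by the caller below is expected to provide at
 * least that many cells, since the length is not validated here.
 */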
static int parse_dsi_properties(const struct device_node *np_dsi, DSI_PANEL_INFO *dsi)
{
u32 temp;
int ret = 0;
const __be32 *p;
struct property *prop;
struct device_node *np_dsi_panel;
// Get Panel Node from active-panel phandle
np_dsi_panel = of_parse_phandle(np_dsi, "nvidia,active-panel", 0);
if (np_dsi_panel == NULL)
{
nv_printf(NV_DBG_ERRORS, "NVRM: None of the dsi panel nodes enabled in DT!\n");
return -EINVAL;
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,enable-hs-clk-in-lp-mode", &temp))
dsi->enable_hs_clock_on_lp_cmd_mode = (u8)temp;
if (of_property_read_bool(np_dsi_panel,
"nvidia,set-max-dsi-timeout"))
dsi->set_max_timeout = true;
if (of_property_read_bool(np_dsi_panel,
"nvidia,use-legacy-dphy-core"))
dsi->use_legacy_dphy_core = true;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-refresh-rate-adj", &temp))
dsi->refresh_rate_adj = (u8)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-n-data-lanes", &temp))
dsi->n_data_lanes = (u8)temp;
if (of_property_read_bool(np_dsi_panel,
"nvidia,swap-data-lane-polarity"))
dsi->swap_data_lane_polarity = true;
if (of_property_read_bool(np_dsi_panel,
"nvidia,swap-clock-lane-polarity"))
dsi->swap_clock_lane_polarity = true;
if (of_property_read_bool(np_dsi_panel,
"nvidia,reverse-clock-polarity"))
dsi->reverse_clock_polarity = true;
if (!of_property_read_u32_array(np_dsi_panel,
"nvidia,lane-xbar-ctrl",
dsi->lane_xbar_ctrl, dsi->n_data_lanes))
dsi->lane_xbar_exists = true;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-type", &temp))
{
dsi->dsiPhyType = (u8)temp;
if ((temp != DSI_DPHY) &&
(temp != DSI_CPHY))
{
nv_printf(NV_DBG_ERRORS,"NVRM: invalid dsi phy type 0x%x\n", temp);
ret = -EINVAL;
goto parse_dsi_settings_fail;
}
}
if (of_property_read_bool(np_dsi_panel,
"nvidia,cphy-data-scrambling"))
dsi->en_data_scrambling = true;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-video-burst-mode", &temp))
{
dsi->video_burst_mode = (u8)temp;
if ((temp != DSI_VIDEO_NON_BURST_MODE) &&
(temp != DSI_VIDEO_NON_BURST_MODE_WITH_SYNC_END) &&
(temp != DSI_VIDEO_BURST_MODE_LOWEST_SPEED) &&
(temp != DSI_VIDEO_BURST_MODE_LOW_SPEED) &&
(temp != DSI_VIDEO_BURST_MODE_MEDIUM_SPEED) &&
(temp != DSI_VIDEO_BURST_MODE_FAST_SPEED) &&
(temp != DSI_VIDEO_BURST_MODE_FASTEST_SPEED))
{
nv_printf(NV_DBG_ERRORS,"NVRM: invalid dsi video burst mode\n");
ret = -EINVAL;
goto parse_dsi_settings_fail;
}
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-pixel-format", &temp))
{
dsi->pixel_format = (u8)temp;
if ((temp != DSI_PIXEL_FORMAT_16BIT_P) &&
(temp != DSI_PIXEL_FORMAT_18BIT_P) &&
(temp != DSI_PIXEL_FORMAT_18BIT_NP) &&
(temp != DSI_PIXEL_FORMAT_24BIT_P) &&
(temp != DSI_PIXEL_FORMAT_30BIT_P) &&
(temp != DSI_PIXEL_FORMAT_36BIT_P) &&
(temp != DSI_PIXEL_FORMAT_8BIT_DSC) &&
(temp != DSI_PIXEL_FORMAT_10BIT_DSC) &&
(temp != DSI_PIXEL_FORMAT_12BIT_DSC) &&
(temp != DSI_PIXEL_FORMAT_16BIT_DSC))
{
nv_printf(NV_DBG_ERRORS,"NVRM: invalid dsi pixel format\n");
ret = -EINVAL;
goto parse_dsi_settings_fail;
}
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-refresh-rate", &temp))
dsi->refresh_rate = (u8)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-rated-refresh-rate", &temp))
dsi->rated_refresh_rate = (u8)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-virtual-channel", &temp))
{
dsi->virtual_channel = (u8)temp;
if ((temp != DSI_VIRTUAL_CHANNEL_0) &&
(temp != DSI_VIRTUAL_CHANNEL_1) &&
(temp != DSI_VIRTUAL_CHANNEL_2) &&
(temp != DSI_VIRTUAL_CHANNEL_3))
{
nv_printf(NV_DBG_ERRORS,"NVRM: invalid dsi virtual channel\n");
ret = -EINVAL;
goto parse_dsi_settings_fail;
}
}
if (!of_property_read_u32(np_dsi_panel, "nvidia,dsi-instance", &temp))
dsi->dsi_instance = (u8)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-panel-reset", &temp))
dsi->panel_reset = (u8)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-te-polarity-low", &temp))
dsi->te_polarity_low = (u8)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-lp00-pre-panel-wakeup", &temp))
dsi->lp00_pre_panel_wakeup = (u8)temp;
if (of_find_property(np_dsi_panel,
"nvidia,dsi-bl-name", &bl_name_len))
{
NV_KMALLOC(dsi->bl_name, sizeof(u8) * bl_name_len);
if (of_property_read_string(np_dsi_panel,
"nvidia,dsi-bl-name",
(const char **)&dsi->bl_name)) {
nv_printf(NV_DBG_ERRORS, "NVRM: dsi error parsing bl name\n");
NV_KFREE(dsi->bl_name, sizeof(u8) * bl_name_len);
}
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-ganged-type", &temp)) {
dsi->ganged_type = (u8)temp;
/* Set pixel width to 1 by default for even-odd split */
if (dsi->ganged_type == DSI_GANGED_SYMMETRIC_EVEN_ODD)
dsi->even_odd_split_width = 1;
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-even-odd-pixel-width", &temp))
dsi->even_odd_split_width = temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-ganged-overlap", &temp)) {
dsi->ganged_overlap = (u16)temp;
if (!dsi->ganged_type)
nv_printf(NV_DBG_ERRORS, "NVRM: specified ganged overlap, but no ganged type\n");
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-ganged-swap-links", &temp)) {
dsi->ganged_swap_links = (bool)temp;
if (!dsi->ganged_type)
nv_printf(NV_DBG_ERRORS, "NVRM: specified ganged swapped links, but no ganged type\n");
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-ganged-write-to-all-links", &temp)) {
dsi->ganged_write_to_all_links = (bool)temp;
if (!dsi->ganged_type)
nv_printf(NV_DBG_ERRORS, "NVRM: specified ganged write to all links, but no ganged type\n");
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-split-link-type", &temp))
dsi->split_link_type = (u8)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-suspend-aggr", &temp))
dsi->suspend_aggr = (u8)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-edp-bridge", &temp))
dsi->dsi2edp_bridge_enable = (bool)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-lvds-bridge", &temp))
dsi->dsi2lvds_bridge_enable = (bool)temp;
of_property_for_each_u32(np_dsi_panel, "nvidia,dsi-dpd-pads", prop, p, temp)
dsi->dpd_dsi_pads |= (u32)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-power-saving-suspend", &temp))
dsi->power_saving_suspend = (bool)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-ulpm-not-support", &temp))
dsi->ulpm_not_supported = (bool)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-video-data-type", &temp)) {
dsi->video_data_type = (u8)temp;
if ((temp != DSI_VIDEO_TYPE_VIDEO_MODE) &&
(temp != DSI_VIDEO_TYPE_COMMAND_MODE))
{
nv_printf(NV_DBG_ERRORS, "NVRM: invalid dsi video data type\n");
ret = -EINVAL;
goto parse_dsi_settings_fail;
}
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-video-clock-mode", &temp)) {
dsi->video_clock_mode = (u8)temp;
if ((temp != DSI_VIDEO_CLOCK_CONTINUOUS) &&
(temp != DSI_VIDEO_CLOCK_TX_ONLY))
{
nv_printf(NV_DBG_ERRORS, "NVRM: invalid dsi video clk mode\n");
ret = -EINVAL;
goto parse_dsi_settings_fail;
}
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,enable-vrr", &temp))
dsi->dsiEnVRR = (u8)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,vrr-force-set-te-pin", &temp))
dsi->dsiForceSetTePin = (u8)temp;
if (of_property_read_bool(np_dsi_panel,
"nvidia,send-init-cmds-early"))
dsi->sendInitCmdsEarly = true;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-n-init-cmd", &temp)) {
dsi->n_init_cmd = (u16)temp;
}
if (dsi->n_init_cmd > 0) {
dsi->dsi_init_cmd_array = dsi_read_prop_array(np_dsi_panel,
of_find_property(np_dsi_panel, "nvidia,dsi-init-cmd", NULL),
&dsi->init_cmd_array_size);
}
if (dsi->n_init_cmd &&
IS_ERR_OR_NULL(dsi->dsi_init_cmd_array)) {
nv_printf(NV_DBG_ERRORS, "NVRM: DSI init cmd parsing from DT failed\n");
ret = -EINVAL;
goto parse_dsi_settings_fail;
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-n-postvideo-cmd", &temp)) {
dsi->n_postvideo_cmd = (u16)temp;
}
if (dsi->n_postvideo_cmd > 0) {
dsi->dsi_postvideo_cmd_array = dsi_read_prop_array(np_dsi_panel,
of_find_property(np_dsi_panel, "nvidia,dsi-postvideo-cmd", NULL),
&dsi->postvideo_cmd_array_size);
}
if (dsi->n_postvideo_cmd &&
IS_ERR_OR_NULL(dsi->dsi_postvideo_cmd_array)) {
nv_printf(NV_DBG_ERRORS, "NVRM: DSI postvideo cmd parsing from DT failed\n");
goto parse_dsi_settings_fail;
};
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-n-suspend-cmd", &temp)) {
dsi->n_suspend_cmd = (u16)temp;
}
if (dsi->n_suspend_cmd > 0) {
dsi->dsi_suspend_cmd_array = dsi_read_prop_array(np_dsi_panel,
of_find_property(np_dsi_panel, "nvidia,dsi-suspend-cmd", NULL),
&dsi->suspend_cmd_array_size);
}
if (dsi->n_suspend_cmd &&
IS_ERR_OR_NULL(dsi->dsi_suspend_cmd_array)) {
nv_printf(NV_DBG_ERRORS, "NVRM: DSI suspend cmd parsing from DT failed\n");
ret = -EINVAL;
goto parse_dsi_settings_fail;
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-n-early-suspend-cmd", &temp)) {
dsi->n_early_suspend_cmd = (u16)temp;
}
if (dsi->n_early_suspend_cmd > 0) {
dsi->dsi_early_suspend_cmd_array = dsi_read_prop_array(np_dsi_panel,
of_find_property(np_dsi_panel, "nvidia,dsi-early-suspend-cmd", NULL),
&dsi->early_suspend_cmd_array_size);
}
if (dsi->n_early_suspend_cmd &&
IS_ERR_OR_NULL(dsi->dsi_early_suspend_cmd_array)) {
nv_printf(NV_DBG_ERRORS, "NVRM: DSI early suspend cmd parsing from DT failed\n");
ret = -EINVAL;
goto parse_dsi_settings_fail;
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-suspend-stop-stream-late", &temp)) {
dsi->suspend_stop_stream_late = (bool)temp;
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-n-late-resume-cmd", &temp)) {
dsi->n_late_resume_cmd = (u16)temp;
}
if (dsi->n_late_resume_cmd > 0) {
dsi->dsi_late_resume_cmd_array = dsi_read_prop_array(np_dsi_panel,
of_find_property(np_dsi_panel, "nvidia,dsi-late-resume-cmd", NULL),
&dsi->late_resume_cmd_array_size);
}
if (dsi->n_late_resume_cmd &&
IS_ERR_OR_NULL(dsi->dsi_late_resume_cmd_array)) {
nv_printf(NV_DBG_ERRORS, "NVRM: DSI late resume cmd parsing from DT failed\n");
ret = -EINVAL;
goto parse_dsi_settings_fail;
}
dsi->pktSeq_array = dsi_read_prop_array(np_dsi_panel,
of_find_property(np_dsi_panel, "nvidia,dsi-pkt-seq", NULL),
&dsi->pktSeq_array_size);
if (IS_ERR(dsi->pktSeq_array)) {
nv_printf(NV_DBG_ERRORS, "NVRM: DSI packet seq parsing from DT fail\n");
ret = -EINVAL;
goto parse_dsi_settings_fail;
}
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-hsdexit", &temp))
dsi->phyTimingNs.t_hsdexit_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-hstrail", &temp))
dsi->phyTimingNs.t_hstrail_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-datzero", &temp))
dsi->phyTimingNs.t_datzero_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-hsprepare", &temp))
dsi->phyTimingNs.t_hsprepare_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-hsprebegin", &temp))
dsi->phyTimingNs.t_hsprebegin_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-hspost", &temp))
dsi->phyTimingNs.t_hspost_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-clktrail", &temp))
dsi->phyTimingNs.t_clktrail_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-clkpost", &temp))
dsi->phyTimingNs.t_clkpost_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-clkzero", &temp))
dsi->phyTimingNs.t_clkzero_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-tlpx", &temp))
dsi->phyTimingNs.t_tlpx_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-clkprepare", &temp))
dsi->phyTimingNs.t_clkprepare_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-clkpre", &temp))
dsi->phyTimingNs.t_clkpre_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-wakeup", &temp))
dsi->phyTimingNs.t_wakeup_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-taget", &temp))
dsi->phyTimingNs.t_taget_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-tasure", &temp))
dsi->phyTimingNs.t_tasure_ns = (u16)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsi-phy-tago", &temp))
dsi->phyTimingNs.t_tago_ns = (u16)temp;
if (of_property_read_bool(np_dsi_panel,
"nvidia,enable-link-compression"))
dsi->dsiDscEnable = true;
if (of_property_read_bool(np_dsi_panel,
"nvidia,enable-dual-dsc"))
dsi->dsiDscEnDualDsc = true;
if (of_property_read_bool(np_dsi_panel,
"nvidia,enable-block-pred"))
dsi->dsiDscEnBlockPrediction = true;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,slice-height", &temp))
dsi->dsiDscSliceHeight = (u32)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,num-of-slices", &temp))
dsi->dsiDscNumSlices = (u32)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,comp-rate", &temp))
dsi->dsiDscBpp = (u32)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,version-major", &temp))
dsi->dsiDscDecoderMajorVersion = (u32)temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,version-minor", &temp))
dsi->dsiDscDecoderMinorVersion = (u32)temp;
if (of_property_read_bool(np_dsi_panel,
"nvidia,use-custom-pps")) {
dsi->dsiDscUseCustomPPS = true;
ret = dsi_parse_pps_data(np_dsi_panel,
of_find_property(np_dsi_panel,
"nvidia,custom-pps-data", NULL),
dsi->dsiDscCustomPPSData);
if (ret != NV_OK) {
nv_printf(NV_DBG_ERRORS, "NVRM: Parsing DSI Panel Custom PPS data failed\n");
goto parse_dsi_settings_fail;
}
}
if (of_property_read_bool(np_dsi, "nvidia,dsi-csi-loopback"))
dsi->dsi_csi_loopback = 1;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,vpll0-rate-hz", &temp))
dsi->vpll0_rate_hz = temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsipll-vco-rate-hz", &temp))
dsi->dsipll_vco_rate_hz = temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsipll-clkouta-rate-hz", &temp))
dsi->dsipll_clkouta_rate_hz = temp;
if (!of_property_read_u32(np_dsi_panel,
"nvidia,dsipll-clkoutpn-rate-hz", &temp))
dsi->dsipll_clkoutpn_rate_hz = temp;
ret = dsi_get_panel_timings(np_dsi_panel, dsi);
if (ret != NV_OK) {
nv_printf(NV_DBG_ERRORS, "NVRM: Parsing DSI Panel Timings failed\n");
goto parse_dsi_settings_fail;
}
ret = dsi_get_panel_gpio(np_dsi_panel, dsi);
if (ret != NV_OK) {
nv_printf(NV_DBG_ERRORS, "NVRM: Parsing DSI Panel GPIOs failed\n");
goto parse_dsi_settings_fail;
}
parse_dsi_settings_fail:
return ret;
}
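/*
 * Aside (illustrative sketch, not part of this driver): the init, postvideo,
 * suspend, early-suspend and late-resume blocks above all repeat the same
 * "read the command count, then read the command array" pattern. A helper
 * along these lines could factor that out, assuming only the
 * dsi_read_prop_array() signature used above; the helper name is
 * hypothetical:
 */
static u32 *dsi_parse_cmd_array(struct device_node *np, const char *cnt_prop,
                                const char *arr_prop, u16 *n_cmd,
                                u32 *arr_size)
{
    u32 temp;

    if (!of_property_read_u32(np, cnt_prop, &temp))
        *n_cmd = (u16)temp;
    if (*n_cmd == 0)
        return NULL;

    /* Delegate the raw array read to the existing helper. */
    return dsi_read_prop_array(np, of_find_property(np, arr_prop, NULL),
                               arr_size);
}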
NvBool
nv_dsi_is_panel_connected
(
nv_state_t *nv
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct device_node *np_dsi = NULL;
struct device_node *np_dsi_panel = NULL;
NvBool ret = NV_TRUE;
np_dsi = of_get_child_by_name(nvl->dev->of_node, "dsi");
if (np_dsi && !of_device_is_available(np_dsi)) {
ret = NV_FALSE;
goto fail;
}
np_dsi_panel = of_parse_phandle(np_dsi, "nvidia,active-panel", 0);
if (np_dsi_panel == NULL)
{
ret = NV_FALSE;
}
fail:
of_node_put(np_dsi_panel);
of_node_put(np_dsi);
return ret;
}
NV_STATUS
nv_dsi_parse_panel_props
(
nv_state_t *nv,
void *dsiPanelInfo
)
{
int ret = NV_OK;
struct device_node *np_dsi = NULL;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
np_dsi = of_get_child_by_name(nvl->dev->of_node, "dsi");
if (np_dsi && !of_device_is_available(np_dsi)) {
nv_printf(NV_DBG_ERRORS, "NVRM: dsi node not enabled in DT\n");
of_node_put(np_dsi);
return NV_ERR_NOT_SUPPORTED;
}
ret = parse_dsi_properties(np_dsi, (DSI_PANEL_INFO *)dsiPanelInfo);
return ret;
}
NV_STATUS
nv_dsi_panel_enable
(
nv_state_t *nv,
void *dsiPanelInfo
)
{
int ret = NV_OK;
DSI_PANEL_INFO *panelInfo = dsiPanelInfo;
if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN])) {
gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN], 1);
}
mdelay(10); // At least 1 ms required; 10 ms used for margin
if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_AVDD_AVEE_EN])) {
gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_AVDD_AVEE_EN], 1);
}
mdelay(20); // At least 10 ms required; 20 ms used for margin
// If backlight enable gpio is specified, set it to output direction and pull high
if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_BL_ENABLE])) {
gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_BL_ENABLE], 1);
}
mdelay(10);
if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_PANEL_EN])) {
gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_PANEL_EN], 1);
}
mdelay(20); // At least 10 ms required; 20 ms used for margin
return ret;
}
NV_STATUS
nv_dsi_panel_reset
(
nv_state_t *nv,
void *dsiPanelInfo
)
{
int ret = NV_OK;
int en_panel_rst = -1;
DSI_PANEL_INFO *panelInfo = dsiPanelInfo;
// Assert and deassert Panel reset GPIO
if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_LCD_RESET])) {
en_panel_rst = panelInfo->panel_gpio[DSI_GPIO_LCD_RESET];
} else {
nv_printf(NV_DBG_ERRORS, "DSI Panel reset gpio invalid\n");
goto fail;
}
ret = gpio_direction_output(en_panel_rst, 1);
if (ret < 0) {
nv_printf(NV_DBG_ERRORS, "Deasserting DSI panel reset gpio failed\n");
goto fail;
}
mdelay(10);
ret = gpio_direction_output(en_panel_rst, 0);
if (ret < 0) {
nv_printf(NV_DBG_ERRORS, "Asserting DSI panel reset gpio failed\n");
goto fail;
}
mdelay(10);
ret = gpio_direction_output(en_panel_rst, 1);
if (ret < 0) {
nv_printf(NV_DBG_ERRORS, "Deasserting Dsi panel reset gpio after asserting failed\n");
goto fail;
}
fail:
return ret;
}
void nv_dsi_panel_disable
(
nv_state_t *nv,
void *dsiPanelInfo
)
{
DSI_PANEL_INFO *panelInfo = dsiPanelInfo;
if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_BL_ENABLE])) {
gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_BL_ENABLE], 0);
}
mdelay(10);
if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_PANEL_EN])) {
gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_PANEL_EN], 0);
}
// Assert Panel reset GPIO
if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_LCD_RESET])) {
gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_LCD_RESET], 0);
}
mdelay(20);
if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_AVDD_AVEE_EN])) {
gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_AVDD_AVEE_EN], 0);
}
mdelay(10);
if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN])) {
gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN], 0);
}
}
void nv_dsi_panel_cleanup
(
nv_state_t *nv,
void *dsiPanelInfo
)
{
int count;
DSI_PANEL_INFO *panelInfo = dsiPanelInfo;
if (!IS_ERR_OR_NULL(panelInfo->dsi_init_cmd_array))
NV_KFREE(panelInfo->dsi_init_cmd_array, sizeof(u32) * panelInfo->init_cmd_array_size);
if (!IS_ERR_OR_NULL(panelInfo->dsi_early_suspend_cmd_array))
NV_KFREE(panelInfo->dsi_early_suspend_cmd_array, sizeof(u32) * panelInfo->early_suspend_cmd_array_size);
if (!IS_ERR_OR_NULL(panelInfo->dsi_late_resume_cmd_array))
NV_KFREE(panelInfo->dsi_late_resume_cmd_array, sizeof(u32) * panelInfo->late_resume_cmd_array_size);
if (!IS_ERR_OR_NULL(panelInfo->dsi_postvideo_cmd_array))
NV_KFREE(panelInfo->dsi_postvideo_cmd_array, sizeof(u32) * panelInfo->postvideo_cmd_array_size);
if (!IS_ERR_OR_NULL(panelInfo->dsi_suspend_cmd_array))
NV_KFREE(panelInfo->dsi_suspend_cmd_array, sizeof(u32) * panelInfo->suspend_cmd_array_size);
if (!IS_ERR_OR_NULL(panelInfo->pktSeq_array))
NV_KFREE(panelInfo->pktSeq_array, sizeof(u32) * panelInfo->pktSeq_array_size);
if (panelInfo->bl_name != NULL) {
NV_KFREE(panelInfo->bl_name, sizeof(u8) * bl_name_len);
}
for (count = 0; count < DSI_N_GPIO_PANEL; count++) {
if (gpio_is_valid(panelInfo->panel_gpio[count])) {
gpio_free(panelInfo->panel_gpio[count]);
}
}
panelInfo->panel_gpio_populated = false;
}


@@ -0,0 +1,395 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-reg.h"
#include "nv-frontend.h"
MODULE_LICENSE("Dual MIT/GPL");
MODULE_INFO(supported, "external");
MODULE_VERSION(NV_VERSION_STRING);
MODULE_ALIAS_CHARDEV_MAJOR(NV_MAJOR_DEVICE_NUMBER);
/*
* MODULE_IMPORT_NS() is added by commit id 8651ec01daeda
* ("module: add support for symbol namespaces") in 5.4
*/
#if defined(MODULE_IMPORT_NS)
/*
* DMA_BUF namespace is added by commit id 16b0314aa746
* ("dma-buf: move dma-buf symbols into the DMA_BUF module namespace") in 5.16
*/
MODULE_IMPORT_NS(DMA_BUF);
#endif
static NvU32 nv_num_instances;
// lock required to protect table.
struct semaphore nv_module_table_lock;
// minor number table
nvidia_module_t *nv_minor_num_table[NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX + 1];
int nvidia_init_module(void);
void nvidia_exit_module(void);
/* EXPORTS to Linux Kernel */
int nvidia_frontend_open(struct inode *, struct file *);
int nvidia_frontend_close(struct inode *, struct file *);
unsigned int nvidia_frontend_poll(struct file *, poll_table *);
int nvidia_frontend_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
long nvidia_frontend_unlocked_ioctl(struct file *, unsigned int, unsigned long);
long nvidia_frontend_compat_ioctl(struct file *, unsigned int, unsigned long);
int nvidia_frontend_mmap(struct file *, struct vm_area_struct *);
/* character driver entry points */
static struct file_operations nv_frontend_fops = {
.owner = THIS_MODULE,
.poll = nvidia_frontend_poll,
.unlocked_ioctl = nvidia_frontend_unlocked_ioctl,
#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64
.compat_ioctl = nvidia_frontend_compat_ioctl,
#endif
.mmap = nvidia_frontend_mmap,
.open = nvidia_frontend_open,
.release = nvidia_frontend_close,
};
/* Helper functions */
static int add_device(nvidia_module_t *module, nv_linux_state_t *device, NvBool all)
{
NvU32 i;
int rc = -1;
// look for a free minor number and assign it to this device
for (i = 0; i <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN; i++)
{
if (nv_minor_num_table[i] == NULL)
{
nv_minor_num_table[i] = module;
device->minor_num = i;
if (all == NV_TRUE)
{
device = device->next;
if (device == NULL)
{
rc = 0;
break;
}
}
else
{
rc = 0;
break;
}
}
}
return rc;
}
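/*
 * Illustrative example (not in the original source): with all == NV_TRUE and
 * a linked list of three devices, a single call assigns consecutive free
 * minors, e.g. 0, 1 and 2; with all == NV_FALSE only the single device
 * passed in receives the first free minor.
 */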
static int remove_device(nvidia_module_t *module, nv_linux_state_t *device)
{
int rc = -1;
// remove this device from minor_number table
if ((device != NULL) && (nv_minor_num_table[device->minor_num] != NULL))
{
nv_minor_num_table[device->minor_num] = NULL;
device->minor_num = 0;
rc = 0;
}
return rc;
}
/* Export functions */
int nvidia_register_module(nvidia_module_t *module)
{
int rc = 0;
NvU32 ctrl_minor_num;
down(&nv_module_table_lock);
if (module->instance >= NV_MAX_MODULE_INSTANCES)
{
printk("NVRM: NVIDIA module instance %d registration failed.\n",
module->instance);
rc = -EINVAL;
goto done;
}
ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
nv_minor_num_table[ctrl_minor_num] = module;
nv_num_instances++;
done:
up(&nv_module_table_lock);
return rc;
}
EXPORT_SYMBOL(nvidia_register_module);
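/*
 * Worked example of the control-minor layout used above (illustrative,
 * assuming the default NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX of 255 and
 * NV_MAX_MODULE_INSTANCES of 8): instance 0 claims control minor 255,
 * instance 1 claims 254, ..., instance 7 claims 248, leaving minors 0..247
 * for the per-GPU device nodes assigned by add_device().
 */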
int nvidia_unregister_module(nvidia_module_t *module)
{
int rc = 0;
NvU32 ctrl_minor_num;
down(&nv_module_table_lock);
ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
if (nv_minor_num_table[ctrl_minor_num] == NULL)
{
printk("NVRM: NVIDIA module for %d instance does not exist\n",
module->instance);
rc = -1;
}
else
{
nv_minor_num_table[ctrl_minor_num] = NULL;
nv_num_instances--;
}
up(&nv_module_table_lock);
return rc;
}
EXPORT_SYMBOL(nvidia_unregister_module);
int nvidia_frontend_add_device(nvidia_module_t *module, nv_linux_state_t * device)
{
int rc = -1;
NvU32 ctrl_minor_num;
down(&nv_module_table_lock);
ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
if (nv_minor_num_table[ctrl_minor_num] == NULL)
{
printk("NVRM: NVIDIA module for %d instance does not exist\n",
module->instance);
rc = -1;
}
else
{
rc = add_device(module, device, NV_FALSE);
}
up(&nv_module_table_lock);
return rc;
}
EXPORT_SYMBOL(nvidia_frontend_add_device);
int nvidia_frontend_remove_device(nvidia_module_t *module, nv_linux_state_t * device)
{
int rc = 0;
NvU32 ctrl_minor_num;
down(&nv_module_table_lock);
ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
if (nv_minor_num_table[ctrl_minor_num] == NULL)
{
printk("NVRM: NVIDIA module for %d instance does not exist\n",
module->instance);
rc = -1;
}
else
{
rc = remove_device(module, device);
}
up(&nv_module_table_lock);
return rc;
}
EXPORT_SYMBOL(nvidia_frontend_remove_device);
int nvidia_frontend_open(
struct inode *inode,
struct file *file
)
{
int rc = -ENODEV;
nvidia_module_t *module = NULL;
NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
down(&nv_module_table_lock);
module = nv_minor_num_table[minor_num];
if ((module != NULL) && (module->open != NULL))
{
// Increment the reference count of module to ensure that module does
// not get unloaded if its corresponding device file is open, for
// example nvidiaN.ko should not get unloaded if /dev/nvidiaN is open.
if (!try_module_get(module->owner))
{
up(&nv_module_table_lock);
return -ENODEV;
}
rc = module->open(inode, file);
if (rc < 0)
{
module_put(module->owner);
}
}
up(&nv_module_table_lock);
return rc;
}
int nvidia_frontend_close(
struct inode *inode,
struct file *file
)
{
int rc = -ENODEV;
nvidia_module_t *module = NULL;
NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
module = nv_minor_num_table[minor_num];
if ((module != NULL) && (module->close != NULL))
{
rc = module->close(inode, file);
// Decrement the reference count of module.
module_put(module->owner);
}
return rc;
}
unsigned int nvidia_frontend_poll(
struct file *file,
poll_table *wait
)
{
unsigned int mask = 0;
struct inode *inode = NV_FILE_INODE(file);
NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
nvidia_module_t *module = nv_minor_num_table[minor_num];
if ((module != NULL) && (module->poll != NULL))
mask = module->poll(file, wait);
return mask;
}
int nvidia_frontend_ioctl(
struct inode *inode,
struct file *file,
unsigned int cmd,
unsigned long i_arg)
{
int rc = -ENODEV;
nvidia_module_t *module = NULL;
NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
module = nv_minor_num_table[minor_num];
if ((module != NULL) && (module->ioctl != NULL))
rc = module->ioctl(inode, file, cmd, i_arg);
return rc;
}
long nvidia_frontend_unlocked_ioctl(
struct file *file,
unsigned int cmd,
unsigned long i_arg
)
{
return nvidia_frontend_ioctl(NV_FILE_INODE(file), file, cmd, i_arg);
}
long nvidia_frontend_compat_ioctl(
struct file *file,
unsigned int cmd,
unsigned long i_arg
)
{
return nvidia_frontend_ioctl(NV_FILE_INODE(file), file, cmd, i_arg);
}
int nvidia_frontend_mmap(
struct file *file,
struct vm_area_struct *vma
)
{
int rc = -ENODEV;
struct inode *inode = NV_FILE_INODE(file);
NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
nvidia_module_t *module = nv_minor_num_table[minor_num];
if ((module != NULL) && (module->mmap != NULL))
rc = module->mmap(file, vma);
return rc;
}
static int __init nvidia_frontend_init_module(void)
{
int status = 0;
// initialise the nvidia module table
nv_num_instances = 0;
memset(nv_minor_num_table, 0, sizeof(nv_minor_num_table));
NV_INIT_MUTEX(&nv_module_table_lock);
status = nvidia_init_module();
if (status < 0)
{
return status;
}
// register char device
status = register_chrdev(NV_MAJOR_DEVICE_NUMBER, "nvidia-frontend", &nv_frontend_fops);
if (status < 0)
{
printk("NVRM: register_chrdev() failed!\n");
nvidia_exit_module();
}
return status;
}
static void __exit nvidia_frontend_exit_module(void)
{
/*
* If this is the last nvidia_module to be unregistered, cleanup and
* unregister char dev
*/
if (nv_num_instances == 1)
{
unregister_chrdev(NV_MAJOR_DEVICE_NUMBER, "nvidia-frontend");
}
nvidia_exit_module();
}
module_init(nvidia_frontend_init_module);
module_exit(nvidia_frontend_exit_module);


@@ -0,0 +1,47 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_FRONTEND_H_
#define _NV_FRONTEND_H_
#include "nvtypes.h"
#include "nv-linux.h"
#include "nv-register-module.h"
#define NV_MAX_MODULE_INSTANCES 8
#define NV_FRONTEND_MINOR_NUMBER(x) minor((x)->i_rdev)
#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX 255
#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN (NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - \
NV_MAX_MODULE_INSTANCES)
#define NV_FRONTEND_IS_CONTROL_DEVICE(x) ((x <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX) && \
(x > NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN))
int nvidia_frontend_add_device(nvidia_module_t *, nv_linux_state_t *);
int nvidia_frontend_remove_device(nvidia_module_t *, nv_linux_state_t *);
extern nvidia_module_t *nv_minor_num_table[];
#endif


@@ -0,0 +1,271 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "os_gpio.h"
#define NV_GPIOF_DIR_IN (1 << 0)
/*!
* @brief Mapping array of OS GPIO function ID to OS function name,
* this name is used to get GPIO number from Device Tree.
*/
static const char *osMapGpioFunc[] = {
[NV_OS_GPIO_FUNC_HOTPLUG_A] = "os_gpio_hotplug_a",
[NV_OS_GPIO_FUNC_HOTPLUG_B] = "os_gpio_hotplug_b",
};
NV_STATUS NV_API_CALL nv_gpio_get_pin_state
(
nv_state_t *nv,
NvU32 pinNum,
NvU32 *pinValue
)
{
int ret;
#if defined(NV_GPIO_GET_VALUE_PRESENT)
ret = gpio_get_value(pinNum);
#else
nv_printf(NV_DBG_ERRORS, "gpio_get_value not present\n");
return NV_ERR_GENERIC;
#endif
if (ret < 0)
{
nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n",
__func__, ret);
return NV_ERR_GENERIC;
}
*pinValue = ret;
return NV_OK;
}
void NV_API_CALL nv_gpio_set_pin_state
(
nv_state_t *nv,
NvU32 pinNum,
NvU32 pinValue
)
{
#if defined(NV_GPIO_SET_VALUE_PRESENT)
gpio_set_value(pinNum, pinValue);
#else
nv_printf(NV_DBG_ERRORS, "gpio_set_value not present\n");
#endif
}
NV_STATUS NV_API_CALL nv_gpio_set_pin_direction
(
nv_state_t *nv,
NvU32 pinNum,
NvU32 direction
)
{
int ret;
if (direction)
{
#if defined(NV_GPIO_DIRECTION_INPUT_PRESENT)
ret = gpio_direction_input(pinNum);
#else
nv_printf(NV_DBG_ERRORS, "gpio_direction_input not present\n");
return NV_ERR_GENERIC;
#endif
}
else
{
#if defined(NV_GPIO_DIRECTION_OUTPUT_PRESENT)
ret = gpio_direction_output(pinNum, 0);
#else
nv_printf(NV_DBG_ERRORS, "gpio_direction_output not present\n");
return NV_ERR_GENERIC;
#endif
}
if (ret)
{
nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n",
__func__, ret);
return NV_ERR_GENERIC;
}
return NV_OK;
}
NV_STATUS NV_API_CALL nv_gpio_get_pin_direction
(
nv_state_t *nv,
NvU32 pinNum,
NvU32 *direction
)
{
/*!
* TODO: Commenting out until gpio_get_direction wrapper
* support is added in kernel.
*/
#if 0
int ret;
ret = nv_gpio_get_direction(pinNum);
if (ret)
{
nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n",
__func__, ret);
return NV_ERR_GENERIC;
}
*direction = ret;
#endif
return NV_OK;
}
NV_STATUS NV_API_CALL nv_gpio_get_pin_number
(
nv_state_t *nv,
NvU32 function,
NvU32 *pinNum
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
int rc;
#if defined(NV_OF_GET_NAME_GPIO_PRESENT)
rc = of_get_named_gpio(nvl->dev->of_node, osMapGpioFunc[function], 0);
#else
nv_printf(NV_DBG_ERRORS, "of_get_named_gpio not present\n");
return NV_ERR_GENERIC;
#endif
if (rc < 0)
{
nv_printf(NV_DBG_ERRORS, "of_get_name_gpio failed for gpio - %s, rc - %d\n",
osMapGpioFunc[function], rc);
return NV_ERR_GENERIC;
}
*pinNum = rc;
#if defined(NV_DEVM_GPIO_REQUEST_ONE_PRESENT)
rc = devm_gpio_request_one(nvl->dev, *pinNum, NV_GPIOF_DIR_IN,
osMapGpioFunc[function]);
#else
nv_printf(NV_DBG_ERRORS, "devm_gpio_request_one not present\n");
return NV_ERR_GENERIC;
#endif
if (rc < 0)
{
nv_printf(NV_DBG_ERRORS, "request gpio failed for gpio - %s, rc - %d\n",
osMapGpioFunc[function], rc);
return NV_ERR_GENERIC;
}
return NV_OK;
}
NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status
(
nv_state_t *nv,
NvU32 pinNum,
NvU32 direction
)
{
NvU32 irqGpioPin;
NvU32 pinValue;
if (nv_get_current_irq_type(nv) != NV_SOC_IRQ_GPIO_TYPE)
{
return NV_FALSE;
}
nv_get_current_irq_priv_data(nv, &irqGpioPin);
if (pinNum != irqGpioPin)
{
return NV_FALSE;
}
#if defined(NV_GPIO_GET_VALUE_PRESENT)
pinValue = gpio_get_value(pinNum);
#else
nv_printf(NV_DBG_ERRORS, "gpio_get_value not present\n");
return NV_FALSE;
#endif
if (pinValue != direction)
{
return NV_FALSE;
}
return NV_TRUE;
}
NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt
(
nv_state_t * nv,
NvU32 pinNum,
NvU32 trigger_level
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
int rc;
int irq_num;
#if defined(NV_GPIO_TO_IRQ_PRESENT)
irq_num = gpio_to_irq(pinNum);
#else
nv_printf(NV_DBG_ERRORS, "gpio_to_irq not present\n");
return NV_ERR_GENERIC;
#endif
/*
 * Skip interrupt registration for the falling trigger on the hotplug GPIO
 * pin: the hotplug sequence calls this function twice for the same pin,
 * once per trigger level (rising/falling), but Linux allows an interrupt
 * to be registered only once with its required trigger levels. To avoid
 * re-registration, the falling-trigger call is a no-op, and the
 * rising-trigger call registers for both rising and falling edges.
 */
if (trigger_level == 0)
{
return NV_OK;
}
rc = nv_request_soc_irq(nvl, irq_num, NV_SOC_IRQ_GPIO_TYPE,
(IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
IRQF_ONESHOT), pinNum,
"hdmi-hotplug");
if (rc < 0)
{
nv_printf(NV_DBG_ERRORS, "IRQ registration failed for gpio - %d, rc - %d\n",
pinNum, rc);
return NV_ERR_GENERIC;
}
/* Disable the irq after registration as RM init sequence re-enables it */
disable_irq_nosync(irq_num);
return NV_OK;
}
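/*
 * Illustrative call sequence (hypothetical caller, not part of this file):
 *
 *     nv_gpio_set_pin_interrupt(nv, pin, 1);  // registers rising+falling IRQ
 *     nv_gpio_set_pin_interrupt(nv, pin, 0);  // intentional no-op, see above
 *
 * Only the rising-trigger call performs the registration; the falling-trigger
 * call returns NV_OK without touching the IRQ, as explained in the comment
 * inside the function.
 */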


@@ -0,0 +1,80 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(NV_LINUX_NVHOST_T194_H_PRESENT)
#include <linux/nvhost.h>
#include <linux/nvhost_t194.h>
NV_STATUS nv_get_syncpoint_aperture
(
NvU32 syncpointId,
NvU64 *physAddr,
NvU64 *limit,
NvU32 *offset
)
{
struct platform_device *host1x_pdev = NULL;
phys_addr_t base;
size_t size;
#if NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_get_default_device
host1x_pdev = nvhost_get_default_device();
if (host1x_pdev == NULL)
{
return NV_ERR_INVALID_DEVICE;
}
#endif
#if NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_syncpt_unit_interface_get_aperture && \
NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_syncpt_unit_interface_get_byte_offset
nvhost_syncpt_unit_interface_get_aperture(
host1x_pdev, &base, &size);
*physAddr = base;
*limit = nvhost_syncpt_unit_interface_get_byte_offset(1);
*offset = nvhost_syncpt_unit_interface_get_byte_offset(syncpointId);
#else
return NV_ERR_NOT_SUPPORTED;
#endif
return NV_OK;
}
#else
NV_STATUS nv_get_syncpoint_aperture
(
NvU32 syncpointId,
NvU64 *physAddr,
NvU64 *limit,
NvU32 *offset
)
{
return NV_ERR_NOT_SUPPORTED;
}
#endif

kernel-open/nvidia/nv-i2c.c

@@ -0,0 +1,573 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2005-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include <linux/i2c.h>
#include "os-interface.h"
#include "nv-linux.h"
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
static int nv_i2c_algo_master_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num)
{
nv_state_t *nv = (nv_state_t *)adapter->algo_data;
unsigned int i = 0;
int rc;
NV_STATUS rmStatus = NV_OK;
nvidia_stack_t *sp = NULL;
const unsigned int supported_i2c_flags = I2C_M_RD
#if defined(I2C_M_DMA_SAFE)
| I2C_M_DMA_SAFE
#endif
;
rc = nv_kmem_cache_alloc_stack(&sp);
if (rc != 0)
{
return rc;
}
rc = -EIO;
for (i = 0; ((i < (unsigned int)num) && (rmStatus == NV_OK)); i++)
{
if (msgs[i].flags & ~supported_i2c_flags)
{
/* we only support basic I2C reads/writes, reject any other commands */
rc = -EINVAL;
nv_printf(NV_DBG_ERRORS, "NVRM: Unsupported I2C flags used. (flags:0x%08x)\n",
msgs[i].flags);
rmStatus = NV_ERR_INVALID_ARGUMENT;
}
else
{
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
(msgs[i].flags & I2C_M_RD) ?
NV_I2C_CMD_READ : NV_I2C_CMD_WRITE,
(NvU8)(msgs[i].addr & 0x7f), 0,
(NvU32)(msgs[i].len & 0xffffUL),
(NvU8 *)msgs[i].buf);
}
}
nv_kmem_cache_free_stack(sp);
return (rmStatus != NV_OK) ? rc : num;
}
static int nv_i2c_algo_smbus_xfer(
struct i2c_adapter *adapter,
u16 addr,
unsigned short flags,
char read_write,
u8 command,
int size,
union i2c_smbus_data *data
)
{
nv_state_t *nv = (nv_state_t *)adapter->algo_data;
int rc;
NV_STATUS rmStatus = NV_OK;
nvidia_stack_t *sp = NULL;
rc = nv_kmem_cache_alloc_stack(&sp);
if (rc != 0)
{
return rc;
}
rc = -EIO;
switch (size)
{
case I2C_SMBUS_QUICK:
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
(read_write == I2C_SMBUS_READ) ?
NV_I2C_CMD_SMBUS_QUICK_READ :
NV_I2C_CMD_SMBUS_QUICK_WRITE,
(NvU8)(addr & 0x7f), 0, 0, NULL);
break;
case I2C_SMBUS_BYTE:
if (read_write == I2C_SMBUS_READ)
{
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
NV_I2C_CMD_READ,
(NvU8)(addr & 0x7f), 0, 1,
(NvU8 *)&data->byte);
}
else
{
u8 byte = command; /* renamed to avoid shadowing the 'data' parameter */
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
NV_I2C_CMD_WRITE,
(NvU8)(addr & 0x7f), 0, 1,
(NvU8 *)&byte);
}
break;
case I2C_SMBUS_BYTE_DATA:
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
(read_write == I2C_SMBUS_READ) ?
NV_I2C_CMD_SMBUS_READ :
NV_I2C_CMD_SMBUS_WRITE,
(NvU8)(addr & 0x7f), (NvU8)command, 1,
(NvU8 *)&data->byte);
break;
case I2C_SMBUS_WORD_DATA:
if (read_write != I2C_SMBUS_READ)
{
u16 word = data->word;
data->block[1] = (word & 0xff);
data->block[2] = (word >> 8);
}
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
(read_write == I2C_SMBUS_READ) ?
NV_I2C_CMD_SMBUS_READ :
NV_I2C_CMD_SMBUS_WRITE,
(NvU8)(addr & 0x7f), (NvU8)command, 2,
(NvU8 *)&data->block[1]);
if (read_write == I2C_SMBUS_READ)
{
data->word = ((NvU16)data->block[1]) |
((NvU16)data->block[2] << 8);
}
break;
case I2C_SMBUS_BLOCK_DATA:
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
(read_write == I2C_SMBUS_READ) ?
NV_I2C_CMD_SMBUS_BLOCK_READ :
NV_I2C_CMD_SMBUS_BLOCK_WRITE,
(NvU8)(addr & 0x7f), (NvU8)command,
sizeof(data->block),
(NvU8 *)data->block);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
(read_write == I2C_SMBUS_READ) ?
NV_I2C_CMD_BLOCK_READ :
NV_I2C_CMD_BLOCK_WRITE,
(NvU8)(addr & 0x7f), (NvU8)command,
(NvU8)data->block[0],
(NvU8 *)&data->block[1]);
break;
default:
rc = -EINVAL;
rmStatus = NV_ERR_INVALID_ARGUMENT;
}
nv_kmem_cache_free_stack(sp);
return (rmStatus != NV_OK) ? rc : 0;
}
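/*
 * Worked example of the I2C_SMBUS_WORD_DATA handling above (illustrative):
 * a write of word 0x1234 is split little-endian into block[1] = 0x34 and
 * block[2] = 0x12 before the 2-byte transfer, and a read reassembles
 * data->word as block[1] | (block[2] << 8), matching SMBus byte order on
 * the wire.
 */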
static u32 nv_i2c_algo_functionality(struct i2c_adapter *adapter)
{
nv_state_t *nv = (nv_state_t *)adapter->algo_data;
u32 ret = I2C_FUNC_I2C;
nvidia_stack_t *sp = NULL;
if (nv_kmem_cache_alloc_stack(&sp) != 0)
{
return 0;
}
if (rm_i2c_is_smbus_capable(sp, nv, adapter))
{
ret |= (I2C_FUNC_SMBUS_QUICK |
I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_BLOCK_DATA |
I2C_FUNC_SMBUS_I2C_BLOCK);
}
nv_kmem_cache_free_stack(sp);
return ret;
}
static struct i2c_algorithm nv_i2c_algo = {
.master_xfer = nv_i2c_algo_master_xfer,
.smbus_xfer = nv_i2c_algo_smbus_xfer,
.functionality = nv_i2c_algo_functionality,
};
struct i2c_adapter nv_i2c_adapter_prototype = {
.owner = THIS_MODULE,
.algo = &nv_i2c_algo,
.algo_data = NULL,
};
void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
{
NV_STATUS rmStatus;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct i2c_adapter *pI2cAdapter = NULL;
int osstatus = 0;
// allocate an i2c adapter
rmStatus = os_alloc_mem((void **)&pI2cAdapter,sizeof(struct i2c_adapter));
if (rmStatus != NV_OK)
return NULL;
// fill in with default structure
os_mem_copy(pI2cAdapter, &nv_i2c_adapter_prototype, sizeof(struct i2c_adapter));
pI2cAdapter->dev.parent = nvl->dev;
if (nvl->pci_dev != NULL)
{
snprintf(pI2cAdapter->name, sizeof(pI2cAdapter->name),
"NVIDIA i2c adapter %u at %x:%02x.%u", port, nv->pci_info.bus,
nv->pci_info.slot, PCI_FUNC(nvl->pci_dev->devfn));
}
else
{
snprintf(pI2cAdapter->name, sizeof(pI2cAdapter->name),
"NVIDIA SOC i2c adapter %u", port);
}
// add our data to the structure
pI2cAdapter->algo_data = (void *)nv;
// attempt to register with the kernel
osstatus = i2c_add_adapter(pI2cAdapter);
if (osstatus)
{
// free the memory and NULL the ptr
os_free_mem(pI2cAdapter);
pI2cAdapter = NULL;
}
return ((void *)pI2cAdapter);
}
void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
{
struct i2c_adapter *pI2cAdapter = (struct i2c_adapter *)data;
if (pI2cAdapter)
{
// unregister from the OS
i2c_del_adapter(pI2cAdapter);
os_free_mem(pI2cAdapter);
}
}
static struct i2c_client * nv_i2c_register_client(
nv_state_t *nv,
NvU32 linuxI2CSwPort,
NvU8 address)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct i2c_adapter *i2c_adapter;
struct i2c_client *client;
int c_index;
struct i2c_board_info i2c_dev_info = {
.type = "tegra_display",
.addr = address,
};
/* Get the adapter using i2c port */
i2c_adapter = i2c_get_adapter(linuxI2CSwPort);
if (i2c_adapter == NULL)
{
nv_printf(NV_DBG_ERRORS, "Unable to get i2c adapter for port(%d)",
linuxI2CSwPort);
return NULL;
}
#if defined(NV_I2C_NEW_CLIENT_DEVICE_PRESENT)
client = i2c_new_client_device(i2c_adapter, &i2c_dev_info);
#else
nv_printf(NV_DBG_ERRORS, "nv_i2c_new_device not present\n");
client = NULL;
#endif
if (client == NULL)
{
nv_printf(NV_DBG_ERRORS, "Unable to register client for address(0x%x)",
address);
i2c_put_adapter(i2c_adapter);
return NULL;
}
i2c_put_adapter(i2c_adapter);
/* Save the Port and i2c client */
nvl->i2c_clients[linuxI2CSwPort].port = linuxI2CSwPort;
for (c_index = 0; c_index < MAX_CLIENTS_PER_ADAPTER; c_index++)
{
if (nvl->i2c_clients[linuxI2CSwPort].pOsClient[c_index] == NULL)
{
nvl->i2c_clients[linuxI2CSwPort].pOsClient[c_index] = client;
break;
}
}
return client;
}
static struct i2c_client *nv_i2c_get_registered_client(
nv_state_t *nv,
NvU32 linuxI2CSwPort,
NvU8 address)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
int c_index;
for (c_index = 0; c_index < MAX_CLIENTS_PER_ADAPTER; c_index++)
{
struct i2c_client *client;
client = (struct i2c_client *)nvl->i2c_clients[linuxI2CSwPort].pOsClient[c_index];
if (client)
{
if (address == (NvU8)client->addr)
{
return client;
}
}
else
{
break;
}
}
return NULL;
}
NV_STATUS NV_API_CALL nv_i2c_transfer(
nv_state_t *nv,
NvU32 physicalI2CPort,
NvU8 address,
nv_i2c_msg_t *nv_msgs,
int num_msgs
)
{
struct i2c_client *client;
struct i2c_msg *msgs;
int count;
int rc;
NV_STATUS status = NV_OK;
NvU32 linuxI2CSwPort;
//
// RM style client address is 8-bit addressing, but Linux uses 7-bit
// addressing, so convert to 7-bit addressing format.
//
address = address >> 1;
//
// Linux Tegra I2C controller driver uses logical port(controller) number
// where logical port number of I2C1(Gen1) controller is 0, logical port
// number for I2C2(Gen2) controller is 1 and so on.
// But RM passes I2C physical port(controller) number i.e RM passes "1"
// for I2C1(Gen1), 2 for I2C2(Gen2), etc. So convert physical port number
// to logical port number(linuxI2CSwPort).
//
linuxI2CSwPort = physicalI2CPort - 1;
//
// Check that this is a valid port (linuxI2CSwPort is unsigned, so a
// physicalI2CPort of 0 wraps around and fails this bound check)
//
if (linuxI2CSwPort >= MAX_TEGRA_I2C_PORTS)
{
nv_printf(NV_DBG_ERRORS, "Invalid I2C port:%d\n", linuxI2CSwPort);
return NV_ERR_INVALID_ARGUMENT;
}
for (count = 0; count < num_msgs; count++) {
//
// RM style client address is 8-bit addressing, but Linux uses 7-bit
// addressing, so convert to 7-bit addressing format.
//
nv_msgs[count].addr = nv_msgs[count].addr >> 1;
client = nv_i2c_get_registered_client(nv, linuxI2CSwPort, nv_msgs[count].addr);
if (client == NULL)
{
client = nv_i2c_register_client(nv, linuxI2CSwPort, nv_msgs[count].addr);
if (client == NULL)
{
nv_printf(NV_DBG_ERRORS, "i2c client register failed for addr:0x%x\n",
nv_msgs[count].addr);
return NV_ERR_GENERIC;
}
}
}
msgs = kzalloc((num_msgs * sizeof(*msgs)), GFP_KERNEL);
if (msgs == NULL)
{
nv_printf(NV_DBG_ERRORS, "i2c message allocation failed\n");
return NV_ERR_NO_MEMORY;
}
for (count = 0; count < num_msgs; count++) {
msgs[count].addr = nv_msgs[count].addr;
msgs[count].flags = nv_msgs[count].flags;
msgs[count].len = nv_msgs[count].len;
msgs[count].buf = nv_msgs[count].buf;
}
rc = i2c_transfer(client->adapter, msgs, num_msgs);
if (rc != num_msgs)
{
nv_printf(NV_DBG_ERRORS, "i2c transfer failed for addr:0x%x",
address);
status = NV_ERR_GENERIC;
}
kfree(msgs);
return status;
}
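/*
 * Worked example of the conversions above (illustrative): an RM-style 8-bit
 * client address of 0xA0 becomes the Linux 7-bit address 0x50 (0xA0 >> 1),
 * and RM physical port 1 (I2C1/Gen1) maps to Linux logical port 0.
 */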
void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *nv)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
int p_index, c_index;
for (p_index = 0; p_index < MAX_TEGRA_I2C_PORTS; p_index++)
{
for (c_index = 0;
c_index < MAX_CLIENTS_PER_ADAPTER;
c_index++)
{
struct i2c_client *client;
client = (struct i2c_client *)nvl->i2c_clients[p_index].pOsClient[c_index];
if (client)
{
#if defined(NV_I2C_UNREGISTER_DEVICE_PRESENT)
i2c_unregister_device(client);
#else
nv_printf(NV_DBG_ERRORS, "i2c_unregister_device not present\n");
#endif
nvl->i2c_clients[p_index].pOsClient[c_index] = NULL;
}
}
}
}
NV_STATUS NV_API_CALL nv_i2c_bus_status(
nv_state_t *nv,
NvU32 physicalI2CPort,
NvS32 *scl,
NvS32 *sda)
{
#if NV_IS_EXPORT_SYMBOL_PRESENT_i2c_bus_status
NvU32 linuxI2CSwPort;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct i2c_adapter *i2c_adapter;
int ret;
//
// Linux Tegra I2C controller driver uses logical port(controller) number
// where logical port number of I2C1(Gen1) controller is 0, logical port
// number for I2C2(Gen2) controller is 1 and so on.
// But RM passes I2C physical port(controller) number i.e RM passes "1"
// for I2C1(Gen1), 2 for I2C2(Gen2), etc. So convert physical port number
// to logical port number(linuxI2CSwPort).
//
linuxI2CSwPort = physicalI2CPort - 1;
//
// Check that this is a valid port (linuxI2CSwPort is unsigned, so a
// physicalI2CPort of 0 wraps around and fails this bound check)
//
if (linuxI2CSwPort >= MAX_TEGRA_I2C_PORTS)
{
nv_printf(NV_DBG_ERRORS, "Invalid I2C port:%d\n", linuxI2CSwPort);
return NV_ERR_INVALID_ARGUMENT;
}
/* Get the adapter using i2c port */
i2c_adapter = i2c_get_adapter(linuxI2CSwPort);
if (i2c_adapter == NULL)
{
nv_printf(NV_DBG_ERRORS, "Unable to get i2c adapter for port(%d)",
linuxI2CSwPort);
return NULL;
}
ret = i2c_bus_status(i2c_adapter, scl, sda);
// Drop the adapter reference before checking for errors, so the failure
// path does not leak it.
i2c_put_adapter(i2c_adapter);
if (ret < 0)
{
nv_printf(NV_DBG_ERRORS, "i2c_bus_status failed:%d\n", ret);
return NV_ERR_GENERIC;
}
return NV_OK;
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}
#else // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
{
}
void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
{
return NULL;
}
NV_STATUS NV_API_CALL nv_i2c_transfer(
nv_state_t *nv,
NvU32 physicalI2CPort,
NvU8 address,
nv_i2c_msg_t *nv_msgs,
int num_msgs
)
{
return NV_OK;
}
void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *nv)
{
}
NV_STATUS NV_API_CALL nv_i2c_bus_status(
nv_state_t *nv,
NvU32 physicalI2CPort,
NvS32 *scl,
NvS32 *sda)
{
return NV_ERR_GENERIC;
}
#endif // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)


@@ -0,0 +1,441 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* nv-ibmnpu.c - interface with the ibmnpu (IBM NVLink Processing Unit) "module"
*/
#include "nv-linux.h"
#if defined(NVCPU_PPC64LE)
#include "nv-ibmnpu.h"
#include "nv-rsync.h"
/*
* Temporary query to get the L1D cache block size directly from the device
* tree for the offline cache flush workaround, since the ppc64_caches symbol
* is unavailable to us.
*/
const NvU32 P9_L1D_CACHE_DEFAULT_BLOCK_SIZE = 0x80;
static NvU32 nv_ibm_get_cpu_l1d_cache_block_size(void)
{
const __be32 *block_size_prop;
NvU32 block_size;
/*
* Attempt to look up the block size from device tree. If unavailable, just
* return the default that we see on these systems.
*/
struct device_node *cpu = of_find_node_by_type(NULL, "cpu");
if (!cpu)
{
return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE;
}
block_size_prop = of_get_property(cpu, "d-cache-block-size", NULL);
if (!block_size_prop)
{
of_node_put(cpu);
return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE;
}
block_size = be32_to_cpu(*block_size_prop);
/* Drop the node reference taken by of_find_node_by_type(). */
of_node_put(cpu);
return block_size;
}
/*
* GPU device memory can be exposed to the kernel as NUMA node memory via the
* IBMNPU devices associated with the GPU. The platform firmware will specify
* the parameters of where the memory lives in the system address space via
* firmware properties on the IBMNPU devices. These properties specify what
* memory can be accessed through the IBMNPU device, and the driver can online
* a GPU device's memory into the range accessible by its associated IBMNPU
* devices.
*
* This function calls over to the IBMNPU driver to query the parameters from
* firmware, and validates that the resulting parameters are acceptable.
*/
static void nv_init_ibmnpu_numa_info(nv_state_t *nv)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
nv_npu_numa_info_t *npu_numa_info = &nvl->npu->numa_info;
struct pci_dev *npu_dev = nvl->npu->devs[0];
NvU64 spa, gpa, aper_size;
/*
* Terminology:
* - system physical address (spa): 47-bit NVIDIA physical address, which
* is the CPU real address with the NVLink address compression scheme
* already applied in firmware.
* - guest physical address (gpa): 56-bit physical address as seen by the
* operating system. This is the base address that we should use for
* onlining device memory.
*/
nvl->numa_info.node_id = ibmnpu_device_get_memory_config(npu_dev, &spa, &gpa,
&aper_size);
if (nvl->numa_info.node_id == NUMA_NO_NODE)
{
NV_DEV_PRINTF(NV_DBG_SETUP, nv, "No NUMA memory aperture found\n");
return;
}
/* Validate that the compressed system physical address is not too wide */
if (spa & (~(BIT_ULL(nv_volta_dma_addr_size) - 1)))
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
"Invalid NUMA memory system pa 0x%llx"
" on IBM-NPU device %04x:%02x:%02x.%u\n",
spa, NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn));
goto invalid_numa_config;
}
/*
* Validate that the guest physical address is aligned to 128GB.
* This alignment requirement comes from the Volta address space
* size on POWER9.
*/
if (!IS_ALIGNED(gpa, BIT_ULL(nv_volta_addr_space_width)))
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
"Invalid alignment in NUMA memory guest pa 0x%llx"
" on IBM-NPU device %04x:%02x:%02x.%u\n",
gpa, NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn));
goto invalid_numa_config;
}
/* Validate that the aperture can map all of the device's framebuffer */
if (aper_size < nv->fb->size)
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
"Insufficient NUMA memory aperture size 0x%llx"
" on IBM-NPU device %04x:%02x:%02x.%u (0x%llx required)\n",
aper_size, NV_PCI_DOMAIN_NUMBER(npu_dev),
NV_PCI_BUS_NUMBER(npu_dev), NV_PCI_SLOT_NUMBER(npu_dev),
PCI_FUNC(npu_dev->devfn), nv->fb->size);
goto invalid_numa_config;
}
npu_numa_info->compr_sys_phys_addr = spa;
npu_numa_info->guest_phys_addr = gpa;
if (NVreg_EnableUserNUMAManagement)
{
NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_OFFLINE);
}
else
{
NV_DEV_PRINTF(NV_DBG_SETUP, nv, "User-mode NUMA onlining disabled.\n");
nvl->numa_info.node_id = NUMA_NO_NODE;
}
NV_DEV_PRINTF(NV_DBG_SETUP, nv, "NUMA memory aperture: "
"[spa = 0x%llx, gpa = 0x%llx, aper_size = 0x%llx]\n",
spa, gpa, aper_size);
/* Get the CPU's L1D cache block size for offlining cache flush */
npu_numa_info->l1d_cache_block_size = nv_ibm_get_cpu_l1d_cache_block_size();
return;
invalid_numa_config:
NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
"NUMA memory aperture disabled due to invalid firmware configuration\n");
nvl->numa_info.node_id = NUMA_NO_NODE;
}
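/*
 * Worked example of the alignment check above (illustrative): with
 * nv_volta_addr_space_width == 37, BIT_ULL(37) is 128GB, so a guest
 * physical address of 0x2000000000 (2^37) passes IS_ALIGNED(), while
 * 0x1000000000 (2^36, 64GB) would be rejected.
 */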
void nv_init_ibmnpu_info(nv_state_t *nv)
{
#if defined(NV_PNV_PCI_GET_NPU_DEV_PRESENT)
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct pci_dev *npu_dev = pnv_pci_get_npu_dev(nvl->pci_dev, 0);
NvU8 dev_count;
if (!npu_dev)
{
return;
}
if (os_alloc_mem((void **)&nvl->npu, sizeof(nv_ibmnpu_info_t)) != NV_OK)
{
return;
}
os_mem_set(nvl->npu, 0, sizeof(nv_ibmnpu_info_t));
/* Find any other IBMNPU devices attached to this GPU */
for (nvl->npu->devs[0] = npu_dev, dev_count = 1;
dev_count < NV_MAX_ATTACHED_IBMNPUS; dev_count++)
{
nvl->npu->devs[dev_count] = pnv_pci_get_npu_dev(nvl->pci_dev, dev_count);
if (!nvl->npu->devs[dev_count])
{
break;
}
}
nvl->npu->dev_count = dev_count;
/*
* If we run out of space for IBMNPU devices, NV_MAX_ATTACHED_IBMNPUS will
* need to be bumped.
*/
WARN_ON((dev_count == NV_MAX_ATTACHED_IBMNPUS) &&
pnv_pci_get_npu_dev(nvl->pci_dev, dev_count));
ibmnpu_device_get_genregs_info(npu_dev, &nvl->npu->genregs);
if (nvl->npu->genregs.size > 0)
{
NV_DEV_PRINTF(NV_DBG_SETUP, nv,
"IBM-NPU device %04x:%02x:%02x.%u associated with GPU "
" has a generation register space 0x%llx-0x%llx\n",
NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn),
nvl->npu->genregs.start_addr,
nvl->npu->genregs.start_addr + nvl->npu->genregs.size - 1);
}
else
{
NV_DEV_PRINTF(NV_DBG_SETUP, nv,
"IBM-NPU device %04x:%02x:%02x.%u associated with GPU "
"does not support generation registers\n",
NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn));
}
nv_init_ibmnpu_numa_info(nv);
#endif
}
void nv_destroy_ibmnpu_info(nv_state_t *nv)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
if (nvl->npu != NULL)
{
os_free_mem(nvl->npu);
nvl->npu = NULL;
}
}
int nv_init_ibmnpu_devices(nv_state_t *nv)
{
NvU8 i;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
if (!nvl->npu)
{
return 0;
}
for (i = 0; i < nvl->npu->dev_count; i++)
{
NV_DEV_PRINTF(NV_DBG_SETUP, nv,
"Initializing IBM-NPU device %04x:%02x:%02x.%u\n",
NV_PCI_DOMAIN_NUMBER(nvl->npu->devs[i]),
NV_PCI_BUS_NUMBER(nvl->npu->devs[i]),
NV_PCI_SLOT_NUMBER(nvl->npu->devs[i]),
PCI_FUNC(nvl->npu->devs[i]->devfn));
if (ibmnpu_init_device(nvl->npu->devs[i]) != NVL_SUCCESS)
{
nv_unregister_ibmnpu_devices(nv);
return -EIO;
}
nvl->npu->initialized_dev_count++;
}
return 0;
}
void nv_unregister_ibmnpu_devices(nv_state_t *nv)
{
NvU8 i;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
if (!nvl->npu)
{
return;
}
for (i = 0; i < nvl->npu->initialized_dev_count; i++)
{
NV_DEV_PRINTF(NV_DBG_SETUP, nv,
"Unregistering IBM-NPU device %04x:%02x:%02x.%u\n",
NV_PCI_DOMAIN_NUMBER(nvl->npu->devs[i]),
NV_PCI_BUS_NUMBER(nvl->npu->devs[i]),
NV_PCI_SLOT_NUMBER(nvl->npu->devs[i]),
PCI_FUNC(nvl->npu->devs[i]->devfn));
ibmnpu_unregister_device(nvl->npu->devs[i]);
}
nvl->npu->initialized_dev_count = 0;
}
NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *nv, NvU64 *addr,
NvU64 *size, void **device)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
if (nvl->npu == NULL || nvl->npu->genregs.size == 0)
{
return NV_ERR_NOT_SUPPORTED;
}
if (addr)
{
*addr = nvl->npu->genregs.start_addr;
}
if (size)
{
*size = nvl->npu->genregs.size;
}
if (device)
{
*device = (void*)nvl->npu->devs[0];
}
return NV_OK;
}
NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv,
NvBool *mode)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
if (nvl->npu == NULL || nvl->npu->genregs.size == 0)
{
return NV_ERR_NOT_SUPPORTED;
}
*mode = nv_get_rsync_relaxed_ordering_mode(nv);
return NV_OK;
}
void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
if (nvl->npu == NULL || nvl->npu->genregs.size == 0)
{
return;
}
nv_wait_for_rsync(nv);
}
int nv_get_ibmnpu_chip_id(nv_state_t *nv)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
if (nvl->npu == NULL)
{
return -1;
}
return ibmnpu_device_get_chip_id(nvl->npu->devs[0]);
}
void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NvU64 offset, cbsize;
/*
* The range is commonly an ioremap()ed mapping of the GPU's ATS range and
* needs to be compared against the created mappings. Alternatively, kernel
* page tables can be dumped through sysfs if CONFIG_PPC_PTDUMP is enabled.
*/
NV_DEV_PRINTF(NV_DBG_INFO, nv,
"Flushing CPU virtual range [0x%llx, 0x%llx)\n",
cpu_virtual, cpu_virtual + size);
cbsize = nvl->npu->numa_info.l1d_cache_block_size;
CACHE_FLUSH();
/* Force eviction of any cache lines from the NUMA-onlined region. */
for (offset = 0; offset < size; offset += cbsize)
{
asm volatile("dcbf %0,%1" :: "r" (cpu_virtual), "r" (offset) : "memory");
/* Reschedule if necessary to avoid lockup warnings */
cond_resched();
}
CACHE_FLUSH();
}
#else
void nv_init_ibmnpu_info(nv_state_t *nv)
{
}
void nv_destroy_ibmnpu_info(nv_state_t *nv)
{
}
int nv_init_ibmnpu_devices(nv_state_t *nv)
{
return 0;
}
void nv_unregister_ibmnpu_devices(nv_state_t *nv)
{
}
NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *nv, NvU64 *addr,
NvU64 *size, void **device)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv,
NvBool *mode)
{
return NV_ERR_NOT_SUPPORTED;
}
void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv)
{
}
int nv_get_ibmnpu_chip_id(nv_state_t *nv)
{
return -1;
}
void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64 virtual, NvU64 size)
{
}
void nv_ibmnpu_cache_flush_numa_region(nv_state_t *nv)
{
}
#endif


@@ -0,0 +1,80 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_IBMNPU_H_
#define _NV_IBMNPU_H_
#if defined(NVCPU_PPC64LE)
#include "ibmnpu_linux.h"
#define NV_MAX_ATTACHED_IBMNPUS 6
typedef struct nv_npu_numa_info
{
/*
* 47-bit NVIDIA 'system physical address': the hypervisor real 56-bit
* address with NVLink address compression scheme applied.
*/
NvU64 compr_sys_phys_addr;
/*
* 56-bit NVIDIA 'guest physical address'/host virtual address. On
* unvirtualized systems, applying the NVLink address compression scheme
* to this address should be the same as compr_sys_phys_addr.
*/
NvU64 guest_phys_addr;
/*
* L1 data cache block size on P9 - needed to manually flush/invalidate the
* NUMA region from the CPU caches after offlining.
*/
NvU32 l1d_cache_block_size;
} nv_npu_numa_info_t;
struct nv_ibmnpu_info
{
NvU8 dev_count;
NvU8 initialized_dev_count;
struct pci_dev *devs[NV_MAX_ATTACHED_IBMNPUS];
ibmnpu_genregs_info_t genregs;
nv_npu_numa_info_t numa_info;
};
/*
* TODO: These parameters are specific to Volta/P9 configurations, and may
* need to be determined dynamically in the future.
*/
static const NvU32 nv_volta_addr_space_width = 37;
static const NvU32 nv_volta_dma_addr_size = 47;
#endif
void nv_init_ibmnpu_info(nv_state_t *nv);
void nv_destroy_ibmnpu_info(nv_state_t *nv);
int nv_init_ibmnpu_devices(nv_state_t *nv);
void nv_unregister_ibmnpu_devices(nv_state_t *nv);
int nv_get_ibmnpu_chip_id(nv_state_t *nv);
void nv_ibmnpu_cache_flush_numa_region(nv_state_t *nv);
#endif

kernel-open/nvidia/nv-imp.c

@@ -0,0 +1,724 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#if defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT) || IS_ENABLED(CONFIG_TEGRA_BPMP)
#include <soc/tegra/bpmp-abi.h>
#endif
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
#include <soc/tegra/bpmp.h>
#elif defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT)
#include <soc/tegra/tegra_bpmp.h>
#endif // IS_ENABLED(CONFIG_TEGRA_BPMP)
#if defined(NV_DT_BINDINGS_INTERCONNECT_TEGRA_ICC_ID_H_PRESENT)
#include <dt-bindings/interconnect/tegra_icc_id.h>
#endif
#ifdef NV_LINUX_PLATFORM_TEGRA_MC_UTILS_H_PRESENT
#include <linux/platform/tegra/mc_utils.h>
#endif
//
// IMP requires information from various BPMP and MC driver functions. The
// macro below checks that all of the required functions are present.
//
#define IMP_SUPPORT_FUNCTIONS_PRESENT \
NV_IS_EXPORT_SYMBOL_PRESENT_dram_clk_to_mc_clk && \
NV_IS_EXPORT_SYMBOL_PRESENT_get_dram_num_channels && \
NV_IS_EXPORT_SYMBOL_PRESENT_tegra_dram_types && \
(defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT) || \
IS_ENABLED(CONFIG_TEGRA_BPMP)) && \
defined(NV_LINUX_PLATFORM_TEGRA_MC_UTILS_H_PRESENT)
//
// Also create a macro to check if all the required ICC symbols are present.
// DT endpoints are defined in dt-bindings/interconnect/tegra_icc_id.h.
//
#define ICC_SUPPORT_FUNCTIONS_PRESENT \
defined(NV_DT_BINDINGS_INTERCONNECT_TEGRA_ICC_ID_H_PRESENT)
#if IMP_SUPPORT_FUNCTIONS_PRESENT
static struct mrq_emc_dvfs_latency_response latency_table;
static struct mrq_emc_dvfs_emchub_response emchub_table;
static struct cmd_iso_client_get_max_bw_response max_bw_table;
/*!
* @brief Converts the MC driver dram type to RM format
*
* The MC driver's tegra_dram_types() function returns the dram type as an
* enum. We convert it to an NvU32 for better ABI compatibility when stored in
* the TEGRA_IMP_IMPORT_DATA structure, which is shared between various
* software components.
*
* @param[in] dram_type Dram type (DRAM_TYPE_LPDDRxxx format).
*
* @returns dram type (TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDRxxx format).
*/
static inline NvU32
nv_imp_convert_dram_type_to_rm_format
(
enum dram_types dram_type
)
{
NvU32 rm_dram_type;
switch (dram_type)
{
case DRAM_TYPE_LPDDR4_16CH_ECC_1RANK:
case DRAM_TYPE_LPDDR4_16CH_ECC_2RANK:
case DRAM_TYPE_LPDDR4_8CH_ECC_1RANK:
case DRAM_TYPE_LPDDR4_8CH_ECC_2RANK:
case DRAM_TYPE_LPDDR4_4CH_ECC_1RANK:
case DRAM_TYPE_LPDDR4_4CH_ECC_2RANK:
case DRAM_TYPE_LPDDR4_16CH_1RANK:
case DRAM_TYPE_LPDDR4_16CH_2RANK:
case DRAM_TYPE_LPDDR4_8CH_1RANK:
case DRAM_TYPE_LPDDR4_8CH_2RANK:
case DRAM_TYPE_LPDDR4_4CH_1RANK:
case DRAM_TYPE_LPDDR4_4CH_2RANK:
rm_dram_type = TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDR4;
break;
case DRAM_TYPE_LPDDR5_16CH_ECC_1RANK:
case DRAM_TYPE_LPDDR5_16CH_ECC_2RANK:
case DRAM_TYPE_LPDDR5_8CH_ECC_1RANK:
case DRAM_TYPE_LPDDR5_8CH_ECC_2RANK:
case DRAM_TYPE_LPDDR5_4CH_ECC_1RANK:
case DRAM_TYPE_LPDDR5_4CH_ECC_2RANK:
case DRAM_TYPE_LPDDR5_16CH_1RANK:
case DRAM_TYPE_LPDDR5_16CH_2RANK:
case DRAM_TYPE_LPDDR5_8CH_1RANK:
case DRAM_TYPE_LPDDR5_8CH_2RANK:
case DRAM_TYPE_LPDDR5_4CH_1RANK:
case DRAM_TYPE_LPDDR5_4CH_2RANK:
rm_dram_type = TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDR5;
break;
default:
rm_dram_type = TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_UNKNOWN;
break;
}
return rm_dram_type;
}
#endif // IMP_SUPPORT_FUNCTIONS_PRESENT
/*!
* @brief Collects IMP-relevant BPMP data and saves for later
*
* @param[in] nvl OS-specific device state
*
* @returns NV_OK if successful,
* NV_ERR_GENERIC if the BPMP API returns an error,
* NV_ERR_MISSING_TABLE_ENTRY if the latency table has no entries,
* NV_ERR_INVALID_DATA if the number of clock entries in the latency
* table does not match the number of entries in the emchub table, or
* NV_ERR_NOT_SUPPORTED if the functionality is not available.
*/
NV_STATUS
nv_imp_get_bpmp_data
(
nv_linux_state_t *nvl
)
{
#if IMP_SUPPORT_FUNCTIONS_PRESENT
NV_STATUS status = NV_OK;
int rc;
int i;
NvBool bApiTableInvalid = NV_FALSE;
static const struct iso_max_bw dummy_iso_bw_pairs[] =
{ { 204000U, 1472000U },
{ 533000U, 3520000U },
{ 665000U, 4352000U },
{ 800000U, 5184000U },
{ 1066000U, 6784000U },
{ 1375000U, 8704000U },
{ 1600000U, 10112000U },
{ 1866000U, 11712000U },
{ 2133000U, 13376000U },
{ 2400000U, 15040000U },
{ 2750000U, 17152000U },
{ 3000000U, 18688000U },
{ 3200000U, 20800000U }
};
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
struct tegra_bpmp *bpmp;
struct tegra_bpmp_message msg;
struct mrq_iso_client_request iso_client_request;
/*
* The existing "nvidia,skip-clk-rsts" DT property is currently being used
* to skip both clock/reset operations and BPMP MRQ calls. However, there
* are cases in which we want to enable clock/reset programming, but still
* keep the MRQ calls disabled. As such, an additional "nvidia,skip-bpmp-mrqs"
* property is being introduced solely for stubbing any MRQ calls, and
* "nvidia,skip-clk-rsts" will be decoupled so that it only applies to
* clock/reset operations.
*
* The "nvidia,skip-clk-rsts" check will be removed here once the relevant
* DT changes make their way to chips_a.
*/
if (of_property_read_bool(nvl->dev->of_node, "nvidia,skip-clk-rsts") ||
of_property_read_bool(nvl->dev->of_node, "nvidia,skip-bpmp-mrqs"))
{
return NV_OK;
}
bpmp = tegra_bpmp_get(nvl->dev);
if (IS_ERR(bpmp))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: Error getting bpmp struct: %s\n",
PTR_ERR(bpmp));
return NV_ERR_GENERIC;
}
// Get the table of dramclk / DVFS latency pairs.
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_EMC_DVFS_LATENCY;
msg.tx.data = NULL;
msg.tx.size = 0;
msg.rx.data = &latency_table;
msg.rx.size = sizeof(latency_table);
rc = tegra_bpmp_transfer(bpmp, &msg);
#else
// Get the table of dramclk / DVFS latency pairs.
rc = tegra_bpmp_send_receive(MRQ_EMC_DVFS_LATENCY,
NULL,
0,
&latency_table,
sizeof(latency_table));
#endif
if (rc != 0)
{
nv_printf(NV_DBG_ERRORS,
"MRQ_EMC_DVFS_LATENCY returns error code %d\n", rc);
status = NV_ERR_GENERIC;
goto Cleanup;
}
nv_printf(NV_DBG_INFO,
"MRQ_EMC_DVFS_LATENCY table size = %u\n",
latency_table.num_pairs);
if (latency_table.num_pairs == 0U)
{
nv_printf(NV_DBG_ERRORS,
"MRQ_EMC_DVFS_LATENCY table has no entries\n", rc);
status = NV_ERR_MISSING_TABLE_ENTRY;
goto Cleanup;
}
// Get the table of dramclk / emchubclk pairs.
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_EMC_DVFS_EMCHUB;
msg.tx.data = NULL;
msg.tx.size = 0;
msg.rx.data = &emchub_table;
msg.rx.size = sizeof(emchub_table);
rc = tegra_bpmp_transfer(bpmp, &msg);
#else
rc = tegra_bpmp_send_receive(MRQ_EMC_DVFS_EMCHUB,
NULL,
0,
&emchub_table,
sizeof(emchub_table));
#endif
if (rc != 0)
{
nv_printf(NV_DBG_ERRORS,
"MRQ_EMC_DVFS_EMCHUB returns error code %d\n", rc);
status = NV_ERR_GENERIC;
goto Cleanup;
}
nv_printf(NV_DBG_INFO,
"MRQ_EMC_DVFS_EMCHUB table size = %u\n",
emchub_table.num_pairs);
if (latency_table.num_pairs != emchub_table.num_pairs)
{
nv_printf(NV_DBG_ERRORS,
"MRQ_EMC_DVFS_LATENCY table size (%u) does not match MRQ_EMC_DVFS_EMCHUB table size (%u)\n",
latency_table.num_pairs,
emchub_table.num_pairs);
status = NV_ERR_INVALID_DATA;
goto Cleanup;
}
// Get the table of dramclk / max ISO BW pairs.
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
memset(&iso_client_request, 0, sizeof(iso_client_request));
iso_client_request.cmd = CMD_ISO_CLIENT_GET_MAX_BW;
iso_client_request.max_isobw_req.id = TEGRA_ICC_DISPLAY;
msg.mrq = MRQ_ISO_CLIENT;
msg.tx.data = &iso_client_request;
msg.tx.size = sizeof(iso_client_request);
msg.rx.data = &max_bw_table;
msg.rx.size = sizeof(max_bw_table);
rc = tegra_bpmp_transfer(bpmp, &msg);
#else
// The legacy (non-CONFIG_TEGRA_BPMP) path is not implemented for
// MRQ_ISO_CLIENT; assert if this code is ever reached.
NV_ASSERT(NV_FALSE);
#endif
if ((rc != 0) || (max_bw_table.num_pairs == 0U))
{
if (rc != 0)
{
nv_printf(NV_DBG_ERRORS,
"MRQ_ISO_CLIENT returns error code %d\n", rc);
}
else
{
nv_printf(NV_DBG_ERRORS,
"CMD_ISO_CLIENT_GET_MAX_BW table does not contain any entries\n");
}
bApiTableInvalid = NV_TRUE;
}
else
{
//
// Check for entries with ISO BW = 0. It's possible that one entry may
// be zero, but they should not all be zero. (On simulation, due to bug
// 3379796, the API is currently not working; it returns 13 entries,
// each with ISO BW = 0.)
//
bApiTableInvalid = NV_TRUE;
for (i = 0; i < max_bw_table.num_pairs; i++)
{
if (max_bw_table.pairs[i].iso_bw != 0U)
{
bApiTableInvalid = NV_FALSE;
break;
}
}
}
if (bApiTableInvalid)
{
//
// If the table is not returned correctly, for now, fill in a dummy
// table.
//
nv_printf(NV_DBG_ERRORS,
"Creating dummy CMD_ISO_CLIENT_GET_MAX_BW table\n");
max_bw_table.num_pairs = sizeof(dummy_iso_bw_pairs) /
sizeof(dummy_iso_bw_pairs[0]);
for (i = 0; i < max_bw_table.num_pairs; i++)
{
max_bw_table.pairs[i].freq = dummy_iso_bw_pairs[i].freq;
max_bw_table.pairs[i].iso_bw = dummy_iso_bw_pairs[i].iso_bw;
}
}
nv_printf(NV_DBG_INFO,
"CMD_ISO_CLIENT_GET_MAX_BW table size = %u\n",
max_bw_table.num_pairs);
Cleanup:
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
tegra_bpmp_put(bpmp);
#endif
return status;
#else // IMP_SUPPORT_FUNCTIONS_PRESENT
return NV_ERR_NOT_SUPPORTED;
#endif
}
/*!
* @brief Returns IMP-relevant data collected from other modules
*
* @param[out] tegra_imp_import_data Structure to receive the data
*
* @returns NV_OK if successful,
* NV_ERR_BUFFER_TOO_SMALL if the array in TEGRA_IMP_IMPORT_DATA is
* too small,
* NV_ERR_INVALID_DATA if the latency table has different mclk
* frequencies, compared with the emchub table, or
* NV_ERR_NOT_SUPPORTED if the functionality is not available.
*/
NV_STATUS NV_API_CALL
nv_imp_get_import_data
(
TEGRA_IMP_IMPORT_DATA *tegra_imp_import_data
)
{
#if IMP_SUPPORT_FUNCTIONS_PRESENT
NvU32 i;
NvU32 bwTableIndex = 0U;
NvU32 dram_clk_freq_khz;
enum dram_types dram_type;
tegra_imp_import_data->num_dram_clk_entries = latency_table.num_pairs;
if (ARRAY_SIZE(tegra_imp_import_data->dram_clk_instance) <
latency_table.num_pairs)
{
nv_printf(NV_DBG_ERRORS,
"ERROR: TEGRA_IMP_IMPORT_DATA struct needs to have at least "
"%d dram_clk_instance entries, but only %d are allocated\n",
latency_table.num_pairs,
ARRAY_SIZE(tegra_imp_import_data->dram_clk_instance));
return NV_ERR_BUFFER_TOO_SMALL;
}
//
// Copy data that we collected earlier in the BPMP tables into the caller's
// IMP import structure.
//
for (i = 0U; i < latency_table.num_pairs; i++)
{
dram_clk_freq_khz = latency_table.pairs[i].freq;
//
// For each dramclk frequency, we get some information from the EMCHUB
// table and some information from the LATENCY table. We expect both
// tables to have entries for the same dramclk frequencies.
//
if (dram_clk_freq_khz != emchub_table.pairs[i].freq)
{
nv_printf(NV_DBG_ERRORS,
"MRQ_EMC_DVFS_LATENCY index #%d dramclk freq (%d KHz) does not match "
"MRQ_EMC_DVFS_EMCHUB index #%d dramclk freq (%d KHz)\n",
i, latency_table.pairs[i].freq,
i, emchub_table.pairs[i].freq);
return NV_ERR_INVALID_DATA;
}
// Copy a few values to the caller's table.
tegra_imp_import_data->dram_clk_instance[i].dram_clk_freq_khz =
dram_clk_freq_khz;
tegra_imp_import_data->dram_clk_instance[i].switch_latency_ns =
latency_table.pairs[i].latency;
tegra_imp_import_data->dram_clk_instance[i].mc_clk_khz =
dram_clk_to_mc_clk(dram_clk_freq_khz / 1000U) * 1000U;
// MC hubclk is 1/2 of scf clk, which is the same as EMCHUB clk.
tegra_imp_import_data->dram_clk_instance[i].mchub_clk_khz =
emchub_table.pairs[i].hub_freq / 2U;
//
// The ISO BW table may have more entries than the number of dramclk
// frequencies supported on current chip (i.e., more entries than we
// have in the EMCHUB and LATENCY tables). For each dramclk entry that
// we are filling out, search through the ISO BW table to find the
// largest dramclk less than or equal to the dramclk frequency for
// index "i", and use that ISO BW entry. (We assume all tables have
// their entries in order of increasing dramclk frequency.)
//
// Note: Some of the dramclk frequencies in the ISO BW table have been
// observed to be "rounded down" (e.g., 665000 KHz instead of 665600
// KHz).
//
while ((bwTableIndex + 1U < max_bw_table.num_pairs) &&
(dram_clk_freq_khz >= max_bw_table.pairs[bwTableIndex + 1U].freq))
{
nv_printf(NV_DBG_INFO,
"Max ISO BW table: index %u, dramclk = %u KHz, max ISO BW = %u KB/sec\n",
bwTableIndex,
max_bw_table.pairs[bwTableIndex].freq,
max_bw_table.pairs[bwTableIndex].iso_bw);
bwTableIndex++;
}
if (dram_clk_freq_khz >= max_bw_table.pairs[bwTableIndex].freq)
{
nv_printf(NV_DBG_INFO,
"For dramclk = %u KHz, setting max ISO BW = %u KB/sec\n",
dram_clk_freq_khz,
max_bw_table.pairs[bwTableIndex].iso_bw);
tegra_imp_import_data->dram_clk_instance[i].max_iso_bw_kbps =
max_bw_table.pairs[bwTableIndex].iso_bw;
}
else
{
//
// Something went wrong. Maybe the ISO BW table doesn't have any
// entries with dramclk frequency as small as the frequency in the
// EMCHUB and LATENCY tables, or maybe the entries are out of
// order.
//
nv_printf(NV_DBG_ERRORS,
"Couldn't get max ISO BW for dramclk = %u KHz\n",
dram_clk_freq_khz);
return NV_ERR_INVALID_DATA;
}
}
dram_type = tegra_dram_types();
tegra_imp_import_data->dram_type =
nv_imp_convert_dram_type_to_rm_format(dram_type);
tegra_imp_import_data->num_dram_channels = get_dram_num_channels();
// Record the overall maximum possible ISO BW.
i = latency_table.num_pairs - 1U;
tegra_imp_import_data->max_iso_bw_kbps =
tegra_imp_import_data->dram_clk_instance[i].max_iso_bw_kbps;
return NV_OK;
#else // IMP_SUPPORT_FUNCTIONS_PRESENT
return NV_ERR_NOT_SUPPORTED;
#endif
}
/*!
* @brief Tells BPMP whether or not RFL is valid
*
* Display HW generates an ok_to_switch signal which asserts when mempool
* occupancy is high enough to be able to turn off memory long enough to
* execute a dramclk frequency switch without underflowing display output.
* ok_to_switch drives the RFL ("request for latency") signal in the memory
* unit, and the switch sequencer waits for this signal to go active before
* starting a dramclk switch. However, if the signal is not valid (e.g., if
* display HW or SW has not been initialized yet), the switch sequencer ignores
* the signal. This API tells BPMP whether or not the signal is valid.
*
* @param[in] nv Per GPU Linux state
* @param[in] bEnable True if RFL will be valid; false if invalid
*
* @returns NV_OK if successful,
* NV_ERR_NOT_SUPPORTED if the functionality is not available, or
* NV_ERR_GENERIC if some other kind of error occurred.
*/
NV_STATUS NV_API_CALL
nv_imp_enable_disable_rfl
(
nv_state_t *nv,
NvBool bEnable
)
{
NV_STATUS status = NV_ERR_NOT_SUPPORTED;
#if IMP_SUPPORT_FUNCTIONS_PRESENT
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct tegra_bpmp *bpmp = tegra_bpmp_get(nvl->dev);
struct tegra_bpmp_message msg;
struct mrq_emc_disp_rfl_request emc_disp_rfl_request;
int rc;
memset(&emc_disp_rfl_request, 0, sizeof(emc_disp_rfl_request));
emc_disp_rfl_request.mode = bEnable ? EMC_DISP_RFL_MODE_ENABLED :
EMC_DISP_RFL_MODE_DISABLED;
msg.mrq = MRQ_EMC_DISP_RFL;
msg.tx.data = &emc_disp_rfl_request;
msg.tx.size = sizeof(emc_disp_rfl_request);
msg.rx.data = NULL;
msg.rx.size = 0;
rc = tegra_bpmp_transfer(bpmp, &msg);
if (rc == 0)
{
nv_printf(NV_DBG_INFO,
"\"Wait for RFL\" is %s via MRQ_EMC_DISP_RFL\n",
bEnable ? "enabled" : "disabled");
status = NV_OK;
}
else
{
nv_printf(NV_DBG_ERRORS,
"MRQ_EMC_DISP_RFL failed to %s \"Wait for RFL\" (error code = %d)\n",
bEnable ? "enable" : "disable",
rc);
status = NV_ERR_GENERIC;
}
#else
// The legacy (non-CONFIG_TEGRA_BPMP) path is not implemented for
// MRQ_EMC_DISP_RFL; assert if this code is ever reached.
NV_ASSERT(NV_FALSE);
#endif
#endif
return status;
}
/*!
* @brief Obtains a handle for the display data path
*
* If a handle is obtained successfully, it is not returned to the caller; it
* is saved for later use by subsequent nv_imp_icc_set_bw calls.
* nv_imp_icc_get must be called prior to calling nv_imp_icc_set_bw.
*
* @param[in] nv Per GPU Linux state
*
* @returns NV_OK if successful,
* NV_ERR_NOT_SUPPORTED if the functionality is not available, or
* NV_ERR_GENERIC if some other error occurred.
*/
NV_STATUS NV_API_CALL
nv_imp_icc_get
(
nv_state_t *nv
)
{
#if ICC_SUPPORT_FUNCTIONS_PRESENT
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NV_STATUS status = NV_OK;
#if defined(NV_DEVM_ICC_GET_PRESENT)
// Use devm_of_icc_get(), as required by the latest ICC driver.
nvl->nv_imp_icc_path =
devm_of_icc_get(nvl->dev, "read-1");
#elif defined(NV_ICC_GET_PRESENT)
struct device_node *np;
nvl->nv_imp_icc_path = NULL;
// Check if ICC is present in the device tree, and enabled.
np = of_find_node_by_path("/icc");
if (np != NULL)
{
if (of_device_is_available(np))
{
// Get the ICC data path.
nvl->nv_imp_icc_path =
icc_get(nvl->dev, TEGRA_ICC_DISPLAY, TEGRA_ICC_PRIMARY);
}
of_node_put(np);
}
#endif
if (nvl->nv_imp_icc_path == NULL)
{
nv_printf(NV_DBG_INFO, "NVRM: Function for getting ICC path not present\n");
status = NV_ERR_NOT_SUPPORTED;
}
else if (IS_ERR(nvl->nv_imp_icc_path))
{
nv_printf(NV_DBG_ERRORS, "NVRM: invalid path = %s\n",
PTR_ERR(nvl->nv_imp_icc_path));
nvl->nv_imp_icc_path = NULL;
status = NV_ERR_GENERIC;
}
return status;
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}
/*!
* @brief Releases the handle obtained by nv_imp_icc_get
*
* @param[in] nv Per GPU Linux state
*/
void
nv_imp_icc_put
(
nv_state_t *nv
)
{
#if ICC_SUPPORT_FUNCTIONS_PRESENT
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
// If the devm_of_icc_get() API was used to obtain the path, there is no
// need to call put explicitly.
#if !defined(NV_DEVM_ICC_GET_PRESENT)
#if defined(NV_ICC_PUT_PRESENT)
if (nvl->nv_imp_icc_path != NULL)
{
icc_put(nvl->nv_imp_icc_path);
}
#else
nv_printf(NV_DBG_ERRORS, "icc_put() not present\n");
#endif
#endif
nvl->nv_imp_icc_path = NULL;
#endif
}
/*!
* @brief Allocates a specified amount of ISO memory bandwidth for display
*
* floor_bw_kbps is the minimum required (i.e., floor) dramclk frequency
* multiplied by the width of the pipe over which the display data will travel.
* (It is understood that the bandwidth calculated by multiplying the clock
* frequency by the pipe width will not be realistically achievable, due to
* overhead in the memory subsystem. ICC will not actually use the bandwidth
* value, except to reverse the calculation to get the required dramclk
* frequency.)
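*
* For example (illustrative numbers only, not from this driver): with a
* hypothetical 4-byte-wide pipe and a required floor dramclk of 1600000
* KHz, the caller would pass floor_bw_kbps = 1600000 * 4 = 6400000, and
* ICC divides by the pipe width again to recover the 1600000 KHz floor.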
*
* nv_imp_icc_get must be called prior to calling this function.
*
* @param[in] nv Per GPU Linux state
* @param[in] avg_bw_kbps Amount of ISO memory bandwidth requested
* @param[in] floor_bw_kbps Min required dramclk freq * pipe width
*
* @returns NV_OK if successful,
* NV_ERR_INSUFFICIENT_RESOURCES if one of the bandwidth values is too
* high, and bandwidth cannot be allocated,
* NV_ERR_NOT_SUPPORTED if the functionality is not available, or
* NV_ERR_GENERIC if some other kind of error occurred.
*/
NV_STATUS NV_API_CALL
nv_imp_icc_set_bw
(
nv_state_t *nv,
NvU32 avg_bw_kbps,
NvU32 floor_bw_kbps
)
{
#if ICC_SUPPORT_FUNCTIONS_PRESENT
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
int rc;
NV_STATUS status = NV_OK;
//
// avg_bw_kbps can be either ISO bw request or NISO bw request.
// Use floor_bw_kbps to make floor requests.
//
#if defined(NV_ICC_SET_BW_PRESENT)
//
// nv_imp_icc_path will be NULL on AV + L systems because ICC is disabled.
// In this case, skip the allocation call, and just return a success
// status.
//
if (nvl->nv_imp_icc_path == NULL)
{
return NV_OK;
}
rc = icc_set_bw(nvl->nv_imp_icc_path, avg_bw_kbps, floor_bw_kbps);
#else
nv_printf(NV_DBG_ERRORS, "icc_set_bw() not present\n");
return NV_ERR_NOT_SUPPORTED;
#endif
if (rc < 0)
{
// A negative return value indicates an error.
if (rc == -ENOMEM)
{
status = NV_ERR_INSUFFICIENT_RESOURCES;
}
else
{
status = NV_ERR_GENERIC;
}
}
return status;
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}


@@ -0,0 +1,155 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "dce_rm_client_ipc.h"
#if defined(NV_LINUX_PLATFORM_TEGRA_DCE_DCE_CLIENT_IPC_H_PRESENT)
#include <linux/platform/tegra/dce/dce-client-ipc.h>
#if (NV_IS_EXPORT_SYMBOL_PRESENT_tegra_dce_register_ipc_client && \
NV_IS_EXPORT_SYMBOL_PRESENT_tegra_dce_client_ipc_send_recv && \
NV_IS_EXPORT_SYMBOL_PRESENT_tegra_dce_unregister_ipc_client)
#define NV_IS_EXPORT_SYMBOLS_PRESENT_TEGRA_DCE_CLIENT 1
#else
#define NV_IS_EXPORT_SYMBOLS_PRESENT_TEGRA_DCE_CLIENT 0
#endif
#endif
#if (defined(NV_LINUX_PLATFORM_TEGRA_DCE_DCE_CLIENT_IPC_H_PRESENT) && \
NV_IS_EXPORT_SYMBOLS_PRESENT_TEGRA_DCE_CLIENT)
static const NvU32 dceClientRmIpcTypeMap[DCE_CLIENT_RM_IPC_TYPE_MAX] = {
[DCE_CLIENT_RM_IPC_TYPE_SYNC] = DCE_CLIENT_IPC_TYPE_CPU_RM,
[DCE_CLIENT_RM_IPC_TYPE_EVENT] = DCE_CLIENT_IPC_TYPE_RM_EVENT,
};
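/*
 * Illustrative registration flow (a sketch only; "my_callback", "ctx", and
 * "msg" are hypothetical, and the handle returned by registration is assumed
 * to be the client id passed to send/recv):
 *
 *     NvU32 handle;
 *     if (nv_tegra_dce_register_ipc_client(DCE_CLIENT_RM_IPC_TYPE_SYNC,
 *                                          ctx, my_callback, &handle) == NV_OK)
 *     {
 *         nv_tegra_dce_client_ipc_send_recv(handle, msg, msgLength);
 *         nv_tegra_dce_unregister_ipc_client(handle);
 *     }
 */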
static NV_STATUS validate_dce_client_ipc_interface_type(NvU32 interfaceType)
{
if (interfaceType >= DCE_CLIENT_RM_IPC_TYPE_MAX)
{
return NV_ERR_INVALID_ARGUMENT;
}
if (dceClientRmIpcTypeMap[interfaceType] >= DCE_CLIENT_IPC_TYPE_MAX)
{
return NV_ERR_INVALID_ARGUMENT;
}
return NV_OK;
}
NvU32 nv_tegra_get_rm_interface_type(NvU32 clientIpcType)
{
NvU32 interfaceType = DCE_CLIENT_RM_IPC_TYPE_SYNC;
for (interfaceType = DCE_CLIENT_RM_IPC_TYPE_SYNC;
interfaceType < DCE_CLIENT_RM_IPC_TYPE_MAX;
interfaceType++)
{
if (dceClientRmIpcTypeMap[interfaceType] == clientIpcType)
return interfaceType;
}
// Not found: the error code is returned as an NvU32; callers must treat
// it as a sentinel value.
return NV_ERR_INVALID_DATA;
}
NV_STATUS nv_tegra_dce_register_ipc_client
(
NvU32 interfaceType,
void *usrCtx,
nvTegraDceClientIpcCallback callbackFn,
NvU32 *handle
)
{
NvU32 dceClientInterfaceType = DCE_CLIENT_IPC_TYPE_MAX;
if (validate_dce_client_ipc_interface_type(interfaceType) != NV_OK)
{
return NV_ERR_INVALID_ARGUMENT;
}
dceClientInterfaceType = dceClientRmIpcTypeMap[interfaceType];
return tegra_dce_register_ipc_client(dceClientInterfaceType, callbackFn, usrCtx, handle);
}
NV_STATUS nv_tegra_dce_client_ipc_send_recv
(
NvU32 clientId,
void *msg,
NvU32 msgLength
)
{
struct dce_ipc_message dce_ipc_msg;
memset(&dce_ipc_msg, 0, sizeof(struct dce_ipc_message));
dce_ipc_msg.tx.data = msg;
dce_ipc_msg.rx.data = msg;
dce_ipc_msg.tx.size = msgLength;
dce_ipc_msg.rx.size = msgLength;
return tegra_dce_client_ipc_send_recv(clientId, &dce_ipc_msg);
}
NV_STATUS nv_tegra_dce_unregister_ipc_client(NvU32 clientId)
{
return tegra_dce_unregister_ipc_client(clientId);
}
#else
NvU32 nv_tegra_get_rm_interface_type(NvU32 clientIpcType)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS nv_tegra_dce_register_ipc_client
(
NvU32 interfaceType,
void *usrCtx,
nvTegraDceClientIpcCallback callbackFn,
NvU32 *handle
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS nv_tegra_dce_client_ipc_send_recv
(
NvU32 clientId,
void *msg,
NvU32 msgLength
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS nv_tegra_dce_unregister_ipc_client(NvU32 clientId)
{
return NV_ERR_NOT_SUPPORTED;
}
#endif


@@ -0,0 +1,329 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-kthread-q.h"
#include "nv-list-helpers.h"
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/mm.h>
#if defined(NV_LINUX_BUG_H_PRESENT)
#include <linux/bug.h>
#else
#include <asm/bug.h>
#endif
// Today's implementation is a little simpler and more limited than the
// API description allows for in nv-kthread-q.h. Details include:
//
// 1. Each nv_kthread_q instance is a first-in, first-out queue.
//
// 2. Each nv_kthread_q instance is serviced by exactly one kthread.
//
// You can create any number of queues, each of which gets its own
// named kernel thread (kthread). You can then insert arbitrary functions
// into the queue, and those functions will be run in the context of the
// queue's kthread.
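//
// A minimal usage sketch (illustrative only; "my_work", "my_ctx", and "ctx"
// are hypothetical):
//
//     static void my_work(void *args)
//     {
//         struct my_ctx *ctx = args;
//         // ... runs in the queue's kthread context ...
//     }
//
//     nv_kthread_q_t q;
//     nv_kthread_q_item_t item;
//
//     nv_kthread_q_init_on_node(&q, "my_q", NV_KTHREAD_NO_NODE);
//     nv_kthread_q_item_init(&item, my_work, &ctx);
//     nv_kthread_q_schedule_q_item(&q, &item);
//     ...
//     nv_kthread_q_stop(&q);  // flushes the queue, then stops the kthread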
#ifndef WARN
// Only *really* old kernels (2.6.9) end up here. Just use a simple printk
// to implement this, because such kernels won't be supported much longer.
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
printk(KERN_ERR format); \
unlikely(__ret_warn_on); \
})
#endif
#define NVQ_WARN(fmt, ...) \
do { \
if (in_interrupt()) { \
WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \
##__VA_ARGS__); \
} \
else { \
WARN(1, "nv_kthread_q: task: %s: " fmt, \
current->comm, \
##__VA_ARGS__); \
} \
} while (0)
static int _main_loop(void *args)
{
nv_kthread_q_t *q = (nv_kthread_q_t *)args;
nv_kthread_q_item_t *q_item = NULL;
unsigned long flags;
while (1) {
// Normally this thread is never interrupted. However,
// down_interruptible (instead of down) is called here,
// in order to avoid being classified as a potentially
// hung task, by the kernel watchdog.
while (down_interruptible(&q->q_sem))
NVQ_WARN("Interrupted during semaphore wait\n");
if (atomic_read(&q->main_loop_should_exit))
break;
spin_lock_irqsave(&q->q_lock, flags);
// The q_sem semaphore prevents us from getting here unless there is
// at least one item in the list, so an empty list indicates a bug.
if (unlikely(list_empty(&q->q_list_head))) {
spin_unlock_irqrestore(&q->q_lock, flags);
NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q);
continue;
}
// Consume one item from the queue
q_item = list_first_entry(&q->q_list_head,
nv_kthread_q_item_t,
q_list_node);
list_del_init(&q_item->q_list_node);
spin_unlock_irqrestore(&q->q_lock, flags);
// Run the item
q_item->function_to_run(q_item->function_args);
// Make debugging a little simpler by clearing this between runs:
q_item = NULL;
}
while (!kthread_should_stop())
schedule();
return 0;
}
void nv_kthread_q_stop(nv_kthread_q_t *q)
{
// check if queue has been properly initialized
if (unlikely(!q->q_kthread))
return;
nv_kthread_q_flush(q);
// If this assertion fires, then a caller likely either broke the API rules,
// by adding items after calling nv_kthread_q_stop, or possibly messed up
// with inadequate flushing of self-rescheduling q_items.
if (unlikely(!list_empty(&q->q_list_head)))
NVQ_WARN("list not empty after flushing\n");
if (likely(!atomic_read(&q->main_loop_should_exit))) {
atomic_set(&q->main_loop_should_exit, 1);
// Wake up the kthread so that it can see that it needs to stop:
up(&q->q_sem);
kthread_stop(q->q_kthread);
q->q_kthread = NULL;
}
}
// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by
// kthread_create_on_node relies on a 2 entry, per-core cache to minimize
// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the
// stack location ends up being a function of the core assigned to the current
// thread, instead of being a function of the specified NUMA node. The cache was
// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0
// ("fork: Optimize task creation by caching two thread stacks per CPU if
// CONFIG_VMAP_STACK=y")
//
// To work around the problematic cache, we create up to three kernel threads
// -If the first thread's stack is resident on the preferred node, return this
// thread.
// -Otherwise, create a second thread. If its stack is resident on the
// preferred node, stop the first thread and return this one.
// -Otherwise, create a third thread. The stack allocator does not find a
// cached stack, and so falls back to vmalloc, which takes the NUMA hint into
// consideration. The first two threads are then stopped.
//
// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned.
//
// This function is never invoked when there is no NUMA preference (preferred
// node is NUMA_NO_NODE).
static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
nv_kthread_q_t *q,
int preferred_node,
const char *q_name)
{
unsigned i, j;
static const unsigned attempts = 3;
struct task_struct *thread[3];
for (i = 0;; i++) {
struct page *stack;
thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name);
if (unlikely(IS_ERR(thread[i]))) {
// Instead of failing, pick the previous thread, even if its
// stack is not allocated on the preferred node.
if (i > 0)
i--;
break;
}
// vmalloc is not used to allocate the stack, so simply return the
// thread, even if its stack may not be allocated on the preferred node
if (!is_vmalloc_addr(thread[i]->stack))
break;
// Ran out of attempts - return thread even if its stack may not be
// allocated on the preferred node
if (i == (attempts - 1))
break;
// Get the NUMA node where the first page of the stack is resident. If
// it is the preferred node, select this thread.
stack = vmalloc_to_page(thread[i]->stack);
if (page_to_nid(stack) == preferred_node)
break;
}
for (j = i; j > 0; j--)
kthread_stop(thread[j - 1]);
return thread[i];
}
int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node)
{
memset(q, 0, sizeof(*q));
INIT_LIST_HEAD(&q->q_list_head);
spin_lock_init(&q->q_lock);
sema_init(&q->q_sem, 0);
if (preferred_node == NV_KTHREAD_NO_NODE) {
q->q_kthread = kthread_create(_main_loop, q, q_name);
}
else {
q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name);
}
if (IS_ERR(q->q_kthread)) {
int err = PTR_ERR(q->q_kthread);
// Clear q_kthread before returning so that nv_kthread_q_stop() can be
// safely called on it making error handling easier.
q->q_kthread = NULL;
return err;
}
wake_up_process(q->q_kthread);
return 0;
}
// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&q->q_lock, flags);
if (likely(list_empty(&q_item->q_list_node)))
list_add_tail(&q_item->q_list_node, &q->q_list_head);
else
ret = 0;
spin_unlock_irqrestore(&q->q_lock, flags);
if (likely(ret))
up(&q->q_sem);
return ret;
}
void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item,
nv_q_func_t function_to_run,
void *function_args)
{
INIT_LIST_HEAD(&q_item->q_list_node);
q_item->function_to_run = function_to_run;
q_item->function_args = function_args;
}
// Returns true (non-zero) if the q_item got scheduled, false otherwise.
int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q,
nv_kthread_q_item_t *q_item)
{
if (unlikely(atomic_read(&q->main_loop_should_exit))) {
NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was "
"called with a non-alive q: 0x%p\n", q);
return 0;
}
return _raw_q_schedule(q, q_item);
}
static void _q_flush_function(void *args)
{
struct completion *completion = (struct completion *)args;
complete(completion);
}
static void _raw_q_flush(nv_kthread_q_t *q)
{
nv_kthread_q_item_t q_item;
DECLARE_COMPLETION_ONSTACK(completion);
nv_kthread_q_item_init(&q_item, _q_flush_function, &completion);
_raw_q_schedule(q, &q_item);
// Wait for the flush item to run. Once it has run, then all of the
// previously queued items in front of it will have run, so that means
// the flush is complete.
wait_for_completion(&completion);
}
void nv_kthread_q_flush(nv_kthread_q_t *q)
{
if (unlikely(atomic_read(&q->main_loop_should_exit))) {
NVQ_WARN("Not allowed: nv_kthread_q_flush was called after "
"nv_kthread_q_stop. q: 0x%p\n", q);
return;
}
// This 2x flush is not a typing mistake. The queue really does have to be
// flushed twice, in order to take care of the case of a q_item that
// reschedules itself.
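// Example: if item A reschedules itself when it runs, the first flush only
// guarantees that the already-queued instance of A has run; the second
// flush then waits for the instance that A re-queued in the meantime.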
_raw_q_flush(q);
_raw_q_flush(q);
}


@@ -0,0 +1,232 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-memdbg.h"
#include "nv-linux.h"
/* track who's allocating memory and print out a list of leaked allocations at
* teardown.
*/
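/*
 * Illustrative call pattern (a sketch; the real allocation wrappers that
 * pass __FILE__/__LINE__ live elsewhere in the driver):
 *
 *     void *ptr = kmalloc(size, GFP_KERNEL);
 *     nv_memdbg_add(ptr, size, __FILE__, __LINE__);
 *     ...
 *     nv_memdbg_remove(ptr, size, __FILE__, __LINE__);
 *     kfree(ptr);
 *
 * nv_memdbg_exit() then reports anything still present in the tree.
 */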
typedef struct {
struct rb_node rb_node;
void *addr;
NvU64 size;
NvU32 line;
const char *file;
} nv_memdbg_node_t;
struct
{
struct rb_root rb_root;
NvU64 untracked_bytes;
NvU64 num_untracked_allocs;
nv_spinlock_t lock;
} g_nv_memdbg;
void nv_memdbg_init(void)
{
NV_SPIN_LOCK_INIT(&g_nv_memdbg.lock);
g_nv_memdbg.rb_root = RB_ROOT;
}
static nv_memdbg_node_t *nv_memdbg_node_entry(struct rb_node *rb_node)
{
return rb_entry(rb_node, nv_memdbg_node_t, rb_node);
}
static void nv_memdbg_insert_node(nv_memdbg_node_t *new)
{
nv_memdbg_node_t *node;
struct rb_node **rb_node = &g_nv_memdbg.rb_root.rb_node;
struct rb_node *rb_parent = NULL;
while (*rb_node)
{
node = nv_memdbg_node_entry(*rb_node);
WARN_ON(new->addr == node->addr);
rb_parent = *rb_node;
if (new->addr < node->addr)
rb_node = &(*rb_node)->rb_left;
else
rb_node = &(*rb_node)->rb_right;
}
rb_link_node(&new->rb_node, rb_parent, rb_node);
rb_insert_color(&new->rb_node, &g_nv_memdbg.rb_root);
}
static nv_memdbg_node_t *nv_memdbg_remove_node(void *addr)
{
nv_memdbg_node_t *node = NULL;
struct rb_node *rb_node = g_nv_memdbg.rb_root.rb_node;
while (rb_node)
{
node = nv_memdbg_node_entry(rb_node);
if (addr == node->addr)
break;
else if (addr < node->addr)
rb_node = rb_node->rb_left;
else
rb_node = rb_node->rb_right;
/* A failed search must not leave a stale candidate behind */
node = NULL;
}
WARN_ON(node == NULL);
/* Only erase a node that was actually found; erasing through a NULL or
 * stale pointer would crash or corrupt the tree. */
if (node != NULL)
rb_erase(&node->rb_node, &g_nv_memdbg.rb_root);
return node;
}
void nv_memdbg_add(void *addr, NvU64 size, const char *file, int line)
{
nv_memdbg_node_t *node;
unsigned long flags;
WARN_ON(addr == NULL);
/* If node allocation fails, we can still update the untracked counters */
node = kmalloc(sizeof(*node),
NV_MAY_SLEEP() ? NV_GFP_KERNEL : NV_GFP_ATOMIC);
if (node)
{
node->addr = addr;
node->size = size;
node->file = file;
node->line = line;
}
NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags);
if (node)
{
nv_memdbg_insert_node(node);
}
else
{
++g_nv_memdbg.num_untracked_allocs;
g_nv_memdbg.untracked_bytes += size;
}
NV_SPIN_UNLOCK_IRQRESTORE(&g_nv_memdbg.lock, flags);
}
void nv_memdbg_remove(void *addr, NvU64 size, const char *file, int line)
{
nv_memdbg_node_t *node;
unsigned long flags;
NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags);
node = nv_memdbg_remove_node(addr);
if (!node)
{
WARN_ON(g_nv_memdbg.num_untracked_allocs == 0);
WARN_ON(g_nv_memdbg.untracked_bytes < size);
--g_nv_memdbg.num_untracked_allocs;
g_nv_memdbg.untracked_bytes -= size;
}
NV_SPIN_UNLOCK_IRQRESTORE(&g_nv_memdbg.lock, flags);
if (node)
{
if ((size != 0) && (node->size != size))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: size mismatch on free: %llu != %llu\n",
size, node->size);
if (node->file)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: allocation: 0x%p @ %s:%d\n",
node->addr, node->file, node->line);
}
else
{
nv_printf(NV_DBG_ERRORS,
"NVRM: allocation: 0x%p\n",
node->addr);
}
os_dbg_breakpoint();
}
kfree(node);
}
}
void nv_memdbg_exit(void)
{
nv_memdbg_node_t *node;
NvU64 leaked_bytes = 0, num_leaked_allocs = 0;
if (!RB_EMPTY_ROOT(&g_nv_memdbg.rb_root))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: list of leaked memory allocations:\n");
}
while (!RB_EMPTY_ROOT(&g_nv_memdbg.rb_root))
{
node = nv_memdbg_node_entry(rb_first(&g_nv_memdbg.rb_root));
leaked_bytes += node->size;
++num_leaked_allocs;
if (node->file)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: %llu bytes, 0x%p @ %s:%d\n",
node->size, node->addr, node->file, node->line);
}
else
{
nv_printf(NV_DBG_ERRORS,
"NVRM: %llu bytes, 0x%p\n",
node->size, node->addr);
}
rb_erase(&node->rb_node, &g_nv_memdbg.rb_root);
kfree(node);
}
/* If we failed to allocate a node at some point, we may have leaked memory
* even if the tree is empty */
if (num_leaked_allocs > 0 || g_nv_memdbg.num_untracked_allocs > 0)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: total leaked memory: %llu bytes in %llu allocations\n",
leaked_bytes + g_nv_memdbg.untracked_bytes,
num_leaked_allocs + g_nv_memdbg.num_untracked_allocs);
if (g_nv_memdbg.num_untracked_allocs > 0)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: %llu bytes in %llu allocations untracked\n",
g_nv_memdbg.untracked_bytes, g_nv_memdbg.num_untracked_allocs);
}
}
}


@@ -0,0 +1,810 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "nv_speculation_barrier.h"
/*
* The 'struct vm_operations' open() callback is called by the Linux
* kernel when the parent VMA is split or copied, close() when the
* current VMA is about to be deleted.
*
* We implement these callbacks to keep track of the number of user
* mappings of system memory allocations. This was motivated by a
* subtle interaction problem between the driver and the kernel with
* respect to the bookkeeping of pages marked reserved and later
* mapped with mmap().
*
* Traditionally, the Linux kernel ignored reserved pages, such that
* when they were mapped via mmap(), the integrity of their usage
* counts depended on the reserved bit being set for as long as user
* mappings existed.
*
* Since we mark system memory pages allocated for DMA reserved and
* typically map them with mmap(), we need to ensure they remain
* reserved until the last mapping has been torn down. This worked
* correctly in most cases, but in a few, the RM API called into the
* RM to free memory before calling munmap() to unmap it.
*
* In the past, we allowed nv_free_pages() to remove the 'at' from
* the parent device's allocation list in this case, but didn't
* release the underlying pages until the last user mapping had been
* destroyed:
*
* In nvidia_vma_release(), we freed any resources associated with
* the allocation (IOMMU mappings, etc.) and cleared the
* underlying pages' reserved bits, but didn't free them. The kernel
* was expected to do this.
*
* This worked in practice, but made dangerous assumptions about the
* kernel's behavior and could fail in some cases. We now handle
* this case differently (see below).
*/
static void
nvidia_vma_open(struct vm_area_struct *vma)
{
nv_alloc_t *at = NV_VMA_PRIVATE(vma);
NV_PRINT_VMA(NV_DBG_MEMINFO, vma);
if (at != NULL)
{
NV_ATOMIC_INC(at->usage_count);
NV_PRINT_AT(NV_DBG_MEMINFO, at);
}
}
/*
* (see above for additional information)
*
* If the 'at' usage count drops to zero with the updated logic, the
* allocation is recorded in the free list of the private
* data associated with the file pointer; nvidia_close() uses this
* list to perform deferred free operations when the parent file
* descriptor is closed. This will typically happen when the process
* exits.
*
* Since this is technically a workaround to handle possible fallout
* from misbehaving clients, we additionally print a warning.
*/
static void
nvidia_vma_release(struct vm_area_struct *vma)
{
nv_alloc_t *at = NV_VMA_PRIVATE(vma);
nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma));
static int count = 0;
NV_PRINT_VMA(NV_DBG_MEMINFO, vma);
if (at != NULL && nv_alloc_release(nvlfp, at))
{
if ((at->pid == os_get_current_process()) &&
(count++ < NV_MAX_RECURRING_WARNING_MESSAGES))
{
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %s: late unmap, comm: %s, 0x%p\n",
__FUNCTION__, current->comm, at);
}
}
}
static int
nvidia_vma_access(
struct vm_area_struct *vma,
unsigned long addr,
void *buffer,
int length,
int write
)
{
nv_alloc_t *at = NULL;
nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma));
nv_state_t *nv = NV_STATE_PTR(nvlfp->nvptr);
NvU32 pageIndex, pageOffset;
void *kernel_mapping;
const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context;
NvU64 offset;
pageIndex = ((addr - vma->vm_start) >> PAGE_SHIFT);
pageOffset = (addr & ~PAGE_MASK);
if (length < 0)
{
return -EINVAL;
}
if (!mmap_context->valid)
{
nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap context\n");
return -EINVAL;
}
offset = mmap_context->mmap_start;
if (nv->flags & NV_FLAG_CONTROL)
{
at = NV_VMA_PRIVATE(vma);
/*
* at can be NULL for peer IO mem.
*/
if (!at)
return -EINVAL;
if (pageIndex >= at->num_pages)
return -EINVAL;
/*
* On PPC64LE builds, nv_array_index_no_speculate() is not defined, so
* call nv_speculation_barrier() instead.
* When this definition is added, this platform check should be removed.
*/
#if !defined(NVCPU_PPC64LE)
pageIndex = nv_array_index_no_speculate(pageIndex, at->num_pages);
#else
nv_speculation_barrier();
#endif
kernel_mapping = (void *)(at->page_table[pageIndex]->virt_addr + pageOffset);
}
else if (IS_FB_OFFSET(nv, offset, length))
{
addr = (offset & PAGE_MASK);
kernel_mapping = os_map_kernel_space(addr, PAGE_SIZE, NV_MEMORY_UNCACHED);
if (kernel_mapping == NULL)
return -ENOMEM;
kernel_mapping = ((char *)kernel_mapping + pageOffset);
}
else
return -EINVAL;
length = NV_MIN(length, (int)(PAGE_SIZE - pageOffset));
if (write)
memcpy(kernel_mapping, buffer, length);
else
memcpy(buffer, kernel_mapping, length);
if (at == NULL)
{
kernel_mapping = ((char *)kernel_mapping - pageOffset);
os_unmap_kernel_space(kernel_mapping, PAGE_SIZE);
}
return length;
}
static vm_fault_t nvidia_fault(
#if !defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
struct vm_area_struct *vma,
#endif
struct vm_fault *vmf
)
{
#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
struct vm_area_struct *vma = vmf->vma;
#endif
nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma));
nv_linux_state_t *nvl = nvlfp->nvptr;
nv_state_t *nv = NV_STATE_PTR(nvl);
vm_fault_t ret = VM_FAULT_NOPAGE;
NvU64 page;
NvU64 num_pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT;
NvU64 pfn_start = (nvlfp->mmap_context.mmap_start >> PAGE_SHIFT);
if (vma->vm_pgoff != 0)
{
return VM_FAULT_SIGBUS;
}
// Mapping revocation is only supported for GPU mappings.
if (NV_IS_CTL_DEVICE(nv))
{
return VM_FAULT_SIGBUS;
}
// Wake up GPU and reinstate mappings only if we are not in S3/S4 entry
if (!down_read_trylock(&nv_system_pm_lock))
{
return VM_FAULT_NOPAGE;
}
down(&nvl->mmap_lock);
// Wake up the GPU if it is not currently safe to mmap.
if (!nvl->safe_to_mmap)
{
NV_STATUS status;
if (!nvl->gpu_wakeup_callback_needed)
{
// GPU wakeup callback already scheduled.
up(&nvl->mmap_lock);
up_read(&nv_system_pm_lock);
return VM_FAULT_NOPAGE;
}
/*
* GPU wakeup cannot be completed directly in the fault handler due to the
* inability to take the GPU lock while mmap_lock is held.
*/
status = rm_schedule_gpu_wakeup(nvl->sp[NV_DEV_STACK_GPU_WAKEUP], nv);
if (status != NV_OK)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: VM: rm_schedule_gpu_wakeup failed: %x\n", status);
up(&nvl->mmap_lock);
up_read(&nv_system_pm_lock);
return VM_FAULT_SIGBUS;
}
// Ensure that we do not schedule duplicate GPU wakeup callbacks.
nvl->gpu_wakeup_callback_needed = NV_FALSE;
up(&nvl->mmap_lock);
up_read(&nv_system_pm_lock);
return VM_FAULT_NOPAGE;
}
// Safe to mmap, map all pages in this VMA.
for (page = 0; page < num_pages; page++)
{
NvU64 virt_addr = vma->vm_start + (page << PAGE_SHIFT);
NvU64 pfn = pfn_start + page;
ret = nv_insert_pfn(vma, virt_addr, pfn,
nvlfp->mmap_context.remap_prot_extra);
if (ret != VM_FAULT_NOPAGE)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: VM: nv_insert_pfn failed: %x\n", ret);
break;
}
nvl->all_mappings_revoked = NV_FALSE;
}
up(&nvl->mmap_lock);
up_read(&nv_system_pm_lock);
return ret;
}
static struct vm_operations_struct nv_vm_ops = {
.open = nvidia_vma_open,
.close = nvidia_vma_release,
.fault = nvidia_fault,
.access = nvidia_vma_access,
};
int nv_encode_caching(
pgprot_t *prot,
NvU32 cache_type,
nv_memory_type_t memory_type
)
{
pgprot_t tmp;
if (prot == NULL)
{
tmp = __pgprot(0);
prot = &tmp;
}
switch (cache_type)
{
case NV_MEMORY_UNCACHED_WEAK:
#if defined(NV_PGPROT_UNCACHED_WEAK)
*prot = NV_PGPROT_UNCACHED_WEAK(*prot);
break;
#endif
case NV_MEMORY_UNCACHED:
*prot = (memory_type == NV_MEMORY_TYPE_SYSTEM) ?
NV_PGPROT_UNCACHED(*prot) :
NV_PGPROT_UNCACHED_DEVICE(*prot);
break;
#if defined(NV_PGPROT_WRITE_COMBINED) && \
defined(NV_PGPROT_WRITE_COMBINED_DEVICE)
case NV_MEMORY_DEFAULT:
case NV_MEMORY_WRITECOMBINED:
if (NV_ALLOW_WRITE_COMBINING(memory_type))
{
*prot = (memory_type == NV_MEMORY_TYPE_FRAMEBUFFER) ?
NV_PGPROT_WRITE_COMBINED_DEVICE(*prot) :
NV_PGPROT_WRITE_COMBINED(*prot);
break;
}
/*
* If WC support is unavailable, we need to return an error
* code to the caller, but need not print a warning.
*
* For frame buffer memory, callers are expected to use the
* UC- memory type if we report WC as unsupported, which
* translates to the effective memory type WC if a WC MTRR
* exists or else UC.
*/
return 1;
#endif
case NV_MEMORY_CACHED:
if (!NV_ALLOW_CACHING(memory_type))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: VM: memory type %d does not allow caching!\n",
memory_type);
return 1;
}
break;
default:
nv_printf(NV_DBG_ERRORS,
"NVRM: VM: cache type %d not supported for memory type %d!\n",
cache_type, memory_type);
return 1;
}
return 0;
}
static int nvidia_mmap_peer_io(
struct vm_area_struct *vma,
nv_alloc_t *at,
NvU64 page_index,
NvU64 pages
)
{
int ret;
NvU64 start;
NvU64 size;
BUG_ON(!at->flags.contig);
start = at->page_table[page_index]->phys_addr;
size = pages * PAGE_SIZE;
ret = nv_io_remap_page_range(vma, start, size, 0);
return ret;
}
static int nvidia_mmap_sysmem(
struct vm_area_struct *vma,
nv_alloc_t *at,
NvU64 page_index,
NvU64 pages
)
{
NvU64 j;
int ret = 0;
unsigned long start = 0;
NV_ATOMIC_INC(at->usage_count);
start = vma->vm_start;
for (j = page_index; j < (page_index + pages); j++)
{
/*
* On PPC64LE builds, nv_array_index_no_speculate() is not defined, so
* call nv_speculation_barrier() instead.
* When this definition is added, this platform check should be removed.
*/
#if !defined(NVCPU_PPC64LE)
j = nv_array_index_no_speculate(j, (page_index + pages));
#else
nv_speculation_barrier();
#endif
#if defined(NV_VGPU_KVM_BUILD)
if (at->flags.guest)
{
ret = nv_remap_page_range(vma, start, at->page_table[j]->phys_addr,
PAGE_SIZE, vma->vm_page_prot);
}
else
#endif
{
vma->vm_page_prot = nv_adjust_pgprot(vma->vm_page_prot, 0);
ret = vm_insert_page(vma, start,
NV_GET_PAGE_STRUCT(at->page_table[j]->phys_addr));
}
if (ret)
{
NV_ATOMIC_DEC(at->usage_count);
return -EAGAIN;
}
start += PAGE_SIZE;
}
return ret;
}
static int nvidia_mmap_numa(
struct vm_area_struct *vma,
const nv_alloc_mapping_context_t *mmap_context)
{
NvU64 start, addr;
NvU64 pages;
NvU64 i;
pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT;
start = vma->vm_start;
if (mmap_context->num_pages < pages)
{
return -EINVAL;
}
// Needed by the Linux kernel for mapping compound pages
nv_vm_flags_set(vma, VM_MIXEDMAP);
for (i = 0, addr = mmap_context->page_array[0]; i < pages;
addr = mmap_context->page_array[++i], start += PAGE_SIZE)
{
if (vm_insert_page(vma, start, NV_GET_PAGE_STRUCT(addr)) != 0)
{
return -EAGAIN;
}
}
return 0;
}
int nvidia_mmap_helper(
nv_state_t *nv,
nv_linux_file_private_t *nvlfp,
nvidia_stack_t *sp,
struct vm_area_struct *vma,
void *vm_priv
)
{
NvU32 prot = 0;
int ret;
const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NV_STATUS status;
if (nvlfp == NULL)
return NV_ERR_INVALID_ARGUMENT;
/*
* If mmap context is not valid on this file descriptor, this mapping wasn't
* previously validated with the RM so it must be rejected.
*/
if (!mmap_context->valid)
{
nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap\n");
return -EINVAL;
}
if (vma->vm_pgoff != 0)
{
return -EINVAL;
}
NV_PRINT_VMA(NV_DBG_MEMINFO, vma);
status = nv_check_gpu_state(nv);
if (status != NV_OK)
{
NV_DEV_PRINTF(NV_DBG_INFO, nv,
"GPU is lost, skipping nvidia_mmap_helper\n");
return status;
}
NV_VMA_PRIVATE(vma) = vm_priv;
prot = mmap_context->prot;
/*
* The NVIDIA device nodes (nvidia#) map the device's BAR memory;
* the NVIDIA control node (nvidiactl) maps system memory.
*/
if (!NV_IS_CTL_DEVICE(nv))
{
NvU32 remap_prot_extra = mmap_context->remap_prot_extra;
NvU64 mmap_start = mmap_context->mmap_start;
NvU64 mmap_length = mmap_context->mmap_size;
NvU64 access_start = mmap_context->access_start;
NvU64 access_len = mmap_context->access_size;
// validate the size
if (NV_VMA_SIZE(vma) != mmap_length)
{
return -ENXIO;
}
if (IS_REG_OFFSET(nv, access_start, access_len))
{
if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED,
NV_MEMORY_TYPE_REGISTERS))
{
return -ENXIO;
}
}
else if (IS_FB_OFFSET(nv, access_start, access_len))
{
if (IS_UD_OFFSET(nv, access_start, access_len))
{
if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED,
NV_MEMORY_TYPE_FRAMEBUFFER))
{
return -ENXIO;
}
}
else
{
if (nv_encode_caching(&vma->vm_page_prot,
rm_disable_iomap_wc() ? NV_MEMORY_UNCACHED : mmap_context->caching,
NV_MEMORY_TYPE_FRAMEBUFFER))
{
if (nv_encode_caching(&vma->vm_page_prot,
NV_MEMORY_UNCACHED_WEAK, NV_MEMORY_TYPE_FRAMEBUFFER))
{
return -ENXIO;
}
}
}
}
down(&nvl->mmap_lock);
if (nvl->safe_to_mmap)
{
nvl->all_mappings_revoked = NV_FALSE;
//
// This path is similar to the sysmem mapping code.
// TODO: A refactor is needed as part of bug#2001704.
// Use pfn_valid to determine whether the physical address has
// backing struct page. This is used to isolate P8 from P9.
//
if ((nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE) &&
!IS_REG_OFFSET(nv, access_start, access_len) &&
(pfn_valid(PFN_DOWN(mmap_start))))
{
ret = nvidia_mmap_numa(vma, mmap_context);
if (ret)
{
up(&nvl->mmap_lock);
return ret;
}
}
else
{
if (nv_io_remap_page_range(vma, mmap_start, mmap_length,
remap_prot_extra) != 0)
{
up(&nvl->mmap_lock);
return -EAGAIN;
}
}
}
up(&nvl->mmap_lock);
nv_vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND);
}
else
{
nv_alloc_t *at;
NvU64 page_index;
NvU64 pages;
NvU64 mmap_size;
at = (nv_alloc_t *)mmap_context->alloc;
page_index = mmap_context->page_index;
mmap_size = NV_VMA_SIZE(vma);
pages = mmap_size >> PAGE_SHIFT;
if ((page_index + pages) > at->num_pages)
{
return -ERANGE;
}
/*
* Callers that pass in non-NULL VMA private data must never reach this
* code. They should be mapping on a non-control node.
*/
BUG_ON(NV_VMA_PRIVATE(vma));
if (at->flags.peer_io)
{
if (nv_encode_caching(&vma->vm_page_prot,
at->cache_type,
NV_MEMORY_TYPE_DEVICE_MMIO))
{
return -ENXIO;
}
/*
* There is no need to keep a 'peer IO at' alive until vma_release the way
* a 'sysmem at' must be, because there is no security concern of a client
* freeing RM-allocated sysmem before unmapping it. Hence, the vm_ops are
* NOPs, and at->usage_count is never used.
*/
NV_VMA_PRIVATE(vma) = NULL;
ret = nvidia_mmap_peer_io(vma, at, page_index, pages);
BUG_ON(NV_VMA_PRIVATE(vma));
}
else
{
if (nv_encode_caching(&vma->vm_page_prot,
at->cache_type,
NV_MEMORY_TYPE_SYSTEM))
{
return -ENXIO;
}
NV_VMA_PRIVATE(vma) = at;
ret = nvidia_mmap_sysmem(vma, at, page_index, pages);
}
if (ret)
{
return ret;
}
NV_PRINT_AT(NV_DBG_MEMINFO, at);
nv_vm_flags_set(vma, VM_IO | VM_LOCKED | VM_RESERVED);
nv_vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
}
if ((prot & NV_PROTECT_WRITEABLE) == 0)
{
vma->vm_page_prot = NV_PGPROT_READ_ONLY(vma->vm_page_prot);
nv_vm_flags_clear(vma, VM_WRITE);
nv_vm_flags_clear(vma, VM_MAYWRITE);
}
vma->vm_ops = &nv_vm_ops;
return 0;
}
int nvidia_mmap(
struct file *file,
struct vm_area_struct *vma
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file);
nv_state_t *nv = NV_STATE_PTR(nvl);
nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file);
nvidia_stack_t *sp = NULL;
int status;
//
// Do not allow mmap operation if this is a fd into
// which rm objects have been exported.
//
if (nvlfp->nvfp.handles != NULL)
{
return -EINVAL;
}
status = nv_kmem_cache_alloc_stack(&sp);
if (status != 0)
{
nv_printf(NV_DBG_ERRORS, "NVRM: Unable to allocate altstack for mmap\n");
return status;
}
status = nvidia_mmap_helper(nv, nvlfp, sp, vma, NULL);
nv_kmem_cache_free_stack(sp);
return status;
}
void
nv_revoke_gpu_mappings_locked(
nv_state_t *nv
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
nv_linux_file_private_t *nvlfp;
/* Revoke all mappings for every open file */
list_for_each_entry (nvlfp, &nvl->open_files, entry)
{
unmap_mapping_range(&nvlfp->mapping, 0, ~0, 1);
}
nvl->all_mappings_revoked = NV_TRUE;
}
NV_STATUS NV_API_CALL nv_revoke_gpu_mappings(
nv_state_t *nv
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
// Mapping revocation is only supported for GPU mappings.
if (NV_IS_CTL_DEVICE(nv))
{
return NV_ERR_NOT_SUPPORTED;
}
down(&nvl->mmap_lock);
nv_revoke_gpu_mappings_locked(nv);
up(&nvl->mmap_lock);
return NV_OK;
}
void NV_API_CALL nv_acquire_mmap_lock(
nv_state_t *nv
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
down(&nvl->mmap_lock);
}
void NV_API_CALL nv_release_mmap_lock(
nv_state_t *nv
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
up(&nvl->mmap_lock);
}
NvBool NV_API_CALL nv_get_all_mappings_revoked_locked(
nv_state_t *nv
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
// Caller must hold nvl->mmap_lock for all decisions based on this
return nvl->all_mappings_revoked;
}
void NV_API_CALL nv_set_safe_to_mmap_locked(
nv_state_t *nv,
NvBool safe_to_mmap
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
// Caller must hold nvl->mmap_lock
/*
* If nvl->safe_to_mmap is transitioning from TRUE to FALSE, we expect to
* need to schedule a GPU wakeup callback when we fault.
*
* nvl->gpu_wakeup_callback_needed will be set to FALSE in nvidia_fault()
* after scheduling the GPU wakeup callback, preventing us from scheduling
* duplicates.
*/
if (!safe_to_mmap && nvl->safe_to_mmap)
{
nvl->gpu_wakeup_callback_needed = NV_TRUE;
}
nvl->safe_to_mmap = safe_to_mmap;
}


@@ -0,0 +1,146 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-modeset-interface.h"
#include "os-interface.h"
#include "nv-linux.h"
#include "nvstatus.h"
#include "nv.h"
static const nvidia_modeset_callbacks_t *nv_modeset_callbacks;
static int nvidia_modeset_rm_ops_alloc_stack(nvidia_stack_t **sp)
{
return nv_kmem_cache_alloc_stack(sp);
}
static void nvidia_modeset_rm_ops_free_stack(nvidia_stack_t *sp)
{
if (sp != NULL)
{
nv_kmem_cache_free_stack(sp);
}
}
static int nvidia_modeset_set_callbacks(const nvidia_modeset_callbacks_t *cb)
{
if ((nv_modeset_callbacks != NULL && cb != NULL) ||
(nv_modeset_callbacks == NULL && cb == NULL))
{
return -EINVAL;
}
nv_modeset_callbacks = cb;
return 0;
}
void nvidia_modeset_suspend(NvU32 gpuId)
{
if (nv_modeset_callbacks)
{
nv_modeset_callbacks->suspend(gpuId);
}
}
void nvidia_modeset_resume(NvU32 gpuId)
{
if (nv_modeset_callbacks)
{
nv_modeset_callbacks->resume(gpuId);
}
}
static NvU32 nvidia_modeset_enumerate_gpus(nv_gpu_info_t *gpu_info)
{
nv_linux_state_t *nvl;
unsigned int count;
LOCK_NV_LINUX_DEVICES();
count = 0;
for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next)
{
nv_state_t *nv = NV_STATE_PTR(nvl);
/*
* The gpu_info[] array has NV_MAX_GPUS elements. Fail if there
* are more GPUs than that.
*/
if (count >= NV_MAX_GPUS)
{
nv_printf(NV_DBG_WARNINGS, "NVRM: More than %d GPUs found.\n",
NV_MAX_GPUS);
count = 0;
break;
}
gpu_info[count].gpu_id = nv->gpu_id;
gpu_info[count].pci_info.domain = nv->pci_info.domain;
gpu_info[count].pci_info.bus = nv->pci_info.bus;
gpu_info[count].pci_info.slot = nv->pci_info.slot;
gpu_info[count].pci_info.function = nv->pci_info.function;
gpu_info[count].os_device_ptr = nvl->dev;
count++;
}
UNLOCK_NV_LINUX_DEVICES();
return count;
}
NV_STATUS nvidia_get_rm_ops(nvidia_modeset_rm_ops_t *rm_ops)
{
const nvidia_modeset_rm_ops_t local_rm_ops = {
.version_string = NV_VERSION_STRING,
.system_info = {
.allow_write_combining = NV_FALSE,
},
.alloc_stack = nvidia_modeset_rm_ops_alloc_stack,
.free_stack = nvidia_modeset_rm_ops_free_stack,
.enumerate_gpus = nvidia_modeset_enumerate_gpus,
.open_gpu = nvidia_dev_get,
.close_gpu = nvidia_dev_put,
.op = rm_kernel_rmapi_op, /* provided by nv-kernel.o */
.set_callbacks = nvidia_modeset_set_callbacks,
};
if (strcmp(rm_ops->version_string, NV_VERSION_STRING) != 0)
{
rm_ops->version_string = NV_VERSION_STRING;
return NV_ERR_GENERIC;
}
*rm_ops = local_rm_ops;
if (NV_ALLOW_WRITE_COMBINING(NV_MEMORY_TYPE_FRAMEBUFFER)) {
rm_ops->system_info.allow_write_combining = NV_TRUE;
}
return NV_OK;
}
EXPORT_SYMBOL(nvidia_get_rm_ops);
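/*
* Illustrative sketch (not part of this file): this is roughly how a client
* module such as nvidia-modeset.ko consumes nvidia_get_rm_ops(). The caller
* pre-loads version_string with the NV_VERSION_STRING it was built against;
* on mismatch the call fails and reports the resident module's version.
*
*     nvidia_modeset_rm_ops_t rm_ops = { .version_string = NV_VERSION_STRING };
*
*     if (nvidia_get_rm_ops(&rm_ops) != NV_OK)
*     {
*         // rm_ops.version_string now holds the resident kernel module's
*         // version; abort the handshake.
*         return -EINVAL;
*     }
*     // rm_ops.alloc_stack, rm_ops.op, etc. are now safe to call.
*/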

kernel-open/nvidia/nv-msi.c

@@ -0,0 +1,167 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-msi.h"
#include "nv-proto.h"
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
void NV_API_CALL nv_init_msi(nv_state_t *nv)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
int rc = 0;
rc = pci_enable_msi(nvl->pci_dev);
if (rc == 0)
{
nv->interrupt_line = nvl->pci_dev->irq;
nv->flags |= NV_FLAG_USES_MSI;
nvl->num_intr = 1;
NV_KZALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * nvl->num_intr);
if (nvl->irq_count == NULL)
{
nv->flags &= ~NV_FLAG_USES_MSI;
NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
"Failed to allocate counter for MSI entry; "
"falling back to PCIe virtual-wire interrupts.\n");
}
else
{
nvl->current_num_irq_tracked = 0;
}
}
else
{
nv->flags &= ~NV_FLAG_USES_MSI;
if (nvl->pci_dev->irq != 0)
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
"Failed to enable MSI; "
"falling back to PCIe virtual-wire interrupts.\n");
}
}
return;
}
void NV_API_CALL nv_init_msix(nv_state_t *nv)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
int num_intr = 0;
struct msix_entry *msix_entries;
int rc = 0;
int i;
NV_SPIN_LOCK_INIT(&nvl->msix_isr_lock);
rc = os_alloc_mutex(&nvl->msix_bh_mutex);
if (rc != 0)
goto failed;
num_intr = nv_get_max_irq(nvl->pci_dev);
if (num_intr > NV_RM_MAX_MSIX_LINES)
{
NV_DEV_PRINTF(NV_DBG_INFO, nv, "Reducing MSI-X count from %d to the "
"driver-supported maximum %d.\n", num_intr, NV_RM_MAX_MSIX_LINES);
num_intr = NV_RM_MAX_MSIX_LINES;
}
NV_KMALLOC(nvl->msix_entries, sizeof(struct msix_entry) * num_intr);
if (nvl->msix_entries == NULL)
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to allocate MSI-X entries.\n");
goto failed;
}
for (i = 0, msix_entries = nvl->msix_entries; i < num_intr; i++, msix_entries++)
{
msix_entries->entry = i;
}
NV_KZALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr);
if (nvl->irq_count == NULL)
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to allocate counter for MSI-X entries.\n");
goto failed;
}
else
{
nvl->current_num_irq_tracked = 0;
}
rc = nv_pci_enable_msix(nvl, num_intr);
if (rc != NV_OK)
goto failed;
nv->flags |= NV_FLAG_USES_MSIX;
return;
failed:
nv->flags &= ~NV_FLAG_USES_MSIX;
if (nvl->msix_entries)
{
NV_KFREE(nvl->msix_entries, sizeof(struct msix_entry) * num_intr);
}
if (nvl->irq_count)
{
NV_KFREE(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr);
}
if (nvl->msix_bh_mutex)
{
os_free_mutex(nvl->msix_bh_mutex);
nvl->msix_bh_mutex = NULL;
}
NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to enable MSI-X.\n");
}
NvS32 NV_API_CALL nv_request_msix_irq(nv_linux_state_t *nvl)
{
int i;
int j;
struct msix_entry *msix_entries;
int rc = NV_ERR_INVALID_ARGUMENT;
nv_state_t *nv = NV_STATE_PTR(nvl);
for (i = 0, msix_entries = nvl->msix_entries; i < nvl->num_intr;
i++, msix_entries++)
{
rc = request_threaded_irq(msix_entries->vector, nvidia_isr_msix,
nvidia_isr_msix_kthread_bh, nv_default_irq_flags(nv),
nv_device_name, (void *)nvl);
if (rc)
{
for (j = 0; j < i; j++)
{
free_irq(nvl->msix_entries[j].vector, (void *)nvl);
}
break;
}
}
return rc;
}
#endif


@@ -0,0 +1,233 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include <linux/kernel.h> // For container_of
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/timer.h>
#include "os-interface.h"
#include "nv-linux.h"
#if !defined(NVCPU_PPC64LE)
#define NV_NANO_TIMER_USE_HRTIMER 1
#else
#define NV_NANO_TIMER_USE_HRTIMER 0
#endif // !defined(NVCPU_PPC64LE)
struct nv_nano_timer
{
#if NV_NANO_TIMER_USE_HRTIMER
struct hrtimer hr_timer; // Linux high-resolution timer object; could be
// replaced with a platform-specific timer object
#else
struct timer_list jiffy_timer;
#endif
nv_linux_state_t *nv_linux_state;
void (*nv_nano_timer_callback)(struct nv_nano_timer *nv_nstimer);
void *pTmrEvent;
};
/*!
* @brief Runs the nanosecond-resolution timer callback
*
* @param[in] nv_nstimer Pointer to nv_nano_timer_t object
*/
static void
nvidia_nano_timer_callback(
nv_nano_timer_t *nv_nstimer)
{
nv_state_t *nv = NULL;
nv_linux_state_t *nvl = nv_nstimer->nv_linux_state;
nvidia_stack_t *sp = NULL;
if (nv_kmem_cache_alloc_stack(&sp) != 0)
{
nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate stack memory\n");
return;
}
nv = NV_STATE_PTR(nvl);
if (rm_run_nano_timer_callback(sp, nv, nv_nstimer->pTmrEvent) != NV_OK)
{
nv_printf(NV_DBG_ERRORS, "NVRM: error servicing nano timer callback\n");
}
nv_kmem_cache_free_stack(sp);
}
/*!
* @brief Allocates a nanosecond-resolution timer object
*
* @returns Pointer to the allocated nv_nano_timer_t, or NULL on failure
*/
static nv_nano_timer_t *nv_alloc_nano_timer(void)
{
nv_nano_timer_t *nv_nstimer;
NV_KMALLOC(nv_nstimer, sizeof(nv_nano_timer_t));
if (nv_nstimer == NULL)
{
return NULL;
}
memset(nv_nstimer, 0, sizeof(nv_nano_timer_t));
return nv_nstimer;
}
#if NV_NANO_TIMER_USE_HRTIMER
static enum hrtimer_restart nv_nano_timer_callback_typed_data(struct hrtimer *hrtmr)
{
struct nv_nano_timer *nv_nstimer =
container_of(hrtmr, struct nv_nano_timer, hr_timer);
nv_nstimer->nv_nano_timer_callback(nv_nstimer);
return HRTIMER_NORESTART;
}
#else
static inline void nv_jiffy_timer_callback_typed_data(struct timer_list *timer)
{
struct nv_nano_timer *nv_nstimer =
container_of(timer, struct nv_nano_timer, jiffy_timer);
nv_nstimer->nv_nano_timer_callback(nv_nstimer);
}
static inline void nv_jiffy_timer_callback_anon_data(unsigned long arg)
{
struct nv_nano_timer *nv_nstimer = (struct nv_nano_timer *)arg;
nv_nstimer->nv_nano_timer_callback(nv_nstimer);
}
#endif
/*!
* @brief Creates & initializes a nanosecond-resolution timer object
*
* @param[in] nv Per-GPU Linux state
* @param[in] pTmrEvent Pointer to TMR_EVENT
* @param[out] pnv_nstimer Receives the allocated nv_nano_timer_t pointer
*/
void NV_API_CALL nv_create_nano_timer(
nv_state_t *nv,
void *pTmrEvent,
nv_nano_timer_t **pnv_nstimer)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
nv_nano_timer_t *nv_nstimer = nv_alloc_nano_timer();
if (nv_nstimer == NULL)
{
nv_printf(NV_DBG_ERRORS, "NVRM: failed to create nano timer object\n");
*pnv_nstimer = NULL;
return;
}
nv_nstimer->nv_linux_state = nvl;
nv_nstimer->pTmrEvent = pTmrEvent;
nv_nstimer->nv_nano_timer_callback = nvidia_nano_timer_callback;
#if NV_NANO_TIMER_USE_HRTIMER
hrtimer_init(&nv_nstimer->hr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
nv_nstimer->hr_timer.function = nv_nano_timer_callback_typed_data;
#else
#if defined(NV_TIMER_SETUP_PRESENT)
timer_setup(&nv_nstimer->jiffy_timer, nv_jiffy_timer_callback_typed_data, 0);
#else
init_timer(&nv_nstimer->jiffy_timer);
nv_nstimer->jiffy_timer.function = nv_jiffy_timer_callback_anon_data;
nv_nstimer->jiffy_timer.data = (unsigned long)nv_nstimer;
#endif // NV_TIMER_SETUP_PRESENT
#endif // NV_NANO_TIMER_USE_HRTIMER
*pnv_nstimer = nv_nstimer;
}
/*!
* @brief Starts the nanosecond-resolution timer
*
* @param[in] nv Per-GPU Linux state
* @param[in] nv_nstimer Pointer to nv_nano_timer_t object
* @param[in] time_ns Relative time in nanoseconds
*/
void NV_API_CALL nv_start_nano_timer(
nv_state_t *nv,
nv_nano_timer_t *nv_nstimer,
NvU64 time_ns)
{
#if NV_NANO_TIMER_USE_HRTIMER
ktime_t ktime = ktime_set(0, time_ns);
hrtimer_start(&nv_nstimer->hr_timer, ktime, HRTIMER_MODE_REL);
#else
unsigned long time_jiffies;
NvU32 time_us;
time_us = (NvU32)(time_ns / 1000);
if (time_us == 0)
{
nv_printf(NV_DBG_WARNINGS, "NVRM: Timer value cannot be less than 1 usec.\n");
}
time_jiffies = usecs_to_jiffies(time_us);
mod_timer(&nv_nstimer->jiffy_timer, jiffies + time_jiffies);
#endif
}
/*!
* @brief Cancels the nanosecond-resolution timer
*
* @param[in] nv Per-GPU Linux state
* @param[in] nv_nstimer Pointer to nv_nano_timer_t object
*/
void NV_API_CALL nv_cancel_nano_timer(
nv_state_t *nv,
nv_nano_timer_t *nv_nstimer)
{
#if NV_NANO_TIMER_USE_HRTIMER
hrtimer_cancel(&nv_nstimer->hr_timer);
#else
del_timer_sync(&nv_nstimer->jiffy_timer);
#endif
}
/*!
* @brief Cancels & deletes the nanosecond-resolution timer object
*
* @param[in] nv Per-GPU Linux state
* @param[in] nv_nstimer Pointer to nv_nano_timer_t object
*/
void NV_API_CALL nv_destroy_nano_timer(
nv_state_t *nv,
nv_nano_timer_t *nv_nstimer)
{
nv_cancel_nano_timer(nv, nv_nstimer);
NV_KFREE(nv_nstimer, sizeof(nv_nano_timer_t));
}
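/*
* Illustrative sketch (not part of this file): the typical lifecycle of the
* nano timer API, assuming a TMR_EVENT pointer 'event' and a per-GPU
* nv_state_t pointer 'nv' (hypothetical names):
*
*     nv_nano_timer_t *timer;
*
*     nv_create_nano_timer(nv, event, &timer);
*     if (timer != NULL)
*     {
*         nv_start_nano_timer(nv, timer, 30000); // fires in ~30 us
*         ...
*         nv_destroy_nano_timer(nv, timer); // cancels if pending, then frees
*     }
*/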

kernel-open/nvidia/nv-p2p.c

@@ -0,0 +1,750 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-ibmnpu.h"
#include "nv-rsync.h"
#include "nv-p2p.h"
#include "rmp2pdefines.h"
typedef enum nv_p2p_page_table_type {
NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT = 0,
NV_P2P_PAGE_TABLE_TYPE_PERSISTENT,
} nv_p2p_page_table_type_t;
typedef struct nv_p2p_dma_mapping {
struct list_head list_node;
struct nvidia_p2p_dma_mapping *dma_mapping;
} nv_p2p_dma_mapping_t;
typedef struct nv_p2p_mem_info {
void (*free_callback)(void *data);
void *data;
struct nvidia_p2p_page_table page_table;
struct {
struct list_head list_head;
struct semaphore lock;
} dma_mapping_list;
void *private;
} nv_p2p_mem_info_t;
// declared and created in nv.c
extern void *nvidia_p2p_page_t_cache;
static struct nvidia_status_mapping {
NV_STATUS status;
int error;
} nvidia_status_mappings[] = {
{ NV_ERR_GENERIC, -EIO },
{ NV_ERR_INSUFFICIENT_RESOURCES, -ENOMEM },
{ NV_ERR_NO_MEMORY, -ENOMEM },
{ NV_ERR_INVALID_ARGUMENT, -EINVAL },
{ NV_ERR_INVALID_OBJECT_HANDLE, -EINVAL },
{ NV_ERR_INVALID_STATE, -EIO },
{ NV_ERR_NOT_SUPPORTED, -ENOTSUPP },
{ NV_ERR_OBJECT_NOT_FOUND, -EINVAL },
{ NV_ERR_STATE_IN_USE, -EBUSY },
{ NV_ERR_GPU_UUID_NOT_FOUND, -ENODEV },
{ NV_OK, 0 },
};
#define NVIDIA_STATUS_MAPPINGS \
(sizeof(nvidia_status_mappings) / sizeof(struct nvidia_status_mapping))
static int nvidia_p2p_map_status(NV_STATUS status)
{
int error = -EIO;
uint8_t i;
for (i = 0; i < NVIDIA_STATUS_MAPPINGS; i++)
{
if (nvidia_status_mappings[i].status == status)
{
error = nvidia_status_mappings[i].error;
break;
}
}
return error;
}
static NvU32 nvidia_p2p_page_size_mappings[NVIDIA_P2P_PAGE_SIZE_COUNT] = {
NVRM_P2P_PAGESIZE_SMALL_4K, NVRM_P2P_PAGESIZE_BIG_64K, NVRM_P2P_PAGESIZE_BIG_128K
};
static NV_STATUS nvidia_p2p_map_page_size(NvU32 page_size, NvU32 *page_size_index)
{
NvU32 i;
for (i = 0; i < NVIDIA_P2P_PAGE_SIZE_COUNT; i++)
{
if (nvidia_p2p_page_size_mappings[i] == page_size)
{
*page_size_index = i;
break;
}
}
if (i == NVIDIA_P2P_PAGE_SIZE_COUNT)
return NV_ERR_GENERIC;
return NV_OK;
}
static NV_STATUS nv_p2p_insert_dma_mapping(
struct nv_p2p_mem_info *mem_info,
struct nvidia_p2p_dma_mapping *dma_mapping
)
{
NV_STATUS status;
struct nv_p2p_dma_mapping *node;
status = os_alloc_mem((void**)&node, sizeof(*node));
if (status != NV_OK)
{
return status;
}
down(&mem_info->dma_mapping_list.lock);
node->dma_mapping = dma_mapping;
list_add_tail(&node->list_node, &mem_info->dma_mapping_list.list_head);
up(&mem_info->dma_mapping_list.lock);
return NV_OK;
}
static struct nvidia_p2p_dma_mapping* nv_p2p_remove_dma_mapping(
struct nv_p2p_mem_info *mem_info,
struct nvidia_p2p_dma_mapping *dma_mapping
)
{
struct nv_p2p_dma_mapping *cur;
struct nvidia_p2p_dma_mapping *ret_dma_mapping = NULL;
down(&mem_info->dma_mapping_list.lock);
list_for_each_entry(cur, &mem_info->dma_mapping_list.list_head, list_node)
{
if (dma_mapping == NULL || dma_mapping == cur->dma_mapping)
{
ret_dma_mapping = cur->dma_mapping;
list_del(&cur->list_node);
os_free_mem(cur);
break;
}
}
up(&mem_info->dma_mapping_list.lock);
return ret_dma_mapping;
}
static void nv_p2p_free_dma_mapping(
struct nvidia_p2p_dma_mapping *dma_mapping
)
{
nv_dma_device_t peer_dma_dev = {{ 0 }};
NvU32 page_size;
NV_STATUS status;
NvU32 i;
peer_dma_dev.dev = &dma_mapping->pci_dev->dev;
peer_dma_dev.addressable_range.limit = dma_mapping->pci_dev->dma_mask;
page_size = nvidia_p2p_page_size_mappings[dma_mapping->page_size_type];
if (dma_mapping->private != NULL)
{
WARN_ON(page_size != PAGE_SIZE);
status = nv_dma_unmap_alloc(&peer_dma_dev,
dma_mapping->entries,
dma_mapping->dma_addresses,
&dma_mapping->private);
WARN_ON(status != NV_OK);
}
else
{
for (i = 0; i < dma_mapping->entries; i++)
{
nv_dma_unmap_peer(&peer_dma_dev, page_size / PAGE_SIZE,
dma_mapping->dma_addresses[i]);
}
}
os_free_mem(dma_mapping->dma_addresses);
os_free_mem(dma_mapping);
}
static void nv_p2p_free_page_table(
struct nvidia_p2p_page_table *page_table
)
{
NvU32 i;
struct nvidia_p2p_dma_mapping *dma_mapping;
struct nv_p2p_mem_info *mem_info = NULL;
mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table);
dma_mapping = nv_p2p_remove_dma_mapping(mem_info, NULL);
while (dma_mapping != NULL)
{
nv_p2p_free_dma_mapping(dma_mapping);
dma_mapping = nv_p2p_remove_dma_mapping(mem_info, NULL);
}
for (i = 0; i < page_table->entries; i++)
{
NV_KMEM_CACHE_FREE(page_table->pages[i], nvidia_p2p_page_t_cache);
}
if (page_table->gpu_uuid != NULL)
{
os_free_mem(page_table->gpu_uuid);
}
if (page_table->pages != NULL)
{
os_free_mem(page_table->pages);
}
os_free_mem(mem_info);
}
static NV_STATUS nv_p2p_put_pages(
nv_p2p_page_table_type_t pt_type,
nvidia_stack_t * sp,
uint64_t p2p_token,
uint32_t va_space,
uint64_t virtual_address,
struct nvidia_p2p_page_table **page_table
)
{
NV_STATUS status;
/*
* rm_p2p_put_pages returns NV_OK if the page_table was found and
* got unlinked from the RM's tracker (atomically). This ensures that
* RM's tear-down path does not race with this path.
*
* rm_p2p_put_pages returns NV_ERR_OBJECT_NOT_FOUND if the page_table
* was already unlinked.
*/
if (pt_type == NV_P2P_PAGE_TABLE_TYPE_PERSISTENT)
{
struct nv_p2p_mem_info *mem_info = NULL;
/*
* It is safe to access the persistent page_table: unlike the non-persistent
* page_table, there is no async callback that can free it.
*/
mem_info = container_of(*page_table, nv_p2p_mem_info_t, page_table);
status = rm_p2p_put_pages_persistent(sp, mem_info->private, *page_table);
}
else
{
status = rm_p2p_put_pages(sp, p2p_token, va_space,
virtual_address, *page_table);
}
if (status == NV_OK)
{
nv_p2p_free_page_table(*page_table);
*page_table = NULL;
}
else if ((pt_type == NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT) &&
(status == NV_ERR_OBJECT_NOT_FOUND))
{
status = NV_OK;
*page_table = NULL;
}
else
{
WARN_ON(status != NV_OK);
}
return status;
}
void NV_API_CALL nv_p2p_free_platform_data(
void *data
)
{
if (data == NULL)
{
WARN_ON(data == NULL);
return;
}
nv_p2p_free_page_table((struct nvidia_p2p_page_table*)data);
}
int nvidia_p2p_init_mapping(
uint64_t p2p_token,
struct nvidia_p2p_params *params,
void (*destroy_callback)(void *data),
void *data
)
{
return -ENOTSUPP;
}
EXPORT_SYMBOL(nvidia_p2p_init_mapping);
int nvidia_p2p_destroy_mapping(uint64_t p2p_token)
{
return -ENOTSUPP;
}
EXPORT_SYMBOL(nvidia_p2p_destroy_mapping);
static void nv_p2p_mem_info_free_callback(void *data)
{
nv_p2p_mem_info_t *mem_info = (nv_p2p_mem_info_t*) data;
mem_info->free_callback(mem_info->data);
nv_p2p_free_platform_data(&mem_info->page_table);
}
static int nv_p2p_get_pages(
nv_p2p_page_table_type_t pt_type,
uint64_t p2p_token,
uint32_t va_space,
uint64_t virtual_address,
uint64_t length,
struct nvidia_p2p_page_table **page_table,
void (*free_callback)(void * data),
void *data
)
{
NV_STATUS status;
nvidia_stack_t *sp = NULL;
struct nvidia_p2p_page *page;
struct nv_p2p_mem_info *mem_info = NULL;
NvU32 entries;
NvU32 *wreqmb_h = NULL;
NvU32 *rreqmb_h = NULL;
NvU64 *physical_addresses = NULL;
NvU32 page_count;
NvU32 i = 0;
NvBool bGetPages = NV_FALSE;
NvBool bGetUuid = NV_FALSE;
NvU32 page_size = NVRM_P2P_PAGESIZE_BIG_64K;
NvU32 page_size_index;
NvU64 temp_length;
NvU8 *gpu_uuid = NULL;
NvU8 uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0};
int rc;
rc = nv_kmem_cache_alloc_stack(&sp);
if (rc != 0)
{
return rc;
}
*page_table = NULL;
status = os_alloc_mem((void **)&mem_info, sizeof(*mem_info));
if (status != NV_OK)
{
goto failed;
}
memset(mem_info, 0, sizeof(*mem_info));
INIT_LIST_HEAD(&mem_info->dma_mapping_list.list_head);
NV_INIT_MUTEX(&mem_info->dma_mapping_list.lock);
*page_table = &(mem_info->page_table);
/*
* Assign length to a temporary variable, since the do_div() macro divides
* in place.
*/
temp_length = length;
do_div(temp_length, page_size);
page_count = temp_length;
if (length & (page_size - 1))
{
page_count++;
}
status = os_alloc_mem((void **)&physical_addresses,
(page_count * sizeof(NvU64)));
if (status != NV_OK)
{
goto failed;
}
status = os_alloc_mem((void **)&wreqmb_h, (page_count * sizeof(NvU32)));
if (status != NV_OK)
{
goto failed;
}
status = os_alloc_mem((void **)&rreqmb_h, (page_count * sizeof(NvU32)));
if (status != NV_OK)
{
goto failed;
}
if (pt_type == NV_P2P_PAGE_TABLE_TYPE_PERSISTENT)
{
void *gpu_info = NULL;
if ((p2p_token != 0) || (va_space != 0))
{
status = NV_ERR_NOT_SUPPORTED;
goto failed;
}
status = rm_p2p_get_gpu_info(sp, virtual_address, length,
&gpu_uuid, &gpu_info);
if (status != NV_OK)
{
goto failed;
}
(*page_table)->gpu_uuid = gpu_uuid;
rc = nvidia_dev_get_uuid(gpu_uuid, sp);
if (rc != 0)
{
status = NV_ERR_GPU_UUID_NOT_FOUND;
goto failed;
}
os_mem_copy(uuid, gpu_uuid, NVIDIA_P2P_GPU_UUID_LEN);
bGetUuid = NV_TRUE;
status = rm_p2p_get_pages_persistent(sp, virtual_address, length,
&mem_info->private,
physical_addresses, &entries,
*page_table, gpu_info);
if (status != NV_OK)
{
goto failed;
}
}
else
{
// Get regular old-style, non-persistent mappings
status = rm_p2p_get_pages(sp, p2p_token, va_space,
virtual_address, length, physical_addresses, wreqmb_h,
rreqmb_h, &entries, &gpu_uuid, *page_table);
if (status != NV_OK)
{
goto failed;
}
(*page_table)->gpu_uuid = gpu_uuid;
}
bGetPages = NV_TRUE;
status = os_alloc_mem((void *)&(*page_table)->pages,
(entries * sizeof(page)));
if (status != NV_OK)
{
goto failed;
}
(*page_table)->version = NVIDIA_P2P_PAGE_TABLE_VERSION;
for (i = 0; i < entries; i++)
{
page = NV_KMEM_CACHE_ALLOC(nvidia_p2p_page_t_cache);
if (page == NULL)
{
status = NV_ERR_NO_MEMORY;
goto failed;
}
memset(page, 0, sizeof(*page));
page->physical_address = physical_addresses[i];
page->registers.fermi.wreqmb_h = wreqmb_h[i];
page->registers.fermi.rreqmb_h = rreqmb_h[i];
(*page_table)->pages[i] = page;
(*page_table)->entries++;
}
status = nvidia_p2p_map_page_size(page_size, &page_size_index);
if (status != NV_OK)
{
goto failed;
}
(*page_table)->page_size = page_size_index;
os_free_mem(physical_addresses);
os_free_mem(wreqmb_h);
os_free_mem(rreqmb_h);
if (free_callback != NULL)
{
mem_info->free_callback = free_callback;
mem_info->data = data;
status = rm_p2p_register_callback(sp, p2p_token, virtual_address, length,
*page_table, nv_p2p_mem_info_free_callback, mem_info);
if (status != NV_OK)
{
goto failed;
}
}
nv_kmem_cache_free_stack(sp);
return nvidia_p2p_map_status(status);
failed:
if (physical_addresses != NULL)
{
os_free_mem(physical_addresses);
}
if (wreqmb_h != NULL)
{
os_free_mem(wreqmb_h);
}
if (rreqmb_h != NULL)
{
os_free_mem(rreqmb_h);
}
if (bGetPages)
{
(void)nv_p2p_put_pages(pt_type, sp, p2p_token, va_space,
virtual_address, page_table);
}
if (bGetUuid)
{
nvidia_dev_put_uuid(uuid, sp);
}
if (*page_table != NULL)
{
nv_p2p_free_page_table(*page_table);
}
nv_kmem_cache_free_stack(sp);
return nvidia_p2p_map_status(status);
}
int nvidia_p2p_register_rsync_driver(
nvidia_p2p_rsync_driver_t *driver,
void *data
)
{
if (driver == NULL)
{
return -EINVAL;
}
if (!NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(driver))
{
return -EINVAL;
}
if (driver->get_relaxed_ordering_mode == NULL ||
driver->put_relaxed_ordering_mode == NULL ||
driver->wait_for_rsync == NULL)
{
return -EINVAL;
}
return nv_register_rsync_driver(driver->get_relaxed_ordering_mode,
driver->put_relaxed_ordering_mode,
driver->wait_for_rsync, data);
}
EXPORT_SYMBOL(nvidia_p2p_register_rsync_driver);
void nvidia_p2p_unregister_rsync_driver(
nvidia_p2p_rsync_driver_t *driver,
void *data
)
{
if (driver == NULL)
{
WARN_ON(1);
return;
}
if (!NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(driver))
{
WARN_ON(1);
return;
}
if (driver->get_relaxed_ordering_mode == NULL ||
driver->put_relaxed_ordering_mode == NULL ||
driver->wait_for_rsync == NULL)
{
WARN_ON(1);
return;
}
nv_unregister_rsync_driver(driver->get_relaxed_ordering_mode,
driver->put_relaxed_ordering_mode,
driver->wait_for_rsync, data);
}
EXPORT_SYMBOL(nvidia_p2p_unregister_rsync_driver);
int nvidia_p2p_get_rsync_registers(
nvidia_p2p_rsync_reg_info_t **reg_info
)
{
nv_linux_state_t *nvl;
nv_state_t *nv;
NV_STATUS status;
void *ptr = NULL;
NvU64 addr;
NvU64 size;
struct pci_dev *ibmnpu = NULL;
NvU32 index = 0;
NvU32 count = 0;
nvidia_p2p_rsync_reg_info_t *info = NULL;
nvidia_p2p_rsync_reg_t *regs = NULL;
if (reg_info == NULL)
{
return -EINVAL;
}
status = os_alloc_mem((void**)&info, sizeof(*info));
if (status != NV_OK)
{
return -ENOMEM;
}
memset(info, 0, sizeof(*info));
info->version = NVIDIA_P2P_RSYNC_REG_INFO_VERSION;
LOCK_NV_LINUX_DEVICES();
for (nvl = nv_linux_devices; nvl; nvl = nvl->next)
{
count++;
}
status = os_alloc_mem((void**)&regs, (count * sizeof(*regs)));
if (status != NV_OK)
{
nvidia_p2p_put_rsync_registers(info);
UNLOCK_NV_LINUX_DEVICES();
return -ENOMEM;
}
for (nvl = nv_linux_devices; nvl; nvl = nvl->next)
{
nv = NV_STATE_PTR(nvl);
addr = 0;
size = 0;
status = nv_get_ibmnpu_genreg_info(nv, &addr, &size, (void**)&ibmnpu);
if (status != NV_OK)
{
continue;
}
ptr = nv_ioremap_nocache(addr, size);
if (ptr == NULL)
{
continue;
}
regs[index].ptr = ptr;
regs[index].size = size;
regs[index].gpu = nvl->pci_dev;
regs[index].ibmnpu = ibmnpu;
regs[index].cluster_id = 0;
regs[index].socket_id = nv_get_ibmnpu_chip_id(nv);
index++;
}
UNLOCK_NV_LINUX_DEVICES();
info->regs = regs;
info->entries = index;
if (info->entries == 0)
{
nvidia_p2p_put_rsync_registers(info);
return -ENODEV;
}
*reg_info = info;
return 0;
}
EXPORT_SYMBOL(nvidia_p2p_get_rsync_registers);
void nvidia_p2p_put_rsync_registers(
nvidia_p2p_rsync_reg_info_t *reg_info
)
{
NvU32 i;
nvidia_p2p_rsync_reg_t *regs = NULL;
if (reg_info == NULL)
{
return;
}
if (reg_info->regs)
{
for (i = 0; i < reg_info->entries; i++)
{
regs = &reg_info->regs[i];
if (regs->ptr)
{
nv_iounmap(regs->ptr, regs->size);
}
}
os_free_mem(reg_info->regs);
}
os_free_mem(reg_info);
}
EXPORT_SYMBOL(nvidia_p2p_put_rsync_registers);

kernel-open/nvidia/nv-p2p.h

@@ -0,0 +1,478 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_P2P_H_
#define _NV_P2P_H_
/*
* NVIDIA P2P Structure Versioning
*
* For the nvidia_p2p_*_t structures allocated by the NVIDIA driver, it will
* set the version field of the structure according to the definition used by
* the NVIDIA driver. The "major" field of the version is defined as the upper
* 16 bits, and the "minor" field of the version is defined as the lower 16
* bits. The version field will always be the first 4 bytes of the structure,
* and third-party drivers should check the value of this field in structures
* allocated by the NVIDIA driver to ensure runtime compatibility.
*
* In general, version numbers will be incremented as follows:
* - When a backwards-compatible change is made to the structure layout, the
* minor version for that structure will be incremented. Third-party drivers
* built against an older minor version will continue to work with the newer
* minor version used by the NVIDIA driver, without recompilation.
* - When a breaking change is made to the structure layout, the major version
* will be incremented. Third-party drivers built against an older major
* version require at least recompilation and potentially additional updates
* to use the new API.
*/
#define NVIDIA_P2P_MAJOR_VERSION_MASK 0xffff0000
#define NVIDIA_P2P_MINOR_VERSION_MASK 0x0000ffff
#define NVIDIA_P2P_MAJOR_VERSION(v) \
(((v) & NVIDIA_P2P_MAJOR_VERSION_MASK) >> 16)
#define NVIDIA_P2P_MINOR_VERSION(v) \
(((v) & NVIDIA_P2P_MINOR_VERSION_MASK))
#define NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) \
(NVIDIA_P2P_MAJOR_VERSION((p)->version) == NVIDIA_P2P_MAJOR_VERSION(v))
#define NVIDIA_P2P_VERSION_COMPATIBLE(p, v) \
(NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) && \
(NVIDIA_P2P_MINOR_VERSION((p)->version) >= (NVIDIA_P2P_MINOR_VERSION(v))))
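/*
* Worked example (illustrative, not part of this header): a page table
* version of 0x00010002 decodes as NVIDIA_P2P_MAJOR_VERSION() == 1 and
* NVIDIA_P2P_MINOR_VERSION() == 2. A third-party driver compiled against
* 0x00010001 remains compatible (major versions match and the resident
* minor, 2, is >= the consumer's, 1), while one compiled against
* 0x00020000 does not (major version mismatch).
*/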
enum {
NVIDIA_P2P_ARCHITECTURE_TESLA = 0,
NVIDIA_P2P_ARCHITECTURE_FERMI,
NVIDIA_P2P_ARCHITECTURE_CURRENT = NVIDIA_P2P_ARCHITECTURE_FERMI
};
#define NVIDIA_P2P_PARAMS_VERSION 0x00010001
enum {
NVIDIA_P2P_PARAMS_ADDRESS_INDEX_GPU = 0,
NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE,
NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX = \
NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE
};
#define NVIDIA_P2P_GPU_UUID_LEN 16
typedef
struct nvidia_p2p_params {
uint32_t version;
uint32_t architecture;
union nvidia_p2p_mailbox_addresses {
struct {
uint64_t wmb_addr;
uint64_t wmb_data;
uint64_t rreq_addr;
uint64_t rcomp_addr;
uint64_t reserved[2];
} fermi;
} addresses[NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX+1];
} nvidia_p2p_params_t;
/*
* Macro for users to detect driver support for persistent pages.
*/
#define NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API
/*
* This API is not supported.
*/
int nvidia_p2p_init_mapping(uint64_t p2p_token,
struct nvidia_p2p_params *params,
void (*destroy_callback)(void *data),
void *data);
/*
* This API is not supported.
*/
int nvidia_p2p_destroy_mapping(uint64_t p2p_token);
enum nvidia_p2p_page_size_type {
NVIDIA_P2P_PAGE_SIZE_4KB = 0,
NVIDIA_P2P_PAGE_SIZE_64KB,
NVIDIA_P2P_PAGE_SIZE_128KB,
NVIDIA_P2P_PAGE_SIZE_COUNT
};
typedef
struct nvidia_p2p_page {
uint64_t physical_address;
union nvidia_p2p_request_registers {
struct {
uint32_t wreqmb_h;
uint32_t rreqmb_h;
uint32_t rreqmb_0;
uint32_t reserved[3];
} fermi;
} registers;
} nvidia_p2p_page_t;
#define NVIDIA_P2P_PAGE_TABLE_VERSION 0x00010002
#define NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(p) \
NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_PAGE_TABLE_VERSION)
typedef
struct nvidia_p2p_page_table {
uint32_t version;
uint32_t page_size; /* enum nvidia_p2p_page_size_type */
struct nvidia_p2p_page **pages;
uint32_t entries;
uint8_t *gpu_uuid;
} nvidia_p2p_page_table_t;
/*
* @brief
* Make the pages underlying a range of GPU virtual memory
* accessible to a third-party device.
*
* This API only supports pinned, GPU-resident memory, such as that provided
* by cudaMalloc().
*
* This API may sleep.
*
* @param[in] p2p_token
* A token that uniquely identifies the P2P mapping.
* @param[in] va_space
* A GPU virtual address space qualifier.
* @param[in] virtual_address
* The start address in the specified virtual address space.
* Address must be aligned to the 64KB boundary.
* @param[in] length
* The length of the requested P2P mapping.
* Length must be a multiple of 64KB.
* @param[out] page_table
* A pointer to an array of structures with P2P PTEs.
* @param[in] free_callback
* A pointer to the function to be invoked when the pages
* underlying the virtual address range are freed
* implicitly.
* @param[in] data
* A non-NULL opaque pointer to private data to be passed to the
* callback function.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -ENOTSUPP if the requested operation is not supported.
* -ENOMEM if the driver failed to allocate memory or if
* insufficient resources were available to complete the operation.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_get_pages( uint64_t p2p_token, uint32_t va_space,
uint64_t virtual_address, uint64_t length,
struct nvidia_p2p_page_table **page_table,
void (*free_callback)(void *data), void *data);
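/*
* Illustrative sketch (not part of this header): a third-party driver
* pinning a 64KB-aligned GPU virtual address range. 'va', 'len',
* 'my_free_cb', and 'my_data' are hypothetical names; zero is passed for
* both the P2P token and the VA space:
*
*     struct nvidia_p2p_page_table *pt = NULL;
*
*     if (nvidia_p2p_get_pages(0, 0, va, len, &pt, my_free_cb, my_data) == 0)
*     {
*         // pt->entries pages are now pinned; release them later with
*         // nvidia_p2p_put_pages(0, 0, va, pt);
*     }
*/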
/*
* @brief
* Pin and make the pages underlying a range of GPU virtual memory
* accessible to a third-party device. The pages will persist until
* explicitly freed by nvidia_p2p_put_pages_persistent().
*
* Persistent GPU memory mappings are not supported on PowerPC,
* MIG-enabled devices and vGPU.
*
* This API only supports pinned, GPU-resident memory, such as that provided
* by cudaMalloc().
*
* This API may sleep.
*
* @param[in] virtual_address
* The start address in the specified virtual address space.
* Address must be aligned to the 64KB boundary.
* @param[in] length
* The length of the requested P2P mapping.
* Length must be a multiple of 64KB.
* @param[out] page_table
* A pointer to an array of structures with P2P PTEs.
* @param[in] flags
* Must be set to zero for now.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -ENOTSUPP if the requested operation is not supported.
* -ENOMEM if the driver failed to allocate memory or if
* insufficient resources were available to complete the operation.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_get_pages_persistent(uint64_t virtual_address,
uint64_t length,
struct nvidia_p2p_page_table **page_table,
uint32_t flags);
#define NVIDIA_P2P_DMA_MAPPING_VERSION 0x00020003
#define NVIDIA_P2P_DMA_MAPPING_VERSION_COMPATIBLE(p) \
NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_DMA_MAPPING_VERSION)
struct pci_dev;
typedef
struct nvidia_p2p_dma_mapping {
uint32_t version;
enum nvidia_p2p_page_size_type page_size_type;
uint32_t entries;
uint64_t *dma_addresses;
void *private;
struct pci_dev *pci_dev;
} nvidia_p2p_dma_mapping_t;
/*
* @brief
* Make the physical pages retrieved using nvidia_p2p_get_pages accessible to
* a third-party device.
*
* @param[in] peer
* The struct pci_dev * of the peer device that needs to DMA to/from the
* mapping.
* @param[in] page_table
* The page table outlining the physical pages underlying the mapping, as
* retrieved with nvidia_p2p_get_pages().
* @param[out] dma_mapping
* The DMA mapping containing the DMA addresses to use on the third-party
* device.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -ENOTSUPP if the requested operation is not supported.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_dma_map_pages(struct pci_dev *peer,
struct nvidia_p2p_page_table *page_table,
struct nvidia_p2p_dma_mapping **dma_mapping);
/*
* @brief
* Unmap the physical pages previously mapped to the third-party device by
* nvidia_p2p_dma_map_pages().
*
* @param[in] peer
* The struct pci_dev * of the peer device that the DMA mapping belongs to.
* @param[in] page_table
* The page table backing the DMA mapping to be unmapped.
* @param[in] dma_mapping
* The DMA mapping containing the DMA addresses used by the third-party
* device, as retrieved with nvidia_p2p_dma_map_pages(). After this call
* returns, neither this struct nor the addresses contained within will be
* valid for use by the third-party device.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer,
struct nvidia_p2p_page_table *page_table,
struct nvidia_p2p_dma_mapping *dma_mapping);
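/*
* Illustrative sketch (not part of this header): mapping the pinned pages
* for DMA by a peer PCI device and unmapping them again. 'peer_dev' and
* 'pt' (from nvidia_p2p_get_pages()) are hypothetical variables:
*
*     struct nvidia_p2p_dma_mapping *map = NULL;
*
*     if (nvidia_p2p_dma_map_pages(peer_dev, pt, &map) == 0)
*     {
*         // program the peer with map->dma_addresses[0 .. map->entries - 1]
*         nvidia_p2p_dma_unmap_pages(peer_dev, pt, map);
*     }
*/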
/*
* @brief
* Release a set of pages previously made accessible to
* a third-party device.
*
* This API may sleep.
*
* @param[in] p2p_token
* A token that uniquely identifies the P2P mapping.
* @param[in] va_space
* A GPU virtual address space qualifier.
* @param[in] virtual_address
* The start address in the specified virtual address space.
* @param[in] page_table
* A pointer to the array of structures with P2P PTEs.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_put_pages(uint64_t p2p_token,
uint32_t va_space, uint64_t virtual_address,
struct nvidia_p2p_page_table *page_table);
/*
* @brief
* Release a set of persistent pages previously made accessible to
* a third-party device.
*
* This API may sleep.
*
* @param[in] virtual_address
* The start address in the specified virtual address space.
* @param[in] page_table
* A pointer to the array of structures with P2P PTEs.
* @param[in] flags
* Must be set to zero for now.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_put_pages_persistent(uint64_t virtual_address,
struct nvidia_p2p_page_table *page_table,
uint32_t flags);
/*
* @brief
* Free a third-party P2P page table. (This function is a no-op.)
*
* @param[in] page_table
* A pointer to the array of structures with P2P PTEs.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
*/
int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table);
/*
* @brief
* Free a third-party P2P DMA mapping. (This function is a no-op.)
*
* @param[in] dma_mapping
* A pointer to the DMA mapping structure.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
*/
int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping);
#define NVIDIA_P2P_RSYNC_DRIVER_VERSION 0x00010001
#define NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(p) \
NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_DRIVER_VERSION)
typedef
struct nvidia_p2p_rsync_driver {
uint32_t version;
int (*get_relaxed_ordering_mode)(int *mode, void *data);
void (*put_relaxed_ordering_mode)(int mode, void *data);
void (*wait_for_rsync)(struct pci_dev *gpu, void *data);
} nvidia_p2p_rsync_driver_t;
/*
* @brief
* Registers the rsync driver.
*
* @param[in] driver
* A pointer to the rsync driver structure. The NVIDIA driver uses:
*
* get_relaxed_ordering_mode to obtain a reference to the current relaxed
* ordering mode (treated as a boolean) from the rsync driver.
*
* put_relaxed_ordering_mode to release a reference to the current relaxed
* ordering mode back to the rsync driver. The NVIDIA driver will call this
* function once for each successful call to get_relaxed_ordering_mode, and
* the relaxed ordering mode must not change until the last reference is
* released.
*
* wait_for_rsync to call into the rsync module to issue RSYNC. This callback
* can't sleep or re-schedule as it may arrive under spinlocks.
* @param[in] data
* A pointer to the rsync driver's private data.
*
* @Returns
* 0 upon successful completion.
* -EINVAL if parameters are incorrect.
* -EBUSY if a module is already registered or GPU devices are in use.
*/
int nvidia_p2p_register_rsync_driver(nvidia_p2p_rsync_driver_t *driver,
void *data);
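/*
* Illustrative sketch (not part of this header): a minimal registration,
* assuming the rsync module implements the three hypothetical callbacks
* below and passes its private data as 'my_data':
*
*     static nvidia_p2p_rsync_driver_t my_driver = {
*         .version = NVIDIA_P2P_RSYNC_DRIVER_VERSION,
*         .get_relaxed_ordering_mode = my_get_ro_mode,
*         .put_relaxed_ordering_mode = my_put_ro_mode,
*         .wait_for_rsync = my_wait_for_rsync,
*     };
*
*     int rc = nvidia_p2p_register_rsync_driver(&my_driver, my_data);
*/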
/*
* @brief
* Unregisters the rsync driver.
*
* @param[in] driver
* A pointer to the rsync driver structure.
* @param[in] data
* A pointer to the rsync driver's private data.
*/
void nvidia_p2p_unregister_rsync_driver(nvidia_p2p_rsync_driver_t *driver,
void *data);
#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION 0x00020001
#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION_COMPATIBLE(p) \
NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_REG_INFO_VERSION)
typedef struct nvidia_p2p_rsync_reg {
void *ptr;
size_t size;
struct pci_dev *ibmnpu;
struct pci_dev *gpu;
uint32_t cluster_id;
uint32_t socket_id;
} nvidia_p2p_rsync_reg_t;
typedef struct nvidia_p2p_rsync_reg_info {
uint32_t version;
nvidia_p2p_rsync_reg_t *regs;
size_t entries;
} nvidia_p2p_rsync_reg_info_t;
/*
* @brief
* Gets rsync (GEN-ID) register information associated with the supported
* NPUs.
*
* The caller uses the returned information {GPU device, NPU device,
* socket-id, cluster-id} to pick the optimal generation registers for
* issuing RSYNC (NVLink HW flush).
*
* The interface allocates structures to return the information;
* nvidia_p2p_put_rsync_registers() must therefore be called to free them.
*
* Note: cluster-id is hardcoded to zero because early system configurations
* only support cluster mode, i.e. all devices share the same cluster-id (0).
* Querying per-device cluster-ids will require additional kernel support.
*
* @param[out] reg_info
* A pointer to the rsync reg info structure.
*
* @Returns
* 0 Upon successful completion. Otherwise, returns negative value.
*/
int nvidia_p2p_get_rsync_registers(nvidia_p2p_rsync_reg_info_t **reg_info);
/*
* @brief
* Frees the structures allocated by nvidia_p2p_get_rsync_registers().
*
* @param[in] reg_info
* A pointer to the rsync reg info structure.
*/
void nvidia_p2p_put_rsync_registers(nvidia_p2p_rsync_reg_info_t *reg_info);
#endif /* _NV_P2P_H_ */

kernel-open/nvidia/nv-pat.c

@@ -0,0 +1,478 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-reg.h"
#include "nv-pat.h"
int nv_pat_mode = NV_PAT_MODE_DISABLED;
#if defined(NV_ENABLE_PAT_SUPPORT)
/*
* Private PAT support for use by the NVIDIA driver. This is used on
* kernels that do not modify the PAT to include a write-combining
* entry.
*
* On kernels that have CONFIG_X86_PAT, the NVIDIA driver still checks that the
* WC entry is as expected before using PAT.
*/
#if defined(CONFIG_X86_PAT)
#define NV_ENABLE_BUILTIN_PAT_SUPPORT 0
#else
#define NV_ENABLE_BUILTIN_PAT_SUPPORT 1
#endif
#define NV_READ_PAT_ENTRIES(pat1, pat2) rdmsr(0x277, (pat1), (pat2))
#define NV_WRITE_PAT_ENTRIES(pat1, pat2) wrmsr(0x277, (pat1), (pat2))
#define NV_PAT_ENTRY(pat, index) \
(((pat) & (0xff << ((index)*8))) >> ((index)*8))
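/*
* Worked example (illustrative): with the common Linux PAT layout, the low
* MSR dword is 0x00070106 (PA0=WB 0x06, PA1=WC 0x01, PA2=UC- 0x07,
* PA3=UC 0x00). NV_PAT_ENTRY(0x00070106, 1) == 0x01, i.e. write-combining
* at index 1, which is what nv_determine_pat_mode() accepts as
* NV_PAT_MODE_KERNEL.
*/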
#if NV_ENABLE_BUILTIN_PAT_SUPPORT
static unsigned long orig_pat1, orig_pat2;
static inline void nv_disable_caches(unsigned long *cr4)
{
unsigned long cr0 = read_cr0();
write_cr0(((cr0 & (0xdfffffff)) | 0x40000000));
wbinvd();
*cr4 = NV_READ_CR4();
if (*cr4 & 0x80) NV_WRITE_CR4(*cr4 & ~0x80);
__flush_tlb();
}
static inline void nv_enable_caches(unsigned long cr4)
{
unsigned long cr0 = read_cr0();
wbinvd();
__flush_tlb();
write_cr0((cr0 & 0x9fffffff));
if (cr4 & 0x80) NV_WRITE_CR4(cr4);
}
static void nv_setup_pat_entries(void *info)
{
unsigned long pat1, pat2, cr4;
unsigned long eflags;
#if defined(NV_ENABLE_HOTPLUG_CPU)
int cpu = (NvUPtr)info;
if ((cpu != 0) && (cpu != (int)smp_processor_id()))
return;
#endif
NV_SAVE_FLAGS(eflags);
NV_CLI();
nv_disable_caches(&cr4);
NV_READ_PAT_ENTRIES(pat1, pat2);
pat1 &= 0xffff00ff;
pat1 |= 0x00000100;
NV_WRITE_PAT_ENTRIES(pat1, pat2);
nv_enable_caches(cr4);
NV_RESTORE_FLAGS(eflags);
}
static void nv_restore_pat_entries(void *info)
{
unsigned long cr4;
unsigned long eflags;
#if defined(NV_ENABLE_HOTPLUG_CPU)
int cpu = (NvUPtr)info;
if ((cpu != 0) && (cpu != (int)smp_processor_id()))
return;
#endif
NV_SAVE_FLAGS(eflags);
NV_CLI();
nv_disable_caches(&cr4);
NV_WRITE_PAT_ENTRIES(orig_pat1, orig_pat2);
nv_enable_caches(cr4);
NV_RESTORE_FLAGS(eflags);
}
/*
* NOTE 1:
* Functions register_cpu_notifier(), unregister_cpu_notifier(),
* macros register_hotcpu_notifier, unregister_hotcpu_notifier,
* and CPU states CPU_DOWN_FAILED, CPU_DOWN_PREPARE
* were removed by the following commit:
* 2016 Dec 25: b272f732f888d4cf43c943a40c9aaa836f9b7431
*
* NV_REGISTER_CPU_NOTIFIER_PRESENT is true when
* register_cpu_notifier() is present.
*
* The functions cpuhp_setup_state() and cpuhp_remove_state() should be
* used as an alternative to register_cpu_notifier() and
* unregister_cpu_notifier() functions. The following
* commit introduced these functions as well as the enum cpuhp_state.
* 2016 Feb 26: 5b7aa87e0482be768486e0c2277aa4122487eb9d
*
* NV_CPUHP_CPUHP_STATE_PRESENT is true when cpuhp_setup_state() is present.
*
* For kernels where both cpuhp_setup_state() and register_cpu_notifier()
* are present, we still use register_cpu_notifier().
*/
static int
nvidia_cpu_teardown(unsigned int cpu)
{
#if defined(NV_ENABLE_HOTPLUG_CPU)
unsigned int this_cpu = get_cpu();
if (this_cpu == cpu)
nv_restore_pat_entries(NULL);
else
smp_call_function(nv_restore_pat_entries, (void *)(NvUPtr)cpu, 1);
put_cpu();
#endif
return 0;
}
static int
nvidia_cpu_online(unsigned int cpu)
{
#if defined(NV_ENABLE_HOTPLUG_CPU)
unsigned int this_cpu = get_cpu();
if (this_cpu == cpu)
nv_setup_pat_entries(NULL);
else
smp_call_function(nv_setup_pat_entries, (void *)(NvUPtr)cpu, 1);
put_cpu();
#endif
return 0;
}
static int nv_enable_builtin_pat_support(void)
{
unsigned long pat1, pat2;
NV_READ_PAT_ENTRIES(orig_pat1, orig_pat2);
nv_printf(NV_DBG_SETUP, "saved orig pats as 0x%lx 0x%lx\n", orig_pat1, orig_pat2);
on_each_cpu(nv_setup_pat_entries, NULL, 1);
NV_READ_PAT_ENTRIES(pat1, pat2);
nv_printf(NV_DBG_SETUP, "changed pats to 0x%lx 0x%lx\n", pat1, pat2);
return 1;
}
static void nv_disable_builtin_pat_support(void)
{
unsigned long pat1, pat2;
on_each_cpu(nv_restore_pat_entries, NULL, 1);
nv_pat_mode = NV_PAT_MODE_DISABLED;
NV_READ_PAT_ENTRIES(pat1, pat2);
nv_printf(NV_DBG_SETUP, "restored orig pats as 0x%lx 0x%lx\n", pat1, pat2);
}
static int
nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
/* CPU_DOWN_FAILED was added by the following commit
* 2004 Oct 18: 71da3667be80d30121df3972caa0bf5684228379
*
* CPU_DOWN_PREPARE was added by the following commit
* 2004 Oct 18: d13d28de21d913aacd3c91e76e307fa2eb7835d8
*
* We use one ifdef for both macros since they were added on the same day.
*/
#if defined(CPU_DOWN_FAILED)
switch (action)
{
case CPU_DOWN_FAILED:
case CPU_ONLINE:
nvidia_cpu_online((NvUPtr)hcpu);
break;
case CPU_DOWN_PREPARE:
nvidia_cpu_teardown((NvUPtr)hcpu);
break;
}
#endif
return NOTIFY_OK;
}
/*
* See NOTE 1.
* To avoid unused-variable warnings when compiling against kernel versions
* that include commit b272f732f888d4cf43c943a40c9aaa836f9b7431, the
* declaration of nv_hotcpu_nfb must be protected with #if.
*
* NV_REGISTER_CPU_NOTIFIER_PRESENT is checked before
* NV_CPUHP_SETUP_STATE_PRESENT to avoid unused-variable warnings for
* nvidia_pat_online on kernels where both
* NV_REGISTER_CPU_NOTIFIER_PRESENT and NV_CPUHP_SETUP_STATE_PRESENT
* are true.
*/
#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
static struct notifier_block nv_hotcpu_nfb = {
.notifier_call = nvidia_cpu_callback,
.priority = 0
};
#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
static enum cpuhp_state nvidia_pat_online;
#endif
static int
nvidia_register_cpu_hotplug_notifier(void)
{
int ret;
/* See NOTE 1 */
#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
/* register_hotcpu_notifier() returns 0 on success or -ENOENT on failure */
ret = register_hotcpu_notifier(&nv_hotcpu_nfb);
#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
/*
* cpuhp_setup_state() returns positive number on success when state is
* CPUHP_AP_ONLINE_DYN. On failure, it returns a negative number.
*/
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"nvidia/pat:online",
nvidia_cpu_online,
nvidia_cpu_teardown);
if (ret < 0)
{
/*
* If cpuhp_setup_state() fails, cpuhp_remove_state() should
* never be called; if it were, we might remove some other
* state. Hence, explicitly set nvidia_pat_online to zero,
* which will trigger a BUG() in cpuhp_remove_state().
*/
nvidia_pat_online = 0;
}
else
{
nvidia_pat_online = ret;
}
#else
/*
* This function should be a no-op for kernels which
* - do not have CONFIG_HOTPLUG_CPU enabled,
* - do not have PAT support,
* - do not have the cpuhp_setup_state() function.
*
* On such kernels, returning an error here would result in module init
* failure. Hence, return 0 here.
*/
if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
{
ret = 0;
}
else
{
ret = -EIO;
}
#endif
if (ret < 0)
{
nv_disable_pat_support();
nv_printf(NV_DBG_ERRORS,
"NVRM: CPU hotplug notifier registration failed!\n");
return -EIO;
}
return 0;
}
static void
nvidia_unregister_cpu_hotplug_notifier(void)
{
/* See NOTE 1 */
#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
unregister_hotcpu_notifier(&nv_hotcpu_nfb);
#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
cpuhp_remove_state(nvidia_pat_online);
#endif
return;
}
#else /* NV_ENABLE_BUILTIN_PAT_SUPPORT */
static int nv_enable_builtin_pat_support(void)
{
return 0;
}
static void nv_disable_builtin_pat_support(void)
{
}
static int nvidia_register_cpu_hotplug_notifier(void)
{
return -EIO;
}
static void nvidia_unregister_cpu_hotplug_notifier(void)
{
}
#endif /* NV_ENABLE_BUILTIN_PAT_SUPPORT */
static int nv_determine_pat_mode(void)
{
unsigned int pat1, pat2, i;
NvU8 PAT_WC_index;
if (!test_bit(X86_FEATURE_PAT,
(volatile unsigned long *)&boot_cpu_data.x86_capability))
{
if ((boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) ||
(boot_cpu_data.cpuid_level < 1) ||
((cpuid_edx(1) & (1 << 16)) == 0) ||
(boot_cpu_data.x86 != 6) || (boot_cpu_data.x86_model >= 15))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: CPU does not support the PAT.\n");
return NV_PAT_MODE_DISABLED;
}
}
NV_READ_PAT_ENTRIES(pat1, pat2);
PAT_WC_index = 0xf;
for (i = 0; i < 4; i++)
{
if (NV_PAT_ENTRY(pat1, i) == 0x01)
{
PAT_WC_index = i;
break;
}
if (NV_PAT_ENTRY(pat2, i) == 0x01)
{
PAT_WC_index = (i + 4);
break;
}
}
if (PAT_WC_index == 1)
{
return NV_PAT_MODE_KERNEL;
}
else if (PAT_WC_index != 0xf)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: PAT configuration unsupported.\n");
return NV_PAT_MODE_DISABLED;
}
else
{
#if NV_ENABLE_BUILTIN_PAT_SUPPORT
return NV_PAT_MODE_BUILTIN;
#else
return NV_PAT_MODE_DISABLED;
#endif /* NV_ENABLE_BUILTIN_PAT_SUPPORT */
}
}
int nv_enable_pat_support(void)
{
if (nv_pat_mode != NV_PAT_MODE_DISABLED)
return 1;
nv_pat_mode = nv_determine_pat_mode();
switch (nv_pat_mode)
{
case NV_PAT_MODE_DISABLED:
/* avoid the PAT if unavailable/unusable */
return 0;
case NV_PAT_MODE_KERNEL:
/* inherit the kernel's PAT layout */
return 1;
case NV_PAT_MODE_BUILTIN:
/* use builtin code to modify the PAT layout */
break;
}
return nv_enable_builtin_pat_support();
}
void nv_disable_pat_support(void)
{
if (nv_pat_mode != NV_PAT_MODE_BUILTIN)
return;
nv_disable_builtin_pat_support();
}
int nv_init_pat_support(nvidia_stack_t *sp)
{
NV_STATUS status;
NvU32 data;
int disable_pat = 0;
int ret = 0;
status = rm_read_registry_dword(sp, NULL,
NV_USE_PAGE_ATTRIBUTE_TABLE, &data);
if ((status == NV_OK) && ((int)data != ~0))
{
disable_pat = (data == 0);
}
if (!disable_pat)
{
nv_enable_pat_support();
if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
{
ret = nvidia_register_cpu_hotplug_notifier();
return ret;
}
}
else
{
nv_printf(NV_DBG_ERRORS,
"NVRM: builtin PAT support disabled.\n");
}
return 0;
}
void nv_teardown_pat_support(void)
{
if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
{
nv_disable_pat_support();
nvidia_unregister_cpu_hotplug_notifier();
}
}
#endif /* defined(NV_ENABLE_PAT_SUPPORT) */


@@ -0,0 +1,59 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PAT_H_
#define _NV_PAT_H_
#include "nv-linux.h"
#if defined(NV_ENABLE_PAT_SUPPORT)
extern int nv_init_pat_support(nvidia_stack_t *sp);
extern void nv_teardown_pat_support(void);
extern int nv_enable_pat_support(void);
extern void nv_disable_pat_support(void);
#else
static inline int nv_init_pat_support(nvidia_stack_t *sp)
{
(void)sp;
return 0;
}
static inline void nv_teardown_pat_support(void)
{
return;
}
static inline int nv_enable_pat_support(void)
{
return 1;
}
static inline void nv_disable_pat_support(void)
{
return;
}
#endif
#endif /* _NV_PAT_H_ */


@@ -0,0 +1,79 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "nv-pci-table.h"
/* Devices supported by RM */
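/*
 * Note (added for clarity): each .class value below is the 24-bit PCI class
 * code (base class, sub-class, programming interface). Shifting the 16-bit
 * class constant left by 8 leaves a programming interface of 0, and a
 * class_mask of ~0 compares all class bits.
 */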
struct pci_device_id nv_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_DISPLAY_VGA << 8),
.class_mask = ~0
},
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_DISPLAY_3D << 8),
.class_mask = ~0
},
{ }
};
/* Devices supported by all drivers in nvidia.ko */
struct pci_device_id nv_module_device_table[] = {
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_DISPLAY_VGA << 8),
.class_mask = ~0
},
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_DISPLAY_3D << 8),
.class_mask = ~0
},
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_BRIDGE_OTHER << 8),
.class_mask = ~0
},
{ }
};
MODULE_DEVICE_TABLE(pci, nv_module_device_table);


@@ -0,0 +1,31 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PCI_TABLE_H_
#define _NV_PCI_TABLE_H_
#include <linux/pci.h>
extern struct pci_device_id nv_pci_table[];
#endif /* _NV_PCI_TABLE_H_ */

kernel-open/nvidia/nv-pci.c (new file, 1152 lines): diff suppressed because it is too large


@@ -0,0 +1,122 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
/*!
* @brief Unpowergate the display.
*
* Increment the device's usage counter, run pm_request_resume(dev)
* and return its result.
*
* For more details on runtime pm functions, please check the below
* files in the Linux kernel:
*
* include/linux/pm_runtime.h
* include/linux/pm.h
* or
* https://www.kernel.org/doc/Documentation/power/runtime_pm.txt
*
* pm_request_resume() submits a request to execute the subsystem-level
* resume callback for the device (the request is represented by a work
* item in pm_wq); returns 0 on success, 1 if the device's runtime PM
* status was already 'active', or error code if the request hasn't
* been queued up.
*
* @param[in] nv Per gpu linux state
*
* @returns NV_STATUS
*/
NV_STATUS NV_API_CALL nv_soc_pm_unpowergate(
nv_state_t *nv)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NvS32 ret = -EBUSY;
ret = pm_runtime_get(nvl->dev);
if (ret == 1)
{
nv_printf(NV_DBG_INFO, "NVRM: device was already unpowergated\n");
}
else if (ret == -EINPROGRESS)
{
/*
* pm_runtime_get() internally calls __pm_runtime_resume(...RPM_ASYNC)
* which internally calls rpm_resume() and this function will throw
* "-EINPROGRESS" if it is being called when device state is
* RPM_RESUMING and RPM_ASYNC or RPM_NOWAIT is set.
*/
nv_printf(NV_DBG_INFO, "NVRM: device is already unpowergating\n");
}
else if (ret < 0)
{
nv_printf(NV_DBG_ERRORS, "NVRM: unpowergate unsuccessful. ret: %d\n", ret);
return NV_ERR_GENERIC;
}
return NV_OK;
}
/*!
* @brief Powergate the display.
*
* Decrement the device's usage counter; if the result is 0 then run
* pm_request_idle(dev) and return its result.
*
* For more details on runtime pm functions, please check the below
* files in the Linux kernel:
*
* include/linux/pm_runtime.h
* include/linux/pm.h
* or
* https://www.kernel.org/doc/Documentation/power/runtime_pm.txt
*
* @param[in] nv Per gpu linux state
*
* @returns NV_STATUS
*/
NV_STATUS NV_API_CALL nv_soc_pm_powergate(
nv_state_t *nv)
{
NV_STATUS status = NV_ERR_GENERIC;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NvS32 ret = -EBUSY;
ret = pm_runtime_put(nvl->dev);
if (ret == 0)
{
status = NV_OK;
}
else
{
nv_printf(NV_DBG_ERRORS, "NVRM: powergate unsuccessful. ret: %d\n", ret);
}
return status;
}
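
/*
 * Illustrative pairing (the helper below is hypothetical, added for
 * documentation purposes and not part of the driver): every successful
 * nv_soc_pm_unpowergate() is expected to be balanced by a matching
 * nv_soc_pm_powergate() once the display access is complete.
 *
 *   static NV_STATUS example_access_display(nv_state_t *nv)
 *   {
 *       NV_STATUS status = nv_soc_pm_unpowergate(nv);
 *       if (status != NV_OK)
 *           return status;
 *
 *       // ... access display hardware here ...
 *
 *       return nv_soc_pm_powergate(nv);
 *   }
 */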

File diff suppressed because it is too large

File diff suppressed because it is too large

kernel-open/nvidia/nv-reg.h (new file, 932 lines):

@@ -0,0 +1,932 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
//
// This file holds Unix-specific NVIDIA driver options
//
#ifndef _RM_REG_H_
#define _RM_REG_H_
#include "nvtypes.h"
#include "nv-firmware-registry.h"
/*
* use NV_REG_STRING to stringify a registry key when using that registry key
*/
#define __NV_REG_STRING(regkey) #regkey
#define NV_REG_STRING(regkey) __NV_REG_STRING(regkey)
/*
* use NV_DEFINE_REG_ENTRY and NV_DEFINE_PARAMS_TABLE_ENTRY to simplify definition
* of registry keys in the kernel module source code.
*/
#define __NV_REG_VAR(regkey) NVreg_##regkey
#if defined(NV_MODULE_PARAMETER)
#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
static NvU32 __NV_REG_VAR(regkey) = (default_value); \
NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
NvU32 __NV_REG_VAR(regkey) = (default_value); \
NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
#else
#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
static NvU32 __NV_REG_VAR(regkey) = (default_value)
#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
NvU32 __NV_REG_VAR(regkey) = (default_value)
#endif
#if defined(NV_MODULE_STRING_PARAMETER)
#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
char *__NV_REG_VAR(regkey) = (default_value); \
NV_MODULE_STRING_PARAMETER(__NV_REG_VAR(regkey))
#else
#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
char *__NV_REG_VAR(regkey) = (default_value)
#endif
#define NV_DEFINE_PARAMS_TABLE_ENTRY(regkey) \
{ NV_REG_STRING(regkey), &__NV_REG_VAR(regkey) }
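/*
 * For illustration (this example is not part of the original header): with
 * NV_MODULE_PARAMETER undefined,
 *
 *   NV_DEFINE_REG_ENTRY(EnableMSI, 1)
 *
 * expands to
 *
 *   static NvU32 NVreg_EnableMSI = (1);
 *
 * and NV_DEFINE_PARAMS_TABLE_ENTRY(EnableMSI) expands to
 *
 *   { "EnableMSI", &NVreg_EnableMSI }
 */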
/*
 * Like NV_DEFINE_PARAMS_TABLE_ENTRY, but allows a mismatch between the name of
* the regkey and the name of the module parameter. When using this macro, the
* name of the parameter is passed to the extra "parameter" argument, and it is
* this name that must be used in the NV_DEFINE_REG_ENTRY() macro.
*/
#define NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(regkey, parameter) \
{ NV_REG_STRING(regkey), &__NV_REG_VAR(parameter)}
/*
*----------------- registry key definitions--------------------------
*/
/*
* Option: ModifyDeviceFiles
*
* Description:
*
* When this option is enabled, the NVIDIA driver will verify the validity
* of the NVIDIA device files in /dev and attempt to dynamically modify
* and/or (re-)create them, if necessary. If you don't wish for the NVIDIA
* driver to touch the device files, you can use this registry key.
*
* This module parameter is only honored by the NVIDIA GPU driver and NVIDIA
* capability driver. Furthermore, the NVIDIA capability driver provides
 * a modifiable /proc file entry (DeviceFileModify=0/1) to alter the behavior of
* this module parameter per device file.
*
* Possible Values:
* 0 = disable dynamic device file management
* 1 = enable dynamic device file management (default)
*/
#define __NV_MODIFY_DEVICE_FILES ModifyDeviceFiles
#define NV_REG_MODIFY_DEVICE_FILES NV_REG_STRING(__NV_MODIFY_DEVICE_FILES)
/*
* Option: DeviceFileUID
*
* Description:
*
* This registry key specifies the UID assigned to the NVIDIA device files
* created and/or modified by the NVIDIA driver when dynamic device file
* management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default UID is 0 ('root').
*/
#define __NV_DEVICE_FILE_UID DeviceFileUID
#define NV_REG_DEVICE_FILE_UID NV_REG_STRING(__NV_DEVICE_FILE_UID)
/*
* Option: DeviceFileGID
*
* Description:
*
* This registry key specifies the GID assigned to the NVIDIA device files
* created and/or modified by the NVIDIA driver when dynamic device file
* management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default GID is 0 ('root').
*/
#define __NV_DEVICE_FILE_GID DeviceFileGID
#define NV_REG_DEVICE_FILE_GID NV_REG_STRING(__NV_DEVICE_FILE_GID)
/*
* Option: DeviceFileMode
*
* Description:
*
* This registry key specifies the device file mode assigned to the NVIDIA
* device files created and/or modified by the NVIDIA driver when dynamic
* device file management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default mode is 0666 (octal, rw-rw-rw-).
*/
#define __NV_DEVICE_FILE_MODE DeviceFileMode
#define NV_REG_DEVICE_FILE_MODE NV_REG_STRING(__NV_DEVICE_FILE_MODE)
/*
* Option: ResmanDebugLevel
*
* Default value: ~0
*/
#define __NV_RESMAN_DEBUG_LEVEL ResmanDebugLevel
#define NV_REG_RESMAN_DEBUG_LEVEL NV_REG_STRING(__NV_RESMAN_DEBUG_LEVEL)
/*
* Option: RmLogonRC
*
* Default value: 1
*/
#define __NV_RM_LOGON_RC RmLogonRC
#define NV_REG_RM_LOGON_RC NV_REG_STRING(__NV_RM_LOGON_RC)
/*
* Option: InitializeSystemMemoryAllocations
*
* Description:
*
* The NVIDIA Linux driver normally clears system memory it allocates
* for use with GPUs or within the driver stack. This is to ensure
* that potentially sensitive data is not rendered accessible by
* arbitrary user applications.
*
* Owners of single-user systems or similar trusted configurations may
* choose to disable the aforementioned clears using this option and
* potentially improve performance.
*
* Possible values:
*
* 1 = zero out system memory allocations (default)
* 0 = do not perform memory clears
*/
#define __NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
InitializeSystemMemoryAllocations
#define NV_REG_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
NV_REG_STRING(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS)
/*
* Option: RegistryDwords
*
* Description:
*
* This option accepts a semicolon-separated list of key=value pairs. Each
* key name is checked against the table of static options; if a match is
* found, the static option value is overridden, but invalid options remain
* invalid. Pairs that do not match an entry in the static option table
* are passed on to the RM directly.
*
* Format:
*
* NVreg_RegistryDwords="<key=value>;<key=value>;..."
*/
#define __NV_REGISTRY_DWORDS RegistryDwords
#define NV_REG_REGISTRY_DWORDS NV_REG_STRING(__NV_REGISTRY_DWORDS)
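/*
 * Example (illustrative values, not part of the original header): lower the
 * resman debug level and disable logging of RC errors in a single option
 * string, using two of the static options defined in this file:
 *
 *   options nvidia NVreg_RegistryDwords="ResmanDebugLevel=0;RmLogonRC=0"
 */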
/*
* Option: RegistryDwordsPerDevice
*
* Description:
*
 * This option allows registry keys to be specified per GPU device, giving
 * registry control at the granularity of individual GPUs. It accepts a
 * semicolon-separated list of key=value pairs. The first key=value pair
 * MUST be "pci=DDDD:BB:DD.F;", where DDDD is the domain, BB the bus, DD
 * the device slot number and F the function. This PCI BDF identifies the
 * GPU to which the registry keys that follow are assigned.
 * If no GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is
 * found, all subsequent registry keys are skipped until the next valid PCI
 * identifier "pci=DDDD:BB:DD.F;" is found. Valid formats for the value of
 * the "pci" string are:
 * 1) bus:slot             : Domain and function default to 0.
 * 2) domain:bus:slot      : Function defaults to 0.
 * 3) domain:bus:slot.func : Complete PCI device ID string.
*
* For each of the registry keys that follows, key name is checked against the
* table of static options; if a match is found, the static option value is
* overridden, but invalid options remain invalid. Pairs that do not match an
* entry in the static option table are passed on to the RM directly.
*
* Format:
*
* NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;<key=value>;<key=value>;..; \
* pci=DDDD:BB:DD.F;<key=value>;..;"
*/
#define __NV_REGISTRY_DWORDS_PER_DEVICE RegistryDwordsPerDevice
#define NV_REG_REGISTRY_DWORDS_PER_DEVICE NV_REG_STRING(__NV_REGISTRY_DWORDS_PER_DEVICE)
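/*
 * Example (with a made-up PCI BDF, for illustration only): apply RmLogonRC=0
 * only to the GPU at domain 0000, bus 01, slot 00, function 0:
 *
 *   options nvidia NVreg_RegistryDwordsPerDevice="pci=0000:01:00.0;RmLogonRC=0"
 */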
#define __NV_RM_MSG RmMsg
#define NV_RM_MSG NV_REG_STRING(__NV_RM_MSG)
/*
* Option: UsePageAttributeTable
*
* Description:
*
* Enable/disable use of the page attribute table (PAT) available in
* modern x86/x86-64 processors to set the effective memory type of memory
* mappings to write-combining (WC).
*
* If enabled, an x86 processor with PAT support is present and the host
* system's Linux kernel did not configure one of the PAT entries to
* indicate the WC memory type, the driver will change the second entry in
* the PAT from its default (write-through (WT)) to WC at module load
* time. If the kernel did update one of the PAT entries, the driver will
* not modify the PAT.
*
* In both cases, the driver will honor attempts to map memory with the WC
* memory type by selecting the appropriate PAT entry using the correct
* set of PTE flags.
*
* Possible values:
*
* ~0 = use the NVIDIA driver's default logic (default)
* 1 = enable use of the PAT for WC mappings.
* 0 = disable use of the PAT for WC mappings.
*/
#define __NV_USE_PAGE_ATTRIBUTE_TABLE UsePageAttributeTable
#define NV_USE_PAGE_ATTRIBUTE_TABLE NV_REG_STRING(__NV_USE_PAGE_ATTRIBUTE_TABLE)
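/*
 * Example (illustrative, not part of the original header): force-enable PAT
 * usage for WC mappings from a modprobe configuration file such as
 * /etc/modprobe.d/nvidia.conf:
 *
 *   options nvidia NVreg_UsePageAttributeTable=1
 */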
/*
* Option: EnableMSI
*
* Description:
*
* When this option is enabled and the host kernel supports the MSI feature,
 * the NVIDIA driver will enable the PCI-E MSI capability of GPUs that
 * support this feature, instead of using PCI-E wired interrupts.
*
* Possible Values:
*
* 0 = disable MSI interrupt
* 1 = enable MSI interrupt (default)
*
*/
#define __NV_ENABLE_MSI EnableMSI
#define NV_REG_ENABLE_MSI NV_REG_STRING(__NV_ENABLE_MSI)
/*
* Option: EnablePCIeGen3
*
* Description:
*
* Due to interoperability problems seen with Kepler PCIe Gen3 capable GPUs
* when configured on SandyBridge E desktop platforms, NVIDIA feels that
* delivering a reliable, high-quality experience is not currently possible in
* PCIe Gen3 mode on all PCIe Gen3 platforms. Therefore, Quadro, Tesla and
* NVS Kepler products operate in PCIe Gen2 mode by default. You may use this
* option to enable PCIe Gen3 support.
*
* This is completely unsupported!
*
* Possible Values:
*
* 0: disable PCIe Gen3 support (default)
* 1: enable PCIe Gen3 support
*/
#define __NV_ENABLE_PCIE_GEN3 EnablePCIeGen3
#define NV_REG_ENABLE_PCIE_GEN3 NV_REG_STRING(__NV_ENABLE_PCIE_GEN3)
/*
* Option: MemoryPoolSize
*
* Description:
*
* When set to a non-zero value, this option specifies the size of the
* memory pool, given as a multiple of 1 GB, created on VMware ESXi to
* satisfy any system memory allocations requested by the NVIDIA kernel
* module.
*/
#define __NV_MEMORY_POOL_SIZE MemoryPoolSize
#define NV_REG_MEMORY_POOL_SIZE NV_REG_STRING(__NV_MEMORY_POOL_SIZE)
/*
* Option: KMallocHeapMaxSize
*
* Description:
*
* When set to a non-zero value, this option specifies the maximum size of the
* heap memory space reserved for kmalloc operations. Given as a
* multiple of 1 MB created on VMware ESXi to satisfy any system memory
* allocations requested by the NVIDIA kernel module.
*/
#define __NV_KMALLOC_HEAP_MAX_SIZE KMallocHeapMaxSize
#define NV_KMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_KMALLOC_HEAP_MAX_SIZE)
/*
* Option: VMallocHeapMaxSize
*
* Description:
*
* When set to a non-zero value, this option specifies the maximum size of the
* heap memory space reserved for vmalloc operations. Given as a
* multiple of 1 MB created on VMware ESXi to satisfy any system memory
* allocations requested by the NVIDIA kernel module.
*/
#define __NV_VMALLOC_HEAP_MAX_SIZE VMallocHeapMaxSize
#define NV_VMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_VMALLOC_HEAP_MAX_SIZE)
/*
* Option: IgnoreMMIOCheck
*
* Description:
*
 * When this option is enabled, the NVIDIA kernel module will ignore the
 * MMIO limit check during device probe on the VMware ESXi kernel. This is
 * typically necessary when the VMware ESXi MMIO limit differs between a
 * base version and its updates. Customers using updates can set this
 * regkey to avoid probe failures.
*/
#define __NV_IGNORE_MMIO_CHECK IgnoreMMIOCheck
#define NV_REG_IGNORE_MMIO_CHECK NV_REG_STRING(__NV_IGNORE_MMIO_CHECK)
/*
* Option: TCEBypassMode
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will attempt to setup
* all GPUs in "TCE bypass mode", in which DMA mappings of system memory bypass
* the IOMMU/TCE remapping hardware on IBM POWER systems. This is typically
* necessary for CUDA applications in which large system memory mappings may
* exceed the default TCE remapping capacity when operated in non-bypass mode.
*
* This option has no effect on non-POWER platforms.
*
* Possible Values:
*
* 0: system default TCE mode on all GPUs
* 1: enable TCE bypass mode on all GPUs
* 2: disable TCE bypass mode on all GPUs
*/
#define __NV_TCE_BYPASS_MODE TCEBypassMode
#define NV_REG_TCE_BYPASS_MODE NV_REG_STRING(__NV_TCE_BYPASS_MODE)
#define NV_TCE_BYPASS_MODE_DEFAULT 0
#define NV_TCE_BYPASS_MODE_ENABLE 1
#define NV_TCE_BYPASS_MODE_DISABLE 2
/*
* Option: pci
*
* Description:
*
* On Unix platforms, per GPU based registry key can be specified as:
* NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F,<per-gpu registry keys>".
* where DDDD:BB:DD.F refers to Domain:Bus:Device.Function.
* We need this key "pci" to identify what follows next is a PCI BDF identifier,
* for which the registry keys are to be applied.
*
* This define is not used on non-UNIX platforms.
*
* Possible Formats for value:
*
 * 1) bus:slot : Domain and function default to 0.
* 2) domain:bus:slot : Function defaults to 0.
* 3) domain:bus:slot.func : Complete PCI BDF identifier string.
*/
#define __NV_PCI_DEVICE_BDF pci
#define NV_REG_PCI_DEVICE_BDF NV_REG_STRING(__NV_PCI_DEVICE_BDF)
/*
* Option: EnableStreamMemOPs
*
* Description:
*
 * When this option is enabled, the CUDA driver will enable support for
 * CUDA Stream Memory Operations in user-mode applications; these have so
 * far been disabled by default due to limited support in developer tools.
*
* Note: this is treated as a hint. MemOPs may still be left disabled by CUDA
* driver for other reasons.
*
* Possible Values:
*
* 0 = disable feature (default)
* 1 = enable feature
*/
#define __NV_ENABLE_STREAM_MEMOPS EnableStreamMemOPs
#define NV_REG_ENABLE_STREAM_MEMOPS NV_REG_STRING(__NV_ENABLE_STREAM_MEMOPS)
/*
* Option: EnableUserNUMAManagement
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will require the
* user-mode NVIDIA Persistence daemon to manage the onlining and offlining
* of its NUMA device memory.
*
* This option has no effect on platforms that do not support onlining
* device memory to a NUMA node (this feature is only supported on certain
* POWER9 systems).
*
* Possible Values:
*
* 0: disable user-mode NUMA management
* 1: enable user-mode NUMA management (default)
*/
#define __NV_ENABLE_USER_NUMA_MANAGEMENT EnableUserNUMAManagement
#define NV_REG_ENABLE_USER_NUMA_MANAGEMENT NV_REG_STRING(__NV_ENABLE_USER_NUMA_MANAGEMENT)
/*
* Option: GpuBlacklist
*
* Description:
*
* This option accepts a list of blacklisted GPUs, separated by commas, that
* cannot be attached or used. Each blacklisted GPU is identified by a UUID in
* the ASCII format with leading "GPU-". An exact match is required; no partial
* UUIDs. This regkey is deprecated and will be removed in the future. Use
* NV_REG_EXCLUDED_GPUS instead.
*/
#define __NV_GPU_BLACKLIST GpuBlacklist
#define NV_REG_GPU_BLACKLIST NV_REG_STRING(__NV_GPU_BLACKLIST)
/*
* Option: ExcludedGpus
*
* Description:
*
* This option accepts a list of excluded GPUs, separated by commas, that
* cannot be attached or used. Each excluded GPU is identified by a UUID in
* the ASCII format with leading "GPU-". An exact match is required; no partial
* UUIDs.
*/
#define __NV_EXCLUDED_GPUS ExcludedGpus
#define NV_REG_EXCLUDED_GPUS NV_REG_STRING(__NV_EXCLUDED_GPUS)
/*
* Option: NvLinkDisable
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will not attempt to
* initialize or train NVLink connections for any GPUs. System reboot is required
 * for changes to take effect.
*
* This option has no effect if no GPUs support NVLink.
*
* Possible Values:
*
* 0: Do not disable NVLink (default)
* 1: Disable NVLink
*/
#define __NV_NVLINK_DISABLE NvLinkDisable
#define NV_REG_NVLINK_DISABLE NV_REG_STRING(__NV_NVLINK_DISABLE)
/*
* Option: RestrictProfilingToAdminUsers
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will prevent users
* without administrative access (i.e., the CAP_SYS_ADMIN capability) from
* using GPU performance counters.
*
* Possible Values:
*
* 0: Do not restrict GPU counters (default)
* 1: Restrict GPU counters to system administrators only
*/
#define __NV_RM_PROFILING_ADMIN_ONLY RmProfilingAdminOnly
#define __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER RestrictProfilingToAdminUsers
#define NV_REG_RM_PROFILING_ADMIN_ONLY NV_REG_STRING(__NV_RM_PROFILING_ADMIN_ONLY)
/*
* Option: TemporaryFilePath
*
* Description:
*
* When specified, this option changes the location in which the
* NVIDIA kernel module will create unnamed temporary files (e.g. to
 * save the contents of video memory in). The indicated path must
* be a directory. By default, temporary files are created in /tmp.
*/
#define __NV_TEMPORARY_FILE_PATH TemporaryFilePath
#define NV_REG_TEMPORARY_FILE_PATH NV_REG_STRING(__NV_TEMPORARY_FILE_PATH)
/*
* Option: PreserveVideoMemoryAllocations
*
* If enabled, this option prompts the NVIDIA kernel module to save and
* restore all video memory allocations across system power management
* cycles, i.e. suspend/resume and hibernate/restore. Otherwise,
* only select allocations are preserved.
*
* Possible Values:
*
* 0: Preserve only select video memory allocations (default)
* 1: Preserve all video memory allocations
*/
#define __NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS PreserveVideoMemoryAllocations
#define NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS \
NV_REG_STRING(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS)
/*
* Option: EnableS0ixPowerManagement
*
* When this option is enabled, the NVIDIA driver will use S0ix-based
* power management for system suspend/resume, if both the platform and
* the GPU support S0ix.
*
* During system suspend, if S0ix is enabled and
* video memory usage is above the threshold configured by
* 'S0ixPowerManagementVideoMemoryThreshold', video memory will be kept
* in self-refresh mode while the rest of the GPU is powered down.
*
* Otherwise, the driver will copy video memory contents to system memory
* and power off the video memory along with the GPU.
*
* Possible Values:
*
* 0: Disable S0ix based power management (default)
* 1: Enable S0ix based power management
*/
#define __NV_ENABLE_S0IX_POWER_MANAGEMENT EnableS0ixPowerManagement
#define NV_REG_ENABLE_S0IX_POWER_MANAGEMENT \
NV_REG_STRING(__NV_ENABLE_S0IX_POWER_MANAGEMENT)
/*
* Option: S0ixPowerManagementVideoMemoryThreshold
*
* This option controls the threshold that the NVIDIA driver will use during
* S0ix-based system power management.
*
* When S0ix is enabled and the system is suspended, the driver will
* compare the amount of video memory in use with this threshold,
* to decide whether to keep video memory in self-refresh or copy video
* memory content to system memory.
*
* See the 'EnableS0ixPowerManagement' option.
*
* Values are expressed in Megabytes (1048576 bytes).
*
* Default value for this option is 256MB.
*
*/
#define __NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
S0ixPowerManagementVideoMemoryThreshold
#define NV_REG_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
NV_REG_STRING(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
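/*
 * Example (illustrative, not part of the original header): enable S0ix-based
 * power management and raise the video memory threshold to 512 MB:
 *
 *   options nvidia NVreg_EnableS0ixPowerManagement=1 NVreg_S0ixPowerManagementVideoMemoryThreshold=512
 */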
/*
* Option: DynamicPowerManagement
*
* This option controls how aggressively the NVIDIA kernel module will manage
* GPU power through kernel interfaces.
*
* Possible Values:
*
 * 0: Never allow the GPU to be powered down.
* 1: Power down the GPU when it is not initialized.
* 2: Power down the GPU after it has been inactive for some time.
* 3: (Default) Power down the GPU after a period of inactivity (i.e.,
* mode 2) on Ampere or later notebooks. Otherwise, do not power down
* the GPU.
*/
#define __NV_DYNAMIC_POWER_MANAGEMENT DynamicPowerManagement
#define NV_REG_DYNAMIC_POWER_MANAGEMENT \
NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT)
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_NEVER 0
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_COARSE 1
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_FINE 2
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_DEFAULT 3
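/*
 * Example (illustrative): opt into fine-grained dynamic power management
 * (mode 2) regardless of the default policy:
 *
 *   options nvidia NVreg_DynamicPowerManagement=0x02
 */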
/*
* Option: DynamicPowerManagementVideoMemoryThreshold
*
* This option controls the threshold that the NVIDIA driver will use
* when selecting the dynamic power management scheme.
*
* When the driver detects that the GPU is idle, it will compare the amount
* of video memory in use with this threshold.
*
* If the current video memory usage is less than the threshold, the
* driver may preserve video memory contents in system memory and power off
* the video memory along with the GPU itself, if supported. Otherwise,
* the video memory will be kept in self-refresh mode while powering down
* the rest of the GPU, if supported.
*
* Values are expressed in Megabytes (1048576 bytes).
*
* If the requested value is greater than 200MB (the default), then it
* will be capped to 200MB.
*/
#define __NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
DynamicPowerManagementVideoMemoryThreshold
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
/*
* Option: RegisterPCIDriver
*
* Description:
*
* When this option is enabled, the NVIDIA driver will register with
* PCI subsystem.
*
* Possible values:
*
* 1 - register as PCI driver (default)
* 0 - do not register as PCI driver
*/
#define __NV_REGISTER_PCI_DRIVER RegisterPCIDriver
#define NV_REG_REGISTER_PCI_DRIVER NV_REG_STRING(__NV_REGISTER_PCI_DRIVER)
/*
* Option: EnablePCIERelaxedOrderingMode
*
* Description:
*
* When this option is enabled, the registry key RmSetPCIERelaxedOrdering will
* be set to NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE, causing
* every device to set the relaxed ordering bit to 1 in all outbound MWr
* transaction-layer packets. This is equivalent to setting the regkey to
* FORCE_ENABLE as a non-per-device registry key.
*
* Possible values:
* 0 - Do not enable PCIe TLP relaxed ordering bit-setting (default)
* 1 - Enable PCIe TLP relaxed ordering bit-setting
*/
#define __NV_ENABLE_PCIE_RELAXED_ORDERING_MODE EnablePCIERelaxedOrderingMode
#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \
NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE)
/*
* Option: EnableResizableBar
*
* Description:
*
* When this option is enabled, the NVIDIA driver will attempt to resize
* BAR1 to match framebuffer size, or the next largest available size on
* supported machines. This is currently only implemented for Linux.
*
* Possible values:
* 0 - Do not enable PCI BAR resizing
* 1 - Enable PCI BAR resizing
*/
#define __NV_ENABLE_RESIZABLE_BAR EnableResizableBar
#define NV_REG_ENABLE_RESIZABLE_BAR NV_REG_STRING(__NV_ENABLE_RESIZABLE_BAR)
/*
* Option: EnableGpuFirmware
*
* Description:
*
* When this option is enabled, the NVIDIA driver will enable use of GPU
* firmware.
*
* If this key is set globally to the system, the driver may still attempt
* to apply some policies to maintain uniform firmware modes across all
GPUs. This may result in the driver failing initialization on some GPUs
* to maintain such a policy.
*
* If this key is set using NVreg_RegistryDwordsPerDevice, then the driver
* will attempt to honor whatever configuration is specified without applying
 * additional policies. This may also result in failed GPU initializations if
* the configuration is not possible (for example if the firmware is missing
* from the filesystem, or the GPU is not capable).
*
* NOTE: More details for this regkey can be found in nv-firmware-registry.h
*/
#define __NV_ENABLE_GPU_FIRMWARE EnableGpuFirmware
#define NV_REG_ENABLE_GPU_FIRMWARE NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE)
/*
* Option: EnableGpuFirmwareLogs
*
* When this option is enabled, the NVIDIA driver will send GPU firmware logs
* to the system log, when possible.
*
* NOTE: More details for this regkey can be found in nv-firmware-registry.h
*/
#define __NV_ENABLE_GPU_FIRMWARE_LOGS EnableGpuFirmwareLogs
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE_LOGS)
/*
* Option: EnableDbgBreakpoint
*
* When this option is set to a non-zero value, and the kernel is configured
* appropriately, assertions within resman will trigger a CPU breakpoint (e.g.,
* INT3 on x86_64), assumed to be caught by an attached debugger.
*
* When this option is set to the value zero (the default), assertions within
* resman will print to the system log, but no CPU breakpoint will be triggered.
*/
#define __NV_ENABLE_DBG_BREAKPOINT EnableDbgBreakpoint
/*
* Option: OpenRmEnableUnsupportedGpus
*
* Open nvidia.ko support for features beyond what is used on Data Center GPUs
* is still fairly immature, so for now require users to opt into use of open
* nvidia.ko with a special registry key, if not on a Data Center GPU.
*/
#define __NV_OPENRM_ENABLE_UNSUPPORTED_GPUS OpenRmEnableUnsupportedGpus
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS NV_REG_STRING(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS)
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE 0x00000000
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_ENABLE 0x00000001
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE
/*
* Option: NVreg_DmaRemapPeerMmio
*
* Description:
*
* When this option is enabled, the NVIDIA driver will use device driver
* APIs provided by the Linux kernel for DMA-remapping part of a device's
* MMIO region to another device, creating e.g., IOMMU mappings as necessary.
* When this option is disabled, the NVIDIA driver will instead only apply a
* fixed offset, which may be zero, to CPU physical addresses to produce the
* DMA address for the peer's MMIO region, and no IOMMU mappings will be
* created.
*
* This option only affects peer MMIO DMA mappings, and not system memory
* mappings.
*
* Possible Values:
* 0 = disable dynamic DMA remapping of peer MMIO regions
* 1 = enable dynamic DMA remapping of peer MMIO regions (default)
*/
#define __NV_DMA_REMAP_PEER_MMIO DmaRemapPeerMmio
#define NV_DMA_REMAP_PEER_MMIO NV_REG_STRING(__NV_DMA_REMAP_PEER_MMIO)
#define NV_DMA_REMAP_PEER_MMIO_DISABLE 0x00000000
#define NV_DMA_REMAP_PEER_MMIO_ENABLE 0x00000001
/*
* Option: NVreg_RmNvlinkBandwidth
*
* Description:
*
 * This option allows the user to reduce the NVLINK P2P bandwidth to save
 * power. The option takes a string value.
*
* Possible string values:
* OFF: 0% bandwidth
* MIN: 15%-25% bandwidth depending on the system's NVLink topology
* HALF: 50% bandwidth
* 3QUARTER: 75% bandwidth
* FULL: 100% bandwidth (default)
*
 * This option only applies to Hopper+ GPUs with NVLINK version 4.0.
*/
#define __NV_RM_NVLINK_BW RmNvlinkBandwidth
#define NV_RM_NVLINK_BW NV_REG_STRING(__NV_RM_NVLINK_BW)
#if defined(NV_DEFINE_REGISTRY_KEY_TABLE)
/*
*---------registry key parameter declarations--------------
*/
NV_DEFINE_REG_ENTRY(__NV_RESMAN_DEBUG_LEVEL, ~0);
NV_DEFINE_REG_ENTRY(__NV_RM_LOGON_RC, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MODIFY_DEVICE_FILES, 1);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_UID, 0);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_GID, 0);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_MODE, 0666);
NV_DEFINE_REG_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS, 1);
NV_DEFINE_REG_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE, ~0);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_PCIE_GEN3, 0);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_MSI, 1);
NV_DEFINE_REG_ENTRY(__NV_TCE_BYPASS_MODE, NV_TCE_BYPASS_MODE_DEFAULT);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_STREAM_MEMOPS, 0);
NV_DEFINE_REG_ENTRY(__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER, 1);
NV_DEFINE_REG_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, 0);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT, 0);
NV_DEFINE_REG_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 256);
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT, 3);
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 200);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE, NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS, NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG);
NV_DEFINE_REG_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS, NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_USER_NUMA_MANAGEMENT, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MEMORY_POOL_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_KMALLOC_HEAP_MAX_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_VMALLOC_HEAP_MAX_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_RESIZABLE_BAR, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS_PER_DEVICE, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_RM_MSG, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL);
NV_DEFINE_REG_ENTRY(__NV_DMA_REMAP_PEER_MMIO, NV_DMA_REMAP_PEER_MMIO_ENABLE);
NV_DEFINE_REG_STRING_ENTRY(__NV_RM_NVLINK_BW, NULL);
/*
*----------------registry database definition----------------------
*/
/*
* You can enable any of the registry options disabled by default by
* editing their respective entries in the table below. The last field
* determines if the option is considered valid - in order for the
* changes to take effect, you need to recompile and reload the NVIDIA
* kernel module.
*/
nv_parm_t nv_parms[] = {
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RESMAN_DEBUG_LEVEL),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RM_LOGON_RC),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MODIFY_DEVICE_FILES),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_UID),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_GID),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_GEN3),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MEMORY_POOL_SIZE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_KMALLOC_HEAP_MAX_SIZE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_VMALLOC_HEAP_MAX_SIZE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IGNORE_MMIO_CHECK),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_TCE_BYPASS_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_STREAM_MEMOPS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_USER_NUMA_MANAGEMENT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_NVLINK_DISABLE),
NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(__NV_RM_PROFILING_ADMIN_ONLY,
__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_RESIZABLE_BAR),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DMA_REMAP_PEER_MMIO),
{NULL, NULL}
};
#elif defined(NVRM)
extern nv_parm_t nv_parms[];
#endif /* NV_DEFINE_REGISTRY_KEY_TABLE */
#endif /* _RM_REG_H_ */


@@ -0,0 +1,89 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "nv-linux.h"
#include "os-interface.h"
#include "nv-report-err.h"
nv_report_error_cb_t nv_error_cb_handle = NULL;
int nv_register_error_cb(nv_report_error_cb_t report_error_cb)
{
if (report_error_cb == NULL)
return -EINVAL;
if (nv_error_cb_handle != NULL)
return -EBUSY;
nv_error_cb_handle = report_error_cb;
return 0;
}
EXPORT_SYMBOL(nv_register_error_cb);
int nv_unregister_error_cb(void)
{
if (nv_error_cb_handle == NULL)
return -EPERM;
nv_error_cb_handle = NULL;
return 0;
}
EXPORT_SYMBOL(nv_unregister_error_cb);
struct pci_dev;
void nv_report_error(
    struct pci_dev *dev,
    NvU32 error_number,
    const char *format,
    va_list ap
)
{
    va_list ap_copy;
    char *buffer;
    int length = 0;
    NV_STATUS status = NV_OK;

    if (nv_error_cb_handle != NULL)
    {
        /*
         * Measure the formatted string with a copy of 'ap' so that 'ap'
         * itself remains valid for the second vsnprintf() below.
         */
        va_copy(ap_copy, ap);
        length = vsnprintf(NULL, 0, format, ap_copy);
        va_end(ap_copy);

        if (length > 0)
        {
            status = os_alloc_mem((void **)&buffer, (length + 1) * sizeof(char));
            if (status == NV_OK)
            {
                /* length + 1 leaves room for the terminating NUL. */
                vsnprintf(buffer, length + 1, format, ap);
                nv_error_cb_handle(dev, error_number, buffer, length + 1);
                os_free_mem(buffer);
            }
        }
    }
}


@@ -0,0 +1,66 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_REPORT_ERR_H_
#define _NV_REPORT_ERR_H_
/*
* @brief
* Callback definition for obtaining XID error string and data.
*
* @param[in] pci_dev *
 * Structure describing the GPU PCI device.
* @param[in] uint32_t
* XID number
* @param[in] char *
* Error string with HWERR info.
* @param[in] int
* Length of error string.
*/
typedef void (*nv_report_error_cb_t)(struct pci_dev *, uint32_t, char *, int);
/*
* @brief
* Register callback function to obtain XID error string and data.
*
* @param[in] report_error_cb
 * A function pointer to receive the callback.
*
* @return
* 0 upon successful completion.
* -EINVAL callback handle is NULL.
* -EBUSY callback handle is already registered.
*/
int nv_register_error_cb(nv_report_error_cb_t report_error_cb);
/*
* @brief
* Unregisters callback function handle.
*
* @return
* 0 upon successful completion.
* -EPERM unregister not permitted on NULL callback handle.
*/
int nv_unregister_error_cb(void);
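/*
 * Illustrative usage sketch (the consumer module and callback below are
 * hypothetical, not part of this interface):
 *
 *   static void my_xid_cb(struct pci_dev *dev, uint32_t xid,
 *                         char *msg, int len)
 *   {
 *       pr_err("XID %u on %s: %s\n", xid, pci_name(dev), msg);
 *   }
 *
 *   // during module init:
 *   if (nv_register_error_cb(my_xid_cb) != 0)
 *       ; // handle -EINVAL (NULL handle) or -EBUSY (already registered)
 *
 *   // during module teardown:
 *   nv_unregister_error_cb();
 */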
#endif /* _NV_REPORT_ERR_H_ */


@@ -0,0 +1,201 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-linux.h"
#include "nv-rsync.h"
nv_rsync_info_t g_rsync_info;
void nv_init_rsync_info(
void
)
{
g_rsync_info.relaxed_ordering_mode = NV_FALSE;
g_rsync_info.usage_count = 0;
g_rsync_info.data = NULL;
NV_INIT_MUTEX(&g_rsync_info.lock);
}
void nv_destroy_rsync_info(
void
)
{
WARN_ON(g_rsync_info.data);
WARN_ON(g_rsync_info.usage_count);
WARN_ON(g_rsync_info.relaxed_ordering_mode);
}
int nv_get_rsync_info(
void
)
{
int mode;
int rc = 0;
down(&g_rsync_info.lock);
if (g_rsync_info.usage_count == 0)
{
if (g_rsync_info.get_relaxed_ordering_mode)
{
rc = g_rsync_info.get_relaxed_ordering_mode(&mode,
g_rsync_info.data);
if (rc != 0)
{
goto done;
}
g_rsync_info.relaxed_ordering_mode = !!mode;
}
}
g_rsync_info.usage_count++;
done:
up(&g_rsync_info.lock);
return rc;
}
void nv_put_rsync_info(
void
)
{
int mode;
down(&g_rsync_info.lock);
g_rsync_info.usage_count--;
if (g_rsync_info.usage_count == 0)
{
if (g_rsync_info.put_relaxed_ordering_mode)
{
mode = g_rsync_info.relaxed_ordering_mode;
g_rsync_info.put_relaxed_ordering_mode(mode, g_rsync_info.data);
g_rsync_info.relaxed_ordering_mode = NV_FALSE;
}
}
up(&g_rsync_info.lock);
}
int nv_register_rsync_driver(
int (*get_relaxed_ordering_mode)(int *mode, void *data),
void (*put_relaxed_ordering_mode)(int mode, void *data),
void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
void *data
)
{
int rc = 0;
down(&g_rsync_info.lock);
if (g_rsync_info.get_relaxed_ordering_mode != NULL)
{
rc = -EBUSY;
goto done;
}
if (g_rsync_info.usage_count != 0)
{
rc = -EBUSY;
goto done;
}
g_rsync_info.get_relaxed_ordering_mode = get_relaxed_ordering_mode;
g_rsync_info.put_relaxed_ordering_mode = put_relaxed_ordering_mode;
g_rsync_info.wait_for_rsync = wait_for_rsync;
g_rsync_info.data = data;
done:
up(&g_rsync_info.lock);
return rc;
}
void nv_unregister_rsync_driver(
int (*get_relaxed_ordering_mode)(int *mode, void *data),
void (*put_relaxed_ordering_mode)(int mode, void *data),
void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
void *data
)
{
down(&g_rsync_info.lock);
WARN_ON(g_rsync_info.usage_count != 0);
WARN_ON(g_rsync_info.get_relaxed_ordering_mode !=
get_relaxed_ordering_mode);
WARN_ON(g_rsync_info.put_relaxed_ordering_mode !=
put_relaxed_ordering_mode);
WARN_ON(g_rsync_info.wait_for_rsync != wait_for_rsync);
WARN_ON(g_rsync_info.data != data);
g_rsync_info.get_relaxed_ordering_mode = NULL;
g_rsync_info.put_relaxed_ordering_mode = NULL;
g_rsync_info.wait_for_rsync = NULL;
g_rsync_info.data = NULL;
up(&g_rsync_info.lock);
}
NvBool nv_get_rsync_relaxed_ordering_mode(
nv_state_t *nv
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
/* shouldn't be called without opening a device */
WARN_ON(NV_ATOMIC_READ(nvl->usage_count) == 0);
/*
* g_rsync_info.relaxed_ordering_mode can be safely accessed outside of
* g_rsync_info.lock once a device is opened. During nvidia_open(), we
* lock the relaxed ordering state by ref-counting the rsync module
* through get_relaxed_ordering_mode.
*/
return g_rsync_info.relaxed_ordering_mode;
}
void nv_wait_for_rsync(
nv_state_t *nv
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
/* shouldn't be called without opening a device */
WARN_ON(NV_ATOMIC_READ(nvl->usage_count) == 0);
/*
* g_rsync_info.relaxed_ordering_mode can be safely accessed outside of
* g_rsync_info.lock once a device is opened. During nvidia_open(), we
* block unregistration of the rsync driver by ref-counting the module
* through get_relaxed_ordering_mode.
*/
if (g_rsync_info.relaxed_ordering_mode)
{
WARN_ON(g_rsync_info.wait_for_rsync == NULL);
g_rsync_info.wait_for_rsync(nvl->pci_dev, g_rsync_info.data);
}
}


@@ -0,0 +1,57 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_RSYNC_H_
#define _NV_RSYNC_H_
#include "nv-linux.h"
typedef struct nv_rsync_info
{
struct semaphore lock;
uint32_t usage_count;
NvBool relaxed_ordering_mode;
int (*get_relaxed_ordering_mode)(int *mode, void *data);
void (*put_relaxed_ordering_mode)(int mode, void *data);
void (*wait_for_rsync)(struct pci_dev *gpu, void *data);
void *data;
} nv_rsync_info_t;
void nv_init_rsync_info(void);
void nv_destroy_rsync_info(void);
int nv_get_rsync_info(void);
void nv_put_rsync_info(void);
int nv_register_rsync_driver(
int (*get_relaxed_ordering_mode)(int *mode, void *data),
void (*put_relaxed_ordering_mode)(int mode, void *data),
void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
void *data);
void nv_unregister_rsync_driver(
int (*get_relaxed_ordering_mode)(int *mode, void *data),
void (*put_relaxed_ordering_mode)(int mode, void *data),
void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
void *data);
NvBool nv_get_rsync_relaxed_ordering_mode(nv_state_t *nv);
void nv_wait_for_rsync(nv_state_t *nv);
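/*
 * Illustrative registration sketch (the callbacks below are hypothetical
 * placeholders, not part of this interface): a platform rsync module
 * registers one set of callbacks, and must pass the same pointers back when
 * unregistering, as enforced by the WARN_ON checks in nv-rsync.c:
 *
 *   static int  my_get_ro(int *mode, void *data) { *mode = 1; return 0; }
 *   static void my_put_ro(int mode, void *data)  { }
 *   static void my_wait(struct pci_dev *gpu, void *data) { }
 *
 *   int rc = nv_register_rsync_driver(my_get_ro, my_put_ro, my_wait, NULL);
 *   // ... later, once usage_count has dropped to zero ...
 *   nv_unregister_rsync_driver(my_get_ro, my_put_ro, my_wait, NULL);
 */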
#endif


@@ -0,0 +1,161 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-frontend.h"
NV_STATUS NV_API_CALL nv_add_mapping_context_to_file(
nv_state_t *nv,
nv_usermap_access_params_t *nvuap,
NvU32 prot,
void *pAllocPriv,
NvU64 pageIndex,
NvU32 fd
)
{
NV_STATUS status = NV_OK;
nv_alloc_mapping_context_t *nvamc = NULL;
nv_file_private_t *nvfp = NULL;
nv_linux_file_private_t *nvlfp = NULL;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
void *priv = NULL;
nvfp = nv_get_file_private(fd, NV_IS_CTL_DEVICE(nv), &priv);
if (nvfp == NULL)
return NV_ERR_INVALID_ARGUMENT;
nvlfp = nv_get_nvlfp_from_nvfp(nvfp);
nvamc = &nvlfp->mmap_context;
if (nvamc->valid)
{
status = NV_ERR_STATE_IN_USE;
goto done;
}
if (NV_IS_CTL_DEVICE(nv))
{
nvamc->alloc = pAllocPriv;
nvamc->page_index = pageIndex;
}
else
{
if (NV_STATE_PTR(nvlfp->nvptr) != nv)
{
status = NV_ERR_INVALID_ARGUMENT;
goto done;
}
nvamc->mmap_start = nvuap->mmap_start;
nvamc->mmap_size = nvuap->mmap_size;
if (nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE)
{
nvamc->page_array = nvuap->page_array;
nvamc->num_pages = nvuap->num_pages;
}
nvamc->access_start = nvuap->access_start;
nvamc->access_size = nvuap->access_size;
nvamc->remap_prot_extra = nvuap->remap_prot_extra;
}
nvamc->prot = prot;
nvamc->valid = NV_TRUE;
nvamc->caching = nvuap->caching;
done:
nv_put_file_private(priv);
return status;
}
NV_STATUS NV_API_CALL nv_alloc_user_mapping(
nv_state_t *nv,
void *pAllocPrivate,
NvU64 pageIndex,
NvU32 pageOffset,
NvU64 size,
NvU32 protect,
NvU64 *pUserAddress,
void **ppPrivate
)
{
nv_alloc_t *at = pAllocPrivate;
if (at->flags.contig)
*pUserAddress = (at->page_table[0]->phys_addr + (pageIndex * PAGE_SIZE) + pageOffset);
else
*pUserAddress = (at->page_table[pageIndex]->phys_addr + pageOffset);
return NV_OK;
}
NV_STATUS NV_API_CALL nv_free_user_mapping(
nv_state_t *nv,
void *pAllocPrivate,
NvU64 userAddress,
void *pPrivate
)
{
return NV_OK;
}
/*
 * This function adjusts the {mmap,access}_{start,size} fields to reflect
 * platform-specific mechanisms for isolating mappings at a finer granularity
 * than the OS page size.
 */
NV_STATUS NV_API_CALL nv_get_usermap_access_params(
nv_state_t *nv,
nv_usermap_access_params_t *nvuap
)
{
NvU64 addr = nvuap->addr;
NvU64 size = nvuap->size;
nvuap->remap_prot_extra = 0;
/*
* Do verification and cache encoding based on the original
* (ostensibly smaller) mmap request, since accesses should be
* restricted to that range.
*/
if (rm_gpu_need_4k_page_isolation(nv) &&
NV_4K_PAGE_ISOLATION_REQUIRED(addr, size))
{
#if defined(NV_4K_PAGE_ISOLATION_PRESENT)
nvuap->remap_prot_extra = NV_PROT_4K_PAGE_ISOLATION;
nvuap->access_start = (NvU64)NV_4K_PAGE_ISOLATION_ACCESS_START(addr);
nvuap->access_size = NV_4K_PAGE_ISOLATION_ACCESS_LEN(addr, size);
nvuap->mmap_start = (NvU64)NV_4K_PAGE_ISOLATION_MMAP_ADDR(addr);
nvuap->mmap_size = NV_4K_PAGE_ISOLATION_MMAP_LEN(size);
#else
NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "4K page isolation required but not available!\n");
return NV_ERR_OPERATING_SYSTEM;
#endif
}
return NV_OK;
}

kernel-open/nvidia/nv-vm.c (new file, 703 lines):

@@ -0,0 +1,703 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "os-interface.h"
#include "nv.h"
#include "nv-linux.h"
static inline void nv_set_contig_memory_uc(nvidia_pte_t *page_ptr, NvU32 num_pages)
{
#if defined(NV_SET_MEMORY_UC_PRESENT)
struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
unsigned long addr = (unsigned long)page_address(page);
set_memory_uc(addr, num_pages);
#elif defined(NV_SET_PAGES_UC_PRESENT)
struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
set_pages_uc(page, num_pages);
#endif
}
static inline void nv_set_contig_memory_wb(nvidia_pte_t *page_ptr, NvU32 num_pages)
{
#if defined(NV_SET_MEMORY_UC_PRESENT)
struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
unsigned long addr = (unsigned long)page_address(page);
set_memory_wb(addr, num_pages);
#elif defined(NV_SET_PAGES_UC_PRESENT)
struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
set_pages_wb(page, num_pages);
#endif
}
static inline int nv_set_memory_array_type_present(NvU32 type)
{
switch (type)
{
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
case NV_MEMORY_UNCACHED:
return 1;
case NV_MEMORY_WRITEBACK:
return 1;
#endif
default:
return 0;
}
}
static inline int nv_set_pages_array_type_present(NvU32 type)
{
switch (type)
{
#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
case NV_MEMORY_UNCACHED:
return 1;
case NV_MEMORY_WRITEBACK:
return 1;
#endif
default:
return 0;
}
}
static inline void nv_set_memory_array_type(
unsigned long *pages,
NvU32 num_pages,
NvU32 type
)
{
switch (type)
{
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
case NV_MEMORY_UNCACHED:
set_memory_array_uc(pages, num_pages);
break;
case NV_MEMORY_WRITEBACK:
set_memory_array_wb(pages, num_pages);
break;
#endif
default:
nv_printf(NV_DBG_ERRORS,
"NVRM: %s(): type %d unimplemented\n",
__FUNCTION__, type);
break;
}
}
static inline void nv_set_pages_array_type(
struct page **pages,
NvU32 num_pages,
NvU32 type
)
{
switch (type)
{
#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
case NV_MEMORY_UNCACHED:
set_pages_array_uc(pages, num_pages);
break;
case NV_MEMORY_WRITEBACK:
set_pages_array_wb(pages, num_pages);
break;
#endif
default:
nv_printf(NV_DBG_ERRORS,
"NVRM: %s(): type %d unimplemented\n",
__FUNCTION__, type);
break;
}
}
static inline void nv_set_contig_memory_type(
nvidia_pte_t *page_ptr,
NvU32 num_pages,
NvU32 type
)
{
switch (type)
{
case NV_MEMORY_UNCACHED:
nv_set_contig_memory_uc(page_ptr, num_pages);
break;
case NV_MEMORY_WRITEBACK:
nv_set_contig_memory_wb(page_ptr, num_pages);
break;
default:
nv_printf(NV_DBG_ERRORS,
"NVRM: %s(): type %d unimplemented\n",
__FUNCTION__, type);
}
}
static inline void nv_set_memory_type(nv_alloc_t *at, NvU32 type)
{
NvU32 i;
NV_STATUS status = NV_OK;
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
unsigned long *pages = NULL;
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
struct page **pages = NULL;
#else
unsigned long *pages = NULL;
#endif
nvidia_pte_t *page_ptr;
struct page *page;
if (nv_set_memory_array_type_present(type))
{
status = os_alloc_mem((void **)&pages,
at->num_pages * sizeof(unsigned long));
}
else if (nv_set_pages_array_type_present(type))
{
status = os_alloc_mem((void **)&pages,
at->num_pages * sizeof(struct page*));
}
if (status != NV_OK)
pages = NULL;
//
// If the set_{memory,page}_array_* functions are in the kernel interface,
// it's faster to use them since they work on non-contiguous memory,
// whereas the set_{memory,page}_* functions do not.
//
if (pages)
{
for (i = 0; i < at->num_pages; i++)
{
page_ptr = at->page_table[i];
page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
pages[i] = (unsigned long)page_address(page);
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
pages[i] = page;
#endif
}
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
nv_set_memory_array_type(pages, at->num_pages, type);
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
nv_set_pages_array_type(pages, at->num_pages, type);
#endif
os_free_mem(pages);
}
//
// If the set_{memory,page}_array_* functions aren't present in the kernel
// interface, each page has to be set individually, which has been measured
// to be ~10x slower than using the set_{memory,page}_array_* functions.
//
else
{
for (i = 0; i < at->num_pages; i++)
nv_set_contig_memory_type(at->page_table[i], 1, type);
}
}
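/*
* Illustrative sketch (not part of this file; hypothetical caller): the two
* paths above reduce to the following on an x86 kernel that exports the
* array variants. The array call handles all of the (possibly
* non-contiguous) pages at once, while the per-page fallback is what the
* comment above measures at ~10x slower.
*
*     struct page *pages[16];
*     // ... populate pages[] from the allocation's page table ...
*     set_pages_array_uc(pages, 16);   // one call covers every page
*
*     for (i = 0; i < 16; i++)         // fallback: one call per page
*         set_pages_uc(pages[i], 1);
*/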
static NvU64 nv_get_max_sysmem_address(void)
{
NvU64 global_max_pfn = 0ULL;
int node_id;
for_each_online_node(node_id)
{
global_max_pfn = max(global_max_pfn, (NvU64)node_end_pfn(node_id));
}
return ((global_max_pfn + 1) << PAGE_SHIFT) - 1;
}
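/*
* Worked example for the computation above: with 4 KB pages (PAGE_SHIFT = 12)
* and a highest online PFN of 0xFFFFF (i.e. 4 GB of addressable memory),
* ((0xFFFFF + 1) << 12) - 1 = 0xFFFFFFFF, the last addressable sysmem byte.
*/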
static unsigned int nv_compute_gfp_mask(
nv_state_t *nv,
nv_alloc_t *at
)
{
unsigned int gfp_mask = NV_GFP_KERNEL;
struct device *dev = at->dev;
/*
* If we know that SWIOTLB is enabled (and therefore we avoid calling the
* kernel to DMA-remap the pages), or if we are using dma_direct (which may
* transparently use the SWIOTLB for pages that are unaddressable by the
* device, in kernel versions 5.0 and later), limit our allocation pool
* to the first 4GB to avoid allocating pages outside of our device's
* addressable limit.
* Also, limit the allocation to the first 4GB if explicitly requested by
* setting the "nv->force_dma32_alloc" variable.
*/
if (!nv || !nv_requires_dma_remap(nv) || nv_is_dma_direct(dev) || nv->force_dma32_alloc)
{
NvU64 max_sysmem_address = nv_get_max_sysmem_address();
if ((dev && dev->dma_mask && (*(dev->dma_mask) < max_sysmem_address)) ||
(nv && nv->force_dma32_alloc))
{
gfp_mask = NV_GFP_DMA32;
}
}
#if defined(__GFP_RETRY_MAYFAIL)
gfp_mask |= __GFP_RETRY_MAYFAIL;
#elif defined(__GFP_NORETRY)
gfp_mask |= __GFP_NORETRY;
#endif
#if defined(__GFP_ZERO)
if (at->flags.zeroed)
gfp_mask |= __GFP_ZERO;
#endif
#if defined(__GFP_THISNODE)
if (at->flags.node)
gfp_mask |= __GFP_THISNODE;
#endif
// Compound pages are required by vm_insert_page for high-order page
// allocations
if (at->order > 0)
gfp_mask |= __GFP_COMP;
return gfp_mask;
}
/*
* This function is needed for allocating contiguous physical memory in xen
* dom0. Because of the use of xen sw iotlb in xen dom0, memory allocated by
* NV_GET_FREE_PAGES may not be machine contiguous when size is more than
* 1 page. nv_alloc_coherent_pages() will give us machine contiguous memory.
* Even though we get dma_address directly in this function, we will
* still call pci_map_page() later to get dma address. This is fine as it
* will return the same machine address.
*/
static NV_STATUS nv_alloc_coherent_pages(
nv_state_t *nv,
nv_alloc_t *at
)
{
nvidia_pte_t *page_ptr;
NvU32 i;
unsigned int gfp_mask;
unsigned long virt_addr = 0;
dma_addr_t bus_addr;
nv_linux_state_t *nvl;
struct device *dev;
if (!nv)
{
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %s: coherent page alloc on nvidiactl not supported\n", __FUNCTION__);
return NV_ERR_NOT_SUPPORTED;
}
nvl = NV_GET_NVL_FROM_NV_STATE(nv);
dev = nvl->dev;
gfp_mask = nv_compute_gfp_mask(nv, at);
virt_addr = (unsigned long)dma_alloc_coherent(dev,
at->num_pages * PAGE_SIZE,
&bus_addr,
gfp_mask);
if (!virt_addr)
{
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
return NV_ERR_NO_MEMORY;
}
for (i = 0; i < at->num_pages; i++)
{
page_ptr = at->page_table[i];
page_ptr->virt_addr = virt_addr + i * PAGE_SIZE;
page_ptr->phys_addr = virt_to_phys((void *)page_ptr->virt_addr);
page_ptr->dma_addr = bus_addr + i * PAGE_SIZE;
}
if (at->cache_type != NV_MEMORY_CACHED)
{
nv_set_contig_memory_type(at->page_table[0],
at->num_pages,
NV_MEMORY_UNCACHED);
}
at->flags.coherent = NV_TRUE;
return NV_OK;
}
static void nv_free_coherent_pages(
nv_alloc_t *at
)
{
nvidia_pte_t *page_ptr;
struct device *dev = at->dev;
page_ptr = at->page_table[0];
if (at->cache_type != NV_MEMORY_CACHED)
{
nv_set_contig_memory_type(at->page_table[0],
at->num_pages,
NV_MEMORY_WRITEBACK);
}
dma_free_coherent(dev, at->num_pages * PAGE_SIZE,
(void *)page_ptr->virt_addr, page_ptr->dma_addr);
}
NV_STATUS nv_alloc_contig_pages(
nv_state_t *nv,
nv_alloc_t *at
)
{
NV_STATUS status;
nvidia_pte_t *page_ptr;
NvU32 i, j;
unsigned int gfp_mask;
unsigned long virt_addr = 0;
NvU64 phys_addr;
struct device *dev = at->dev;
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);
// TODO: This is a temporary WAR, and will be removed after fixing bug 200732409.
if (os_is_xen_dom0() || at->flags.unencrypted)
return nv_alloc_coherent_pages(nv, at);
at->order = get_order(at->num_pages * PAGE_SIZE);
gfp_mask = nv_compute_gfp_mask(nv, at);
if (at->flags.node)
{
NV_ALLOC_PAGES_NODE(virt_addr, at->node_id, at->order, gfp_mask);
}
else
{
NV_GET_FREE_PAGES(virt_addr, at->order, gfp_mask);
}
if (virt_addr == 0)
{
if (os_is_vgx_hyper())
{
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %s: failed to allocate memory, trying coherent memory \n", __FUNCTION__);
status = nv_alloc_coherent_pages(nv, at);
return status;
}
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
return NV_ERR_NO_MEMORY;
}
#if !defined(__GFP_ZERO)
if (at->flags.zeroed)
memset((void *)virt_addr, 0, (at->num_pages * PAGE_SIZE));
#endif
for (i = 0; i < at->num_pages; i++, virt_addr += PAGE_SIZE)
{
phys_addr = nv_get_kern_phys_address(virt_addr);
if (phys_addr == 0)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: VM: %s: failed to look up physical address\n",
__FUNCTION__);
status = NV_ERR_OPERATING_SYSTEM;
goto failed;
}
page_ptr = at->page_table[i];
page_ptr->phys_addr = phys_addr;
page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr);
page_ptr->virt_addr = virt_addr;
page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);
NV_MAYBE_RESERVE_PAGE(page_ptr);
}
if (at->cache_type != NV_MEMORY_CACHED)
{
nv_set_contig_memory_type(at->page_table[0],
at->num_pages,
NV_MEMORY_UNCACHED);
}
at->flags.coherent = NV_FALSE;
return NV_OK;
failed:
if (i > 0)
{
for (j = 0; j < i; j++)
NV_MAYBE_UNRESERVE_PAGE(at->page_table[j]);
}
page_ptr = at->page_table[0];
NV_FREE_PAGES(page_ptr->virt_addr, at->order);
return status;
}
void nv_free_contig_pages(
nv_alloc_t *at
)
{
nvidia_pte_t *page_ptr;
unsigned int i;
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);
if (at->flags.coherent)
return nv_free_coherent_pages(at);
if (at->cache_type != NV_MEMORY_CACHED)
{
nv_set_contig_memory_type(at->page_table[0],
at->num_pages,
NV_MEMORY_WRITEBACK);
}
for (i = 0; i < at->num_pages; i++)
{
page_ptr = at->page_table[i];
if (NV_GET_PAGE_COUNT(page_ptr) != page_ptr->page_count)
{
static int count = 0;
if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: VM: %s: page count != initial page count (%u,%u)\n",
__FUNCTION__, NV_GET_PAGE_COUNT(page_ptr),
page_ptr->page_count);
}
}
NV_MAYBE_UNRESERVE_PAGE(page_ptr);
}
page_ptr = at->page_table[0];
NV_FREE_PAGES(page_ptr->virt_addr, at->order);
}
NV_STATUS nv_alloc_system_pages(
nv_state_t *nv,
nv_alloc_t *at
)
{
NV_STATUS status;
nvidia_pte_t *page_ptr;
NvU32 i, j;
unsigned int gfp_mask;
unsigned long virt_addr = 0;
NvU64 phys_addr;
struct device *dev = at->dev;
dma_addr_t bus_addr;
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %u: %u pages\n", __FUNCTION__, at->num_pages);
gfp_mask = nv_compute_gfp_mask(nv, at);
for (i = 0; i < at->num_pages; i++)
{
if (at->flags.unencrypted && (dev != NULL))
{
virt_addr = (unsigned long)dma_alloc_coherent(dev,
PAGE_SIZE,
&bus_addr,
gfp_mask);
at->flags.coherent = NV_TRUE;
}
else if (at->flags.node)
{
NV_ALLOC_PAGES_NODE(virt_addr, at->node_id, 0, gfp_mask);
}
else
{
NV_GET_FREE_PAGES(virt_addr, 0, gfp_mask);
}
if (virt_addr == 0)
{
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
status = NV_ERR_NO_MEMORY;
goto failed;
}
#if !defined(__GFP_ZERO)
if (at->flags.zeroed)
memset((void *)virt_addr, 0, PAGE_SIZE);
#endif
phys_addr = nv_get_kern_phys_address(virt_addr);
if (phys_addr == 0)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: VM: %s: failed to look up physical address\n",
__FUNCTION__);
NV_FREE_PAGES(virt_addr, 0);
status = NV_ERR_OPERATING_SYSTEM;
goto failed;
}
#if defined(_PAGE_NX)
if (((_PAGE_NX & pgprot_val(PAGE_KERNEL)) != 0) &&
(phys_addr < 0x400000))
{
nv_printf(NV_DBG_SETUP,
"NVRM: VM: %s: discarding page @ 0x%llx\n",
__FUNCTION__, phys_addr);
--i;
continue;
}
#endif
page_ptr = at->page_table[i];
page_ptr->phys_addr = phys_addr;
page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr);
page_ptr->virt_addr = virt_addr;
//
// Use unencrypted dma_addr returned by dma_alloc_coherent() as
// nv_phys_to_dma() returns encrypted dma_addr when AMD SEV is enabled.
//
if (at->flags.coherent)
page_ptr->dma_addr = bus_addr;
else if (dev)
page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);
else
page_ptr->dma_addr = page_ptr->phys_addr;
NV_MAYBE_RESERVE_PAGE(page_ptr);
}
if (at->cache_type != NV_MEMORY_CACHED)
nv_set_memory_type(at, NV_MEMORY_UNCACHED);
return NV_OK;
failed:
if (i > 0)
{
for (j = 0; j < i; j++)
{
page_ptr = at->page_table[j];
NV_MAYBE_UNRESERVE_PAGE(page_ptr);
if (at->flags.coherent)
{
dma_free_coherent(dev, PAGE_SIZE, (void *)page_ptr->virt_addr,
page_ptr->dma_addr);
}
else
{
NV_FREE_PAGES(page_ptr->virt_addr, 0);
}
}
}
return status;
}
void nv_free_system_pages(
nv_alloc_t *at
)
{
nvidia_pte_t *page_ptr;
unsigned int i;
struct device *dev = at->dev;
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);
if (at->cache_type != NV_MEMORY_CACHED)
nv_set_memory_type(at, NV_MEMORY_WRITEBACK);
for (i = 0; i < at->num_pages; i++)
{
page_ptr = at->page_table[i];
if (NV_GET_PAGE_COUNT(page_ptr) != page_ptr->page_count)
{
static int count = 0;
if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: VM: %s: page count != initial page count (%u,%u)\n",
__FUNCTION__, NV_GET_PAGE_COUNT(page_ptr),
page_ptr->page_count);
}
}
NV_MAYBE_UNRESERVE_PAGE(page_ptr);
if (at->flags.coherent)
{
dma_free_coherent(dev, PAGE_SIZE, (void *)page_ptr->virt_addr,
page_ptr->dma_addr);
}
else
{
NV_FREE_PAGES(page_ptr->virt_addr, 0);
}
}
}
NvUPtr nv_vm_map_pages(
struct page **pages,
NvU32 count,
NvBool cached,
NvBool unencrypted
)
{
NvUPtr virt_addr = 0;
if (!NV_MAY_SLEEP())
{
nv_printf(NV_DBG_ERRORS,
"NVRM: %s: can't map %d pages, invalid context!\n",
__FUNCTION__, count);
os_dbg_breakpoint();
return virt_addr;
}
virt_addr = nv_vmap(pages, count, cached, unencrypted);
return virt_addr;
}
void nv_vm_unmap_pages(
NvUPtr virt_addr,
NvU32 count
)
{
if (!NV_MAY_SLEEP())
{
nv_printf(NV_DBG_ERRORS,
"NVRM: %s: can't unmap %d pages at 0x%0llx, "
"invalid context!\n", __FUNCTION__, count, virt_addr);
os_dbg_breakpoint();
return;
}
nv_vunmap(virt_addr, count);
}
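/*
* Illustrative pairing (hypothetical caller): the helpers above are used
* symmetrically, e.g.
*
*     NvUPtr va = nv_vm_map_pages(pages, count, NV_TRUE, NV_FALSE);
*     if (va != 0)
*     {
*         // ... access the kernel mapping ...
*         nv_vm_unmap_pages(va, count);
*     }
*/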

@@ -0,0 +1,39 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
NvU64 NV_API_CALL nv_get_kern_phys_address(NvU64 address)
{
/* direct-mapped kernel address */
if (virt_addr_valid(address))
return __pa(address);
nv_printf(NV_DBG_ERRORS,
"NVRM: can't translate address in %s()!\n", __FUNCTION__);
return 0;
}

kernel-open/nvidia/nv.c (5461 lines; diff suppressed because it is too large)

@@ -0,0 +1,326 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* nv_gpu_ops.h
*
* This file defines the interface between the common RM layer
* and the OS-specific platform layers. (Currently supported
* are Linux and KMD.)
*
*/
#ifndef _NV_GPU_OPS_H_
#define _NV_GPU_OPS_H_
#include "nvgputypes.h"
#include "nv_uvm_types.h"
typedef struct gpuSession *gpuSessionHandle;
typedef struct gpuDevice *gpuDeviceHandle;
typedef struct gpuAddressSpace *gpuAddressSpaceHandle;
typedef struct gpuTsg *gpuTsgHandle;
typedef struct gpuChannel *gpuChannelHandle;
typedef struct gpuObject *gpuObjectHandle;
typedef struct gpuRetainedChannel_struct gpuRetainedChannel;
NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session);
NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session);
NV_STATUS nvGpuOpsDeviceCreate(struct gpuSession *session,
const gpuInfo *pGpuInfo,
const NvProcessorUuid *gpuGuid,
struct gpuDevice **device,
NvBool bCreateSmcPartition);
NV_STATUS nvGpuOpsDeviceDestroy(struct gpuDevice *device);
NV_STATUS nvGpuOpsAddressSpaceCreate(struct gpuDevice *device,
NvU64 vaBase,
NvU64 vaSize,
gpuAddressSpaceHandle *vaSpace,
UvmGpuAddressSpaceInfo *vaSpaceInfo);
NV_STATUS nvGpuOpsGetP2PCaps(gpuDeviceHandle device1,
gpuDeviceHandle device2,
getP2PCapsParams *p2pCaps);
void nvGpuOpsAddressSpaceDestroy(gpuAddressSpaceHandle vaSpace);
NV_STATUS nvGpuOpsMemoryAllocFb (gpuAddressSpaceHandle vaSpace,
NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);
NV_STATUS nvGpuOpsMemoryAllocSys (gpuAddressSpaceHandle vaSpace,
NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);
NV_STATUS nvGpuOpsPmaAllocPages(void *pPma,
NvLength pageCount,
NvU64 pageSize,
gpuPmaAllocationOptions *pPmaAllocOptions,
NvU64 *pPages);
void nvGpuOpsPmaFreePages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU64 pageSize,
NvU32 flags);
NV_STATUS nvGpuOpsPmaPinPages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU64 pageSize,
NvU32 flags);
NV_STATUS nvGpuOpsPmaUnpinPages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU64 pageSize);
NV_STATUS nvGpuOpsTsgAllocate(gpuAddressSpaceHandle vaSpace,
const gpuTsgAllocParams *params,
gpuTsgHandle *tsgHandle);
NV_STATUS nvGpuOpsChannelAllocate(const gpuTsgHandle tsgHandle,
const gpuChannelAllocParams *params,
gpuChannelHandle *channelHandle,
gpuChannelInfo *channelInfo);
NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace,
NvHandle hSrcClient, NvHandle hSrcAllocation, NvLength length, NvU64 *gpuOffset);
void nvGpuOpsTsgDestroy(struct gpuTsg *tsg);
void nvGpuOpsChannelDestroy(struct gpuChannel *channel);
void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace,
NvU64 pointer);
NV_STATUS nvGpuOpsMemoryCpuMap(gpuAddressSpaceHandle vaSpace,
NvU64 memory, NvLength length,
void **cpuPtr, NvU64 pageSize);
void nvGpuOpsMemoryCpuUnMap(gpuAddressSpaceHandle vaSpace,
void* cpuPtr);
NV_STATUS nvGpuOpsQueryCaps(struct gpuDevice *device,
gpuCaps *caps);
NV_STATUS nvGpuOpsQueryCesCaps(struct gpuDevice *device,
gpuCesCaps *caps);
NV_STATUS nvGpuOpsDupAllocation(struct gpuAddressSpace *srcVaSpace,
NvU64 srcAddress,
struct gpuAddressSpace *dstVaSpace,
NvU64 dstVaAlignment,
NvU64 *dstAddress);
NV_STATUS nvGpuOpsDupMemory(struct gpuDevice *device,
NvHandle hClient,
NvHandle hPhysMemory,
NvHandle *hDupMemory,
gpuMemoryInfo *pGpuMemoryInfo);
NV_STATUS nvGpuOpsGetGuid(NvHandle hClient, NvHandle hDevice,
NvHandle hSubDevice, NvU8 *gpuGuid,
unsigned guidLength);
NV_STATUS nvGpuOpsGetClientInfoFromPid(unsigned pid,
const NvU8 *gpuUuid,
NvHandle *hClient,
NvHandle *hDevice,
NvHandle *hSubDevice);
NV_STATUS nvGpuOpsFreeDupedHandle(struct gpuDevice *device,
NvHandle hPhysHandle);
NV_STATUS nvGpuOpsGetAttachedGpus(NvU8 *guidList, unsigned *numGpus);
NV_STATUS nvGpuOpsGetGpuInfo(const NvProcessorUuid *gpuUuid,
const gpuClientInfo *pGpuClientInfo,
gpuInfo *pGpuInfo);
NV_STATUS nvGpuOpsGetGpuIds(const NvU8 *pUuid, unsigned uuidLength, NvU32 *pDeviceId,
NvU32 *pSubdeviceId);
NV_STATUS nvGpuOpsOwnPageFaultIntr(struct gpuDevice *device, NvBool bOwnInterrupts);
NV_STATUS nvGpuOpsServiceDeviceInterruptsRM(struct gpuDevice *device);
NV_STATUS nvGpuOpsCheckEccErrorSlowpath(struct gpuChannel * channel, NvBool *bEccDbeSet);
NV_STATUS nvGpuOpsSetPageDirectory(struct gpuAddressSpace * vaSpace,
NvU64 physAddress, unsigned numEntries,
NvBool bVidMemAperture, NvU32 pasid);
NV_STATUS nvGpuOpsUnsetPageDirectory(struct gpuAddressSpace * vaSpace);
NV_STATUS nvGpuOpsGetGmmuFmt(struct gpuAddressSpace * vaSpace, void ** pFmt);
NV_STATUS nvGpuOpsInvalidateTlb(struct gpuAddressSpace * vaSpace);
NV_STATUS nvGpuOpsGetFbInfo(struct gpuDevice *device, gpuFbInfo * fbInfo);
NV_STATUS nvGpuOpsGetEccInfo(struct gpuDevice *device, gpuEccInfo * eccInfo);
NV_STATUS nvGpuOpsInitFaultInfo(struct gpuDevice *device, gpuFaultInfo *pFaultInfo);
NV_STATUS nvGpuOpsDestroyFaultInfo(struct gpuDevice *device,
gpuFaultInfo *pFaultInfo);
NV_STATUS nvGpuOpsHasPendingNonReplayableFaults(gpuFaultInfo *pFaultInfo, NvBool *hasPendingFaults);
NV_STATUS nvGpuOpsGetNonReplayableFaults(gpuFaultInfo *pFaultInfo, void *faultBuffer, NvU32 *numFaults);
NV_STATUS nvGpuOpsDupAddressSpace(struct gpuDevice *device,
NvHandle hUserClient,
NvHandle hUserVASpace,
struct gpuAddressSpace **vaSpace,
UvmGpuAddressSpaceInfo *vaSpaceInfo);
NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device,
void **pPma,
const UvmPmaStatistics **pPmaPubStats);
NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo, NvU32 accessCntrIndex);
NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device,
gpuAccessCntrInfo *pAccessCntrInfo);
NV_STATUS nvGpuOpsOwnAccessCntrIntr(struct gpuSession *session,
gpuAccessCntrInfo *pAccessCntrInfo,
NvBool bOwnInterrupts);
NV_STATUS nvGpuOpsEnableAccessCntr(struct gpuDevice *device,
gpuAccessCntrInfo *pAccessCntrInfo,
gpuAccessCntrConfig *pAccessCntrConfig);
NV_STATUS nvGpuOpsDisableAccessCntr(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo);
NV_STATUS nvGpuOpsP2pObjectCreate(struct gpuDevice *device1,
struct gpuDevice *device2,
NvHandle *hP2pObject);
NV_STATUS nvGpuOpsP2pObjectDestroy(struct gpuSession *session,
NvHandle hP2pObject);
NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace,
NvHandle hDupedMemory,
NvU64 offset,
NvU64 size,
gpuExternalMappingInfo *pGpuExternalMappingInfo);
NV_STATUS nvGpuOpsRetainChannel(struct gpuAddressSpace *vaSpace,
NvHandle hClient,
NvHandle hChannel,
gpuRetainedChannel **retainedChannel,
gpuChannelInstanceInfo *channelInstanceInfo);
void nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel);
NV_STATUS nvGpuOpsBindChannelResources(gpuRetainedChannel *retainedChannel,
gpuChannelResourceBindParams *channelResourceBindParams);
void nvGpuOpsStopChannel(gpuRetainedChannel *retainedChannel, NvBool bImmediate);
NV_STATUS nvGpuOpsGetChannelResourcePtes(struct gpuAddressSpace *vaSpace,
NvP64 resourceDescriptor,
NvU64 offset,
NvU64 size,
gpuExternalMappingInfo *pGpuExternalMappingInfo);
NV_STATUS nvGpuOpsReportNonReplayableFault(struct gpuDevice *device,
const void *pFaultPacket);
// Private interface used for windows only
#if defined(NV_WINDOWS)
NV_STATUS nvGpuOpsGetRmHandleForSession(gpuSessionHandle hSession, NvHandle *hRmClient);
NV_STATUS nvGpuOpsGetRmHandleForChannel(gpuChannelHandle hChannel, NvHandle *hRmChannel);
#endif // NV_WINDOWS
// Interface used for SR-IOV heavy
NV_STATUS nvGpuOpsPagingChannelAllocate(struct gpuDevice *device,
const gpuPagingChannelAllocParams *params,
gpuPagingChannelHandle *channelHandle,
gpuPagingChannelInfo *channelinfo);
void nvGpuOpsPagingChannelDestroy(UvmGpuPagingChannel *channel);
NV_STATUS nvGpuOpsPagingChannelsMap(struct gpuAddressSpace *srcVaSpace,
NvU64 srcAddress,
struct gpuDevice *device,
NvU64 *dstAddress);
void nvGpuOpsPagingChannelsUnmap(struct gpuAddressSpace *srcVaSpace,
NvU64 srcAddress,
struct gpuDevice *device);
NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel,
char *methodStream,
NvU32 methodStreamSize);
NV_STATUS nvGpuOpsFlushReplayableFaultBuffer(struct gpuDevice *device);
// Interface used for CCSL
NV_STATUS nvGpuOpsCcslContextInit(struct ccslContext_t **ctx,
gpuChannelHandle channel);
NV_STATUS nvGpuOpsCcslContextClear(struct ccslContext_t *ctx);
NV_STATUS nvGpuOpsCcslRotateIv(struct ccslContext_t *ctx,
NvU8 direction);
NV_STATUS nvGpuOpsCcslEncrypt(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *outputBuffer,
NvU8 *authTagBuffer);
NV_STATUS nvGpuOpsCcslEncryptWithIv(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *encryptIv,
NvU8 *outputBuffer,
NvU8 *authTagBuffer);
NV_STATUS nvGpuOpsCcslDecrypt(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 const *decryptIv,
NvU8 *outputBuffer,
NvU8 const *addAuthData,
NvU32 addAuthDataSize,
NvU8 const *authTagBuffer);
NV_STATUS nvGpuOpsCcslSign(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *authTagBuffer);
NV_STATUS nvGpuOpsQueryMessagePool(struct ccslContext_t *ctx,
NvU8 direction,
NvU64 *messageNum);
NV_STATUS nvGpuOpsIncrementIv(struct ccslContext_t *ctx,
NvU8 direction,
NvU64 increment,
NvU8 *iv);
#endif /* _NV_GPU_OPS_H_*/
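A minimal lifecycle sketch for the handles declared above, assuming the gpuInfo, UUID, and VA range come from the UVM caller (all variable names below are hypothetical, and error handling is abbreviated):

    struct gpuSession *session;
    struct gpuDevice *device;
    gpuAddressSpaceHandle vaSpace;
    UvmGpuAddressSpaceInfo vaSpaceInfo;

    if (nvGpuOpsCreateSession(&session) != NV_OK)
        return;

    /* 'info' and 'uuid' are assumed to be provided by the UVM layer. */
    if (nvGpuOpsDeviceCreate(session, &info, &uuid, &device, NV_FALSE) != NV_OK)
        goto destroy_session;

    if (nvGpuOpsAddressSpaceCreate(device, vaBase, vaSize,
                                   &vaSpace, &vaSpaceInfo) != NV_OK)
        goto destroy_device;

    /* ... allocate memory, TSGs, and channels against vaSpace ... */

    nvGpuOpsAddressSpaceDestroy(vaSpace);
destroy_device:
    nvGpuOpsDeviceDestroy(device);
destroy_session:
    nvGpuOpsDestroySession(session);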

(file diff suppressed because it is too large)

@@ -0,0 +1,55 @@
NVIDIA_SOURCES ?=
NVIDIA_SOURCES_CXX ?=
NVIDIA_SOURCES += nvidia/nv-platform.c
NVIDIA_SOURCES += nvidia/nv-dsi-parse-panel-props.c
NVIDIA_SOURCES += nvidia/nv-bpmp.c
NVIDIA_SOURCES += nvidia/nv-clk.c
NVIDIA_SOURCES += nvidia/nv-gpio.c
NVIDIA_SOURCES += nvidia/nv-backlight.c
NVIDIA_SOURCES += nvidia/nv-imp.c
NVIDIA_SOURCES += nvidia/nv-host1x.c
NVIDIA_SOURCES += nvidia/nv-platform-pm.c
NVIDIA_SOURCES += nvidia/nv-ipc-soc.c
NVIDIA_SOURCES += nvidia/nv.c
NVIDIA_SOURCES += nvidia/nv-pci.c
NVIDIA_SOURCES += nvidia/nv-dmabuf.c
NVIDIA_SOURCES += nvidia/nv-nano-timer.c
NVIDIA_SOURCES += nvidia/nv-acpi.c
NVIDIA_SOURCES += nvidia/nv-cray.c
NVIDIA_SOURCES += nvidia/nv-dma.c
NVIDIA_SOURCES += nvidia/nv-i2c.c
NVIDIA_SOURCES += nvidia/nv-mmap.c
NVIDIA_SOURCES += nvidia/nv-p2p.c
NVIDIA_SOURCES += nvidia/nv-pat.c
NVIDIA_SOURCES += nvidia/nv-procfs.c
NVIDIA_SOURCES += nvidia/nv-usermap.c
NVIDIA_SOURCES += nvidia/nv-vm.c
NVIDIA_SOURCES += nvidia/nv-vtophys.c
NVIDIA_SOURCES += nvidia/os-interface.c
NVIDIA_SOURCES += nvidia/os-mlock.c
NVIDIA_SOURCES += nvidia/os-pci.c
NVIDIA_SOURCES += nvidia/os-registry.c
NVIDIA_SOURCES += nvidia/os-usermap.c
NVIDIA_SOURCES += nvidia/nv-modeset-interface.c
NVIDIA_SOURCES += nvidia/nv-pci-table.c
NVIDIA_SOURCES += nvidia/nv-kthread-q.c
NVIDIA_SOURCES += nvidia/nv-memdbg.c
NVIDIA_SOURCES += nvidia/nv-ibmnpu.c
NVIDIA_SOURCES += nvidia/nv-report-err.c
NVIDIA_SOURCES += nvidia/nv-rsync.c
NVIDIA_SOURCES += nvidia/nv-msi.c
NVIDIA_SOURCES += nvidia/nv-caps.c
NVIDIA_SOURCES += nvidia/nv-frontend.c
NVIDIA_SOURCES += nvidia/nv_uvm_interface.c
NVIDIA_SOURCES += nvidia/libspdm_aead.c
NVIDIA_SOURCES += nvidia/libspdm_ecc.c
NVIDIA_SOURCES += nvidia/libspdm_hkdf.c
NVIDIA_SOURCES += nvidia/libspdm_rand.c
NVIDIA_SOURCES += nvidia/libspdm_shash.c
NVIDIA_SOURCES += nvidia/libspdm_aead_aes_gcm.c
NVIDIA_SOURCES += nvidia/libspdm_sha.c
NVIDIA_SOURCES += nvidia/libspdm_hmac_sha.c
NVIDIA_SOURCES += nvidia/libspdm_hkdf_sha.c
NVIDIA_SOURCES += nvidia/libspdm_ec.c
NVIDIA_SOURCES += nvidia/libspdm_x509.c


@@ -0,0 +1,269 @@
###########################################################################
# Kbuild fragment for nvidia.ko
###########################################################################
#
# Define NVIDIA_{SOURCES,OBJECTS}
#
include $(src)/nvidia/nvidia-sources.Kbuild
NVIDIA_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_SOURCES))
obj-m += nvidia.o
nvidia-y := $(NVIDIA_OBJECTS)
NVIDIA_KO = nvidia/nvidia.ko
#
# nv-kernel.o_binary is the core binary component of nvidia.ko, shared
# across all UNIX platforms. Create a symlink, "nv-kernel.o" that
# points to nv-kernel.o_binary, and add nv-kernel.o to the list of
# objects to link into nvidia.ko.
#
# Note that:
# - The kbuild "clean" rule will delete all objects in nvidia-y (which
# is why we use a symlink instead of just adding nv-kernel.o_binary
# to nvidia-y).
# - kbuild normally uses the naming convention of ".o_shipped" for
# binary files. That is not used here, because the kbuild rule to
# create the "normal" object file from ".o_shipped" does a copy, not
# a symlink. This file is quite large, so a symlink is preferred.
# - The file added to nvidia-y should be relative to gmake's cwd.
# But, the target for the symlink rule should be prepended with $(obj).
# - The "symlink" command is called using kbuild's if_changed macro to
# generate an .nv-kernel.o.cmd file which can be used on subsequent
# runs to determine if the command line to create the symlink changed
# and needs to be re-executed.
#
NVIDIA_BINARY_OBJECT := $(src)/nvidia/nv-kernel.o_binary
NVIDIA_BINARY_OBJECT_O := nvidia/nv-kernel.o
quiet_cmd_symlink = SYMLINK $@
cmd_symlink = ln -sf $< $@
targets += $(NVIDIA_BINARY_OBJECT_O)
$(obj)/$(NVIDIA_BINARY_OBJECT_O): $(NVIDIA_BINARY_OBJECT) FORCE
$(call if_changed,symlink)
nvidia-y += $(NVIDIA_BINARY_OBJECT_O)
#
# Define nvidia.ko-specific CFLAGS.
#
NVIDIA_CFLAGS += -I$(src)/nvidia
NVIDIA_CFLAGS += -DNVIDIA_UNDEF_LEGACY_BIT_MACROS
ifeq ($(NV_BUILD_TYPE),release)
NVIDIA_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG
endif
ifeq ($(NV_BUILD_TYPE),develop)
NVIDIA_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_MEM_LOGGER
endif
ifeq ($(NV_BUILD_TYPE),debug)
NVIDIA_CFLAGS += -DDEBUG -D_DEBUG -UNDEBUG -DNV_MEM_LOGGER
endif
$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_OBJECTS), $(NVIDIA_CFLAGS))
#
# nv-procfs.c requires nv-compiler.h
#
NV_COMPILER_VERSION_HEADER = $(obj)/nv_compiler.h
$(NV_COMPILER_VERSION_HEADER):
@echo \#define NV_COMPILER \"`$(CC) -v 2>&1 | tail -n 1`\" > $@
$(obj)/nvidia/nv-procfs.o: $(NV_COMPILER_VERSION_HEADER)
clean-files += $(NV_COMPILER_VERSION_HEADER)
#
# Build nv-interface.o from the kernel interface layer objects, suitable
# for further processing by the top-level makefile to produce a precompiled
# kernel interface file.
#
NVIDIA_INTERFACE := nvidia/nv-interface.o
# Linux kernels v5.12 and later look at "always-y"; kernels before v5.6
# look at "always"; kernels between v5.6 and v5.12 look at both.
always += $(NVIDIA_INTERFACE)
always-y += $(NVIDIA_INTERFACE)
$(obj)/$(NVIDIA_INTERFACE): $(addprefix $(obj)/,$(NVIDIA_OBJECTS))
$(LD) -r -o $@ $^
#
# Register the conftests needed by nvidia.ko
#
NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_OBJECTS)
NV_CONFTEST_FUNCTION_COMPILE_TESTS += hash__remap_4k_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_array_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_array_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_cache
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_wc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_driver_hardened
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_driver_hardened_wc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_cache_shared
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_get_domain_bus_and_slot
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_num_physpages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data
NV_CONFTEST_FUNCTION_COMPILE_TESTS += xen_ioemu_inject_msi
NV_CONFTEST_FUNCTION_COMPILE_TESTS += phys_to_dma
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_dma_ops
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_attr_macros
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_page_attrs
NV_CONFTEST_FUNCTION_COMPILE_TESTS += write_cr4
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_find_node_by_phandle
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_node_to_nid
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pnv_pci_get_npu_dev
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_ibm_chip_id
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_bus_address
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_stop_and_remove_bus_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_rebar_get_possible_sizes
NV_CONFTEST_FUNCTION_COMPILE_TESTS += wait_for_random_bytes
NV_CONFTEST_FUNCTION_COMPILE_TESTS += register_cpu_notifier
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cpuhp_setup_state
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_resource
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_backlight_device_by_name
NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_msix_range
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_read_has_pointer_pos_arg
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_write_has_pointer_pos_arg
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_direct_map_resource
NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_get_platform
NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_bpmp_send_receive
NV_CONFTEST_FUNCTION_COMPILE_TESTS += flush_cache_all
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += jiffies_to_timespec
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += full_name_hash
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_atomic_ops_to_root
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vga_tryget
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_platform_has
NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter
NV_CONFTEST_FUNCTION_COMPILE_TESTS += unsafe_follow_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += add_memory_driver_managed
NV_CONFTEST_FUNCTION_COMPILE_TESTS += device_property_read_u64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_of_platform_populate
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_dma_configure
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_count_elems_of_size
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_read_variable_u8_array
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_read_variable_u32_array
NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_new_client_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_unregister_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_named_gpio
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_gpio_request_one
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_direction_input
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_direction_output
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_get_value
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_set_value
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_to_irq
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_of_icc_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_put
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_set_bw
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_export_args
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap_atomic
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map_atomic
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_has_dynamic_attachment
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_attachment_has_peer2peer
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_set_mask_and_coherent
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_clk_bulk_get_all
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_task_ioprio
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mdev_set_iommu_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += offline_and_remove_memory
NV_CONFTEST_FUNCTION_COMPILE_TESTS += remove_conflicting_framebuffers
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_of_node_to_nid
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_sme_active
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_map_sg_attrs
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_dma_ops
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present___close_fd
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_close_fd
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_unused_fd
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_unused_fd_flags
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_get_default_device
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_syncpt_unit_interface_get_byte_offset
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_syncpt_unit_interface_get_aperture
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_register_ipc_client
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_unregister_ipc_client
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_client_ipc_send_recv
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_dram_clk_to_mc_clk
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_dram_num_channels
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dram_types
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pxm_to_node
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_screen_info
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_i2c_bus_status
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_fuse_control_read
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_get_platform
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pci_find_host_bridge
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_send_cmd
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_set_init_cb
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_clear_init_cb
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_alloc_mem_from_gscco
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_free_gscco_mem
NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += noncoherent_swiotlb_dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_insert_pfn_prot
NV_CONFTEST_TYPE_COMPILE_TESTS += vmf_insert_pfn_prot
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += kmem_cache_has_kobj_remove_work
NV_CONFTEST_TYPE_COMPILE_TESTS += sysfs_slab_unlink
NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64
NV_CONFTEST_TYPE_COMPILE_TESTS += vmalloc_has_pgprot_t_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_channel_state
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_dev_has_ats_enabled
NV_CONFTEST_TYPE_COMPILE_TESTS += remove_memory_has_nid_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += add_memory_driver_managed_has_mhp_flags_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += num_registered_fb
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_driver_has_driver_managed_dma
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += memory_failure_has_trapno_arg
NV_CONFTEST_GENERIC_COMPILE_TESTS += dom0_kernel_present
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_vgpu_kvm_build
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_build
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_csp_build
NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages
NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages_remote
NV_CONFTEST_GENERIC_COMPILE_TESTS += pin_user_pages
NV_CONFTEST_GENERIC_COMPILE_TESTS += pin_user_pages_remote
NV_CONFTEST_GENERIC_COMPILE_TESTS += pm_runtime_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += vm_fault_t
NV_CONFTEST_GENERIC_COMPILE_TESTS += pci_class_multimedia_hd_audio
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += vfio_pci_core_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += mdev_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += cmd_uphy_display_port_init
NV_CONFTEST_GENERIC_COMPILE_TESTS += cmd_uphy_display_port_off
NV_CONFTEST_GENERIC_COMPILE_TESTS += memory_failure_mf_sw_simulated_defined


@@ -0,0 +1,41 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
bool libspdm_aead_gcm_prealloc(void **context);
void libspdm_aead_free(void *context);
bool libspdm_aead_aes_gcm_encrypt_prealloc(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag_out, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
bool libspdm_aead_aes_gcm_decrypt_prealloc(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);

(file diff suppressed because it is too large)

@@ -0,0 +1,287 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
static inline int nv_follow_pfn(struct vm_area_struct *vma,
unsigned long address,
unsigned long *pfn)
{
#if defined(NV_UNSAFE_FOLLOW_PFN_PRESENT)
return unsafe_follow_pfn(vma, address, pfn);
#else
return follow_pfn(vma, address, pfn);
#endif
}
/*!
* @brief Locates the PFNs for a user IO address range, and converts those to
* their associated PTEs.
*
* @param[in] vma VMA that contains the virtual address range given by the
* start and page count parameters.
* @param[in] start Beginning of the virtual address range of the IO PTEs.
* @param[in] page_count Number of pages containing the IO range being
* mapped.
* @param[in,out] pte_array Storage array for PTE addresses. Must be large
* enough to contain at least page_count pointers.
*
* @return NV_OK if the PTEs were identified successfully, error otherwise.
*/
static NV_STATUS get_io_ptes(struct vm_area_struct *vma,
NvUPtr start,
NvU64 page_count,
NvU64 **pte_array)
{
NvU64 i;
unsigned long pfn;
for (i = 0; i < page_count; i++)
{
if (nv_follow_pfn(vma, (start + (i * PAGE_SIZE)), &pfn) < 0)
{
return NV_ERR_INVALID_ADDRESS;
}
pte_array[i] = (NvU64 *)(pfn << PAGE_SHIFT);
if (i == 0)
continue;
//
// This interface is to be used for contiguous, uncacheable I/O regions.
// Internally, osCreateOsDescriptorFromIoMemory() checks the user-provided
// flags against this, and creates a single memory descriptor with the same
// attributes. This check ensures the actual mapping supplied matches the
// user's declaration: the PFNs must form a contiguous range, and we
// return an error if they do not.
//
if ((NvU64)pte_array[i] != (((NvU64)pte_array[i-1]) + PAGE_SIZE))
{
return NV_ERR_INVALID_ADDRESS;
}
}
return NV_OK;
}
/*!
* @brief Pins user IO pages that have been mapped to the user process's virtual
* address space with remap_pfn_range.
*
* @param[in] vma VMA that contains the virtual address range given by the
* start and the page count.
* @param[in] start Beginning of the virtual address range of the IO pages.
* @param[in] page_count Number of pages to pin from start.
* @param[in,out] page_array Storage array for pointers to the pinned pages.
* Must be large enough to contain at least page_count
* pointers.
*
* @return NV_OK if the pages were pinned successfully, error otherwise.
*/
static NV_STATUS get_io_pages(struct vm_area_struct *vma,
NvUPtr start,
NvU64 page_count,
struct page **page_array)
{
NV_STATUS rmStatus = NV_OK;
NvU64 i, pinned = 0;
unsigned long pfn;
for (i = 0; i < page_count; i++)
{
if ((nv_follow_pfn(vma, (start + (i * PAGE_SIZE)), &pfn) < 0) ||
(!pfn_valid(pfn)))
{
rmStatus = NV_ERR_INVALID_ADDRESS;
break;
}
// Page-backed memory mapped to userspace with remap_pfn_range
page_array[i] = pfn_to_page(pfn);
get_page(page_array[i]);
pinned++;
}
if (pinned < page_count)
{
for (i = 0; i < pinned; i++)
put_page(page_array[i]);
rmStatus = NV_ERR_INVALID_ADDRESS;
}
return rmStatus;
}
NV_STATUS NV_API_CALL os_lookup_user_io_memory(
void *address,
NvU64 page_count,
NvU64 **pte_array,
void **page_array
)
{
NV_STATUS rmStatus;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long pfn;
NvUPtr start = (NvUPtr)address;
void **result_array;
if (!NV_MAY_SLEEP())
{
nv_printf(NV_DBG_ERRORS,
"NVRM: %s(): invalid context!\n", __FUNCTION__);
return NV_ERR_NOT_SUPPORTED;
}
rmStatus = os_alloc_mem((void **)&result_array, (page_count * sizeof(NvP64)));
if (rmStatus != NV_OK)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: failed to allocate page table!\n");
return rmStatus;
}
nv_mmap_read_lock(mm);
// Find the first VMA which intersects the interval [start, start + 1).
vma = find_vma_intersection(mm, start, start+1);
// Verify that the given address range is contained in a single vma
if ((vma == NULL) || ((vma->vm_flags & (VM_IO | VM_PFNMAP)) == 0) ||
!((vma->vm_start <= start) &&
((vma->vm_end - start) >> PAGE_SHIFT >= page_count)))
{
nv_printf(NV_DBG_ERRORS,
"Cannot map memory with base addr 0x%llx and size of 0x%llx pages\n",
start, page_count);
rmStatus = NV_ERR_INVALID_ADDRESS;
goto done;
}
if (nv_follow_pfn(vma, start, &pfn) < 0)
{
rmStatus = NV_ERR_INVALID_ADDRESS;
goto done;
}
if (pfn_valid(pfn))
{
rmStatus = get_io_pages(vma, start, page_count, (struct page **)result_array);
if (rmStatus == NV_OK)
*page_array = (void *)result_array;
}
else
{
rmStatus = get_io_ptes(vma, start, page_count, (NvU64 **)result_array);
if (rmStatus == NV_OK)
*pte_array = (NvU64 *)result_array;
}
done:
nv_mmap_read_unlock(mm);
if (rmStatus != NV_OK)
{
os_free_mem(result_array);
}
return rmStatus;
}
NV_STATUS NV_API_CALL os_lock_user_pages(
void *address,
NvU64 page_count,
void **page_array,
NvU32 flags
)
{
NV_STATUS rmStatus;
struct mm_struct *mm = current->mm;
struct page **user_pages;
NvU64 i, pinned;
unsigned int gup_flags = DRF_VAL(_LOCK_USER_PAGES, _FLAGS, _WRITE, flags) ? FOLL_WRITE : 0;
int ret;
if (!NV_MAY_SLEEP())
{
nv_printf(NV_DBG_ERRORS,
"NVRM: %s(): invalid context!\n", __FUNCTION__);
return NV_ERR_NOT_SUPPORTED;
}
rmStatus = os_alloc_mem((void **)&user_pages,
(page_count * sizeof(*user_pages)));
if (rmStatus != NV_OK)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: failed to allocate page table!\n");
return rmStatus;
}
nv_mmap_read_lock(mm);
ret = NV_PIN_USER_PAGES((unsigned long)address,
page_count, gup_flags, user_pages, NULL);
nv_mmap_read_unlock(mm);
pinned = ret;
if (ret < 0)
{
os_free_mem(user_pages);
return NV_ERR_INVALID_ADDRESS;
}
else if (pinned < page_count)
{
for (i = 0; i < pinned; i++)
NV_UNPIN_USER_PAGE(user_pages[i]);
os_free_mem(user_pages);
return NV_ERR_INVALID_ADDRESS;
}
*page_array = user_pages;
return NV_OK;
}
NV_STATUS NV_API_CALL os_unlock_user_pages(
NvU64 page_count,
void *page_array
)
{
NvBool write = 1;
struct page **user_pages = page_array;
NvU32 i;
for (i = 0; i < page_count; i++)
{
if (write)
set_page_dirty_lock(user_pages[i]);
NV_UNPIN_USER_PAGE(user_pages[i]);
}
os_free_mem(user_pages);
return NV_OK;
}
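/*
* Illustrative pairing (hypothetical caller): pin a user buffer, use it, and
* release it. 'user_va', 'page_count', and 'flags' are placeholders; 'flags'
* would be built from the _LOCK_USER_PAGES DRF fields referenced above.
*
*     void *page_array;
*     if (os_lock_user_pages(user_va, page_count, &page_array, flags) == NV_OK)
*     {
*         // ... program DMA against the pinned pages ...
*         os_unlock_user_pages(page_count, page_array);
*     }
*/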

kernel-open/nvidia/os-pci.c

@@ -0,0 +1,160 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
void* NV_API_CALL os_pci_init_handle(
NvU32 domain,
NvU8 bus,
NvU8 slot,
NvU8 function,
NvU16 *vendor,
NvU16 *device
)
{
struct pci_dev *dev;
unsigned int devfn = PCI_DEVFN(slot, function);
if (!NV_MAY_SLEEP())
return NULL;
dev = NV_GET_DOMAIN_BUS_AND_SLOT(domain, bus, devfn);
if (dev != NULL)
{
if (vendor) *vendor = dev->vendor;
if (device) *device = dev->device;
pci_dev_put(dev); /* TODO: Fix me! (hotplug) */
}
return (void *) dev;
}
NV_STATUS NV_API_CALL os_pci_read_byte(
void *handle,
NvU32 offset,
NvU8 *pReturnValue
)
{
if (offset >= NV_PCIE_CFG_MAX_OFFSET)
{
*pReturnValue = 0xff;
return NV_ERR_NOT_SUPPORTED;
}
pci_read_config_byte( (struct pci_dev *) handle, offset, pReturnValue);
return NV_OK;
}
NV_STATUS NV_API_CALL os_pci_read_word(
void *handle,
NvU32 offset,
NvU16 *pReturnValue
)
{
if (offset >= NV_PCIE_CFG_MAX_OFFSET)
{
*pReturnValue = 0xffff;
return NV_ERR_NOT_SUPPORTED;
}
pci_read_config_word( (struct pci_dev *) handle, offset, pReturnValue);
return NV_OK;
}
NV_STATUS NV_API_CALL os_pci_read_dword(
void *handle,
NvU32 offset,
NvU32 *pReturnValue
)
{
if (offset >= NV_PCIE_CFG_MAX_OFFSET)
{
*pReturnValue = 0xffffffff;
return NV_ERR_NOT_SUPPORTED;
}
pci_read_config_dword( (struct pci_dev *) handle, offset, pReturnValue);
return NV_OK;
}
NV_STATUS NV_API_CALL os_pci_write_byte(
void *handle,
NvU32 offset,
NvU8 value
)
{
if (offset >= NV_PCIE_CFG_MAX_OFFSET)
return NV_ERR_NOT_SUPPORTED;
pci_write_config_byte( (struct pci_dev *) handle, offset, value);
return NV_OK;
}
NV_STATUS NV_API_CALL os_pci_write_word(
void *handle,
NvU32 offset,
NvU16 value
)
{
if (offset >= NV_PCIE_CFG_MAX_OFFSET)
return NV_ERR_NOT_SUPPORTED;
pci_write_config_word( (struct pci_dev *) handle, offset, value);
return NV_OK;
}
NV_STATUS NV_API_CALL os_pci_write_dword(
void *handle,
NvU32 offset,
NvU32 value
)
{
if (offset >= NV_PCIE_CFG_MAX_OFFSET)
return NV_ERR_NOT_SUPPORTED;
pci_write_config_dword( (struct pci_dev *) handle, offset, value);
return NV_OK;
}
NvBool NV_API_CALL os_pci_remove_supported(void)
{
#if defined NV_PCI_STOP_AND_REMOVE_BUS_DEVICE
return NV_TRUE;
#else
return NV_FALSE;
#endif
}
void NV_API_CALL os_pci_remove(
void *handle
)
{
#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE)
NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(handle);
#elif defined(DEBUG)
nv_printf(NV_DBG_ERRORS,
"NVRM: %s() is called even though NV_PCI_STOP_AND_REMOVE_BUS_DEVICE is not defined\n",
__FUNCTION__);
os_dbg_breakpoint();
#endif
}
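/*
* Illustrative use (hypothetical caller): resolve a handle for the device at
* 0000:01:00.0 and read the 32-bit ID word at config offset 0, which holds
* (device << 16) | vendor.
*
*     NvU16 vendor, device;
*     NvU32 id;
*     void *handle = os_pci_init_handle(0, 1, 0, 0, &vendor, &device);
*     if ((handle != NULL) && (os_pci_read_dword(handle, 0, &id) == NV_OK))
*     {
*         // ... id now combines the device and vendor IDs ...
*     }
*/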


@@ -0,0 +1,344 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2000-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#define NV_DEFINE_REGISTRY_KEY_TABLE
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-reg.h"
#include "nv-gpu-info.h"
/*!
* @brief This function parses the PCI BDF identifier string and returns the
* Domain, Bus, Device and function components from the PCI BDF string.
*
* The parser accepts the PCI BDF string in any of the following
* three formats:
*
* 1) bus:slot : Domain and function defaults to 0.
* 2) domain:bus:slot : Function defaults to 0.
* 3) domain:bus:slot.func : Complete PCI dev id string.
*
* @param[in] pci_dev_str String containing the BDF to be parsed.
* @param[out] pci_domain Pointer where pci_domain is to be returned.
* @param[out] pci_bus Pointer where pci_bus is to be returned.
* @param[out] pci_slot Pointer where pci_slot is to be returned.
* @param[out] pci_func Pointer where pci_func is to be returned.
*
* @return NV_OK on success, or an NV_STATUS error code otherwise.
*/
static NV_STATUS pci_str_to_bdf(char *pci_dev_str, NvU32 *pci_domain,
NvU32 *pci_bus, NvU32 *pci_slot, NvU32 *pci_func)
{
char *option_string = NULL;
char *token, *string;
NvU32 domain, bus, slot;
NV_STATUS status = NV_OK;
//
    // rm_remove_spaces() allocates memory, hence we need to keep a pointer
    // to the original string for freeing at the end of the function.
//
if ((option_string = rm_remove_spaces(pci_dev_str)) == NULL)
{
// memory allocation failed, returning
return NV_ERR_GENERIC;
}
string = option_string;
if (!strlen(string) || !pci_domain || !pci_bus || !pci_slot || !pci_func)
{
status = NV_ERR_INVALID_ARGUMENT;
goto done;
}
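    // Split at '.' first: 'token' receives "domain:bus[:slot]" and 'string'
    // is left pointing at the optional single-digit function number.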
if ((token = strsep(&string, ".")) != NULL)
{
        // A PCI device can have at most eight functions (0-7).
if ((string != NULL) && (!(*string >= '0' && *string <= '7') ||
(strlen(string) > 1)))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: Invalid PCI function in token %s\n",
pci_dev_str);
status = NV_ERR_INVALID_ARGUMENT;
goto done;
}
else if (string == NULL)
{
*pci_func = 0;
}
else
{
*pci_func = (NvU32)(*string - '0');
}
domain = simple_strtoul(token, &string, 16);
if ((string == NULL) || (*string != ':') || (*(string + 1) == '\0'))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: Invalid PCI domain/bus in token %s\n",
pci_dev_str);
status = NV_ERR_INVALID_ARGUMENT;
goto done;
}
token = string;
bus = simple_strtoul((token + 1), &string, 16);
if (string == NULL)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: Invalid PCI bus/slot in token %s\n",
pci_dev_str);
status = NV_ERR_INVALID_ARGUMENT;
goto done;
}
if (*string != '\0')
{
if ((*string != ':') || (*(string + 1) == '\0'))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: Invalid PCI slot in token %s\n",
pci_dev_str);
status = NV_ERR_INVALID_ARGUMENT;
goto done;
}
token = string;
slot = (NvU32)simple_strtoul(token + 1, &string, 16);
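            // simple_strtoul() consumed no hex digits if it returned 0 and
            // left 'string' at 'token + 1'; treat that as an invalid slot.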
if ((slot == 0) && ((token + 1) == string))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: Invalid PCI slot in token %s\n",
pci_dev_str);
status = NV_ERR_INVALID_ARGUMENT;
goto done;
}
*pci_domain = domain;
*pci_bus = bus;
*pci_slot = slot;
}
else
{
*pci_slot = bus;
*pci_bus = domain;
*pci_domain = 0;
}
status = NV_OK;
}
else
{
status = NV_ERR_INVALID_ARGUMENT;
}
done:
    // Free the memory allocated by rm_remove_spaces().
os_free_mem(option_string);
return status;
}
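/*
 * For illustration, the three accepted formats parse as follows (all
 * components are interpreted as hex by simple_strtoul(..., 16)):
 *
 *   "03:00"        -> domain 0,      bus 0x03, slot 0x00, func 0
 *   "0000:03:00"   -> domain 0x0000, bus 0x03, slot 0x00, func 0
 *   "0000:03:00.1" -> domain 0x0000, bus 0x03, slot 0x00, func 1
 */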
/*!
 * @brief This function parses the per-GPU-device registry keys. It accepts a
 * semicolon-separated list of key=value pairs. The first key=value pair MUST
 * be "pci=DDDD:BB:DD.F;", where DDDD is the domain, BB the bus, DD the device
 * slot and F the function. This PCI BDF identifies the GPU to which the
 * registry keys that follow are assigned.
 * If no GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is
 * found, all of the registry keys that follow are skipped until the next
 * valid "pci=DDDD:BB:DD.F;" identifier is found. The following are the
 * valid formats for the value of the "pci" string:
 * 1) bus:slot : Domain and function default to 0.
* 2) domain:bus:slot : Function defaults to 0.
* 3) domain:bus:slot.func : Complete PCI dev id string.
 *
* @param[in] sp pointer to nvidia_stack_t struct.
*
* @return NV_OK if succeeds, or NV_STATUS error code otherwise.
*/
NV_STATUS nv_parse_per_device_option_string(nvidia_stack_t *sp)
{
NV_STATUS status = NV_OK;
char *option_string = NULL;
char *ptr, *token;
char *name, *value;
NvU32 data, domain, bus, slot, func;
nv_linux_state_t *nvl = NULL;
nv_state_t *nv = NULL;
if (NVreg_RegistryDwordsPerDevice != NULL)
{
if ((option_string = rm_remove_spaces(NVreg_RegistryDwordsPerDevice)) == NULL)
{
return NV_ERR_GENERIC;
}
ptr = option_string;
while ((token = strsep(&ptr, ";")) != NULL)
{
if (!(name = strsep(&token, "=")) || !strlen(name))
{
continue;
}
if (!(value = strsep(&token, "=")) || !strlen(value))
{
continue;
}
if (strsep(&token, "=") != NULL)
{
continue;
}
            // If this key is "pci", the value is a PCI device ID string,
            // which needs special parsing because it is NOT a dword.
if (strcmp(name, NV_REG_PCI_DEVICE_BDF) == 0)
{
status = pci_str_to_bdf(value, &domain, &bus, &slot, &func);
// Check if PCI_DEV id string was in a valid format or NOT.
if (NV_OK != status)
{
                    // Reset the cached device so subsequent keys are skipped.
nv = NULL;
}
else
{
nvl = find_pci(domain, bus, slot, func);
//
                    // If no GPU corresponding to this BDF is found, reset
                    // the cached state. This helps ignore the following
                    // registry keys until a valid PCI BDF is found on the
                    // command line.
//
if (!nvl)
{
nv = NULL;
}
else
{
nv = NV_STATE_PTR(nvl);
}
}
continue;
}
//
            // If no device is currently cached (the last "pci=" entry was
            // invalid or did not match a GPU), skip the successive registry
            // entries (<key, value> pairs) until a valid PCI_DEV string is
            // encountered on the command line.
//
if (!nv)
continue;
data = (NvU32)simple_strtoul(value, NULL, 0);
rm_write_registry_dword(sp, nv, name, data);
}
os_free_mem(option_string);
}
return status;
}
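/*
 * For illustration only, a per-device option string in the accepted format
 * (the key names here are placeholders, not real registry keys):
 *
 *   NVreg_RegistryDwordsPerDevice="pci=0000:03:00.0;SomeKey=0x1;pci=04:00;OtherKey=2"
 *
 * Keys following each "pci=" entry apply to the GPU it selected. Dword
 * values are parsed with simple_strtoul(value, NULL, 0), so decimal,
 * octal ("0...") and hex ("0x...") spellings are all accepted.
 */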
/*
* Compare given string UUID with the GpuBlacklist or ExcludedGpus registry
* parameter string and return whether the UUID is in the GPU exclusion list
*/
NvBool nv_is_uuid_in_gpu_exclusion_list(const char *uuid)
{
const char *input;
char *list;
char *ptr;
char *token;
//
// When both NVreg_GpuBlacklist and NVreg_ExcludedGpus are defined
// NVreg_ExcludedGpus takes precedence.
//
if (NVreg_ExcludedGpus != NULL)
input = NVreg_ExcludedGpus;
else if (NVreg_GpuBlacklist != NULL)
input = NVreg_GpuBlacklist;
else
return NV_FALSE;
if ((list = rm_remove_spaces(input)) == NULL)
return NV_FALSE;
ptr = list;
while ((token = strsep(&ptr, ",")) != NULL)
{
if (strcmp(token, uuid) == 0)
{
os_free_mem(list);
return NV_TRUE;
}
}
os_free_mem(list);
return NV_FALSE;
}
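/*
 * For illustration, the exclusion list is a comma-separated list of GPU
 * UUID strings (spaces are stripped before matching); the UUID below is a
 * placeholder:
 *
 *   NVreg_ExcludedGpus="GPU-11111111-2222-3333-4444-555555555555"
 *
 * A UUID must match a token exactly for the GPU to be excluded.
 */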
NV_STATUS NV_API_CALL os_registry_init(void)
{
nv_parm_t *entry;
unsigned int i;
nvidia_stack_t *sp = NULL;
if (nv_kmem_cache_alloc_stack(&sp) != 0)
{
return NV_ERR_NO_MEMORY;
}
if (NVreg_RmNvlinkBandwidth != NULL)
{
rm_write_registry_string(sp, NULL,
"RmNvlinkBandwidth",
NVreg_RmNvlinkBandwidth,
strlen(NVreg_RmNvlinkBandwidth));
}
if (NVreg_RmMsg != NULL)
{
rm_write_registry_string(sp, NULL,
"RmMsg", NVreg_RmMsg, strlen(NVreg_RmMsg));
}
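    //
    // Parse the global RegistryDwords option string, then publish each
    // built-in module parameter from the nv_parms[] table as a registry
    // dword.
    //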
rm_parse_option_string(sp, NVreg_RegistryDwords);
for (i = 0; (entry = &nv_parms[i])->name != NULL; i++)
{
rm_write_registry_dword(sp, NULL, entry->name, *entry->data);
}
nv_kmem_cache_free_stack(sp);
return NV_OK;
}

@@ -0,0 +1,78 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
void* NV_API_CALL os_map_user_space(
NvU64 start,
NvU64 size_bytes,
NvU32 mode,
NvU32 protect,
void **priv_data
)
{
return (void *)(NvUPtr)start;
}
void NV_API_CALL os_unmap_user_space(
void *address,
NvU64 size,
void *priv_data
)
{
}
NV_STATUS NV_API_CALL os_match_mmap_offset(
void *pAllocPrivate,
NvU64 offset,
NvU64 *pPageIndex
)
{
nv_alloc_t *at = pAllocPrivate;
NvU64 i;
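    //
    // For a contiguous allocation only page_table[0] holds a physical
    // address and page i lives at phys_addr + (i * PAGE_SIZE); otherwise
    // each page's physical address is recorded individually.
    //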
for (i = 0; i < at->num_pages; i++)
{
if (at->flags.contig)
{
if (offset == (at->page_table[0]->phys_addr + (i * PAGE_SIZE)))
{
*pPageIndex = i;
return NV_OK;
}
}
else
{
if (offset == at->page_table[i]->phys_addr)
{
*pPageIndex = i;
return NV_OK;
}
}
}
return NV_ERR_OBJECT_NOT_FOUND;
}

@@ -0,0 +1,31 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RMP2PDEFINES_H_
#define _RMP2PDEFINES_H_
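/*
 * GPU page sizes used by the peer-to-peer (P2P) interface. The shifts are
 * plain arithmetic: (4 << 10) = 4 KiB, (64 << 10) = 64 KiB and
 * (128 << 10) = 128 KiB.
 */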
#define NVRM_P2P_PAGESIZE_SMALL_4K (4 << 10)
#define NVRM_P2P_PAGESIZE_BIG_64K (64 << 10)
#define NVRM_P2P_PAGESIZE_BIG_128K (128 << 10)
#endif