Updating prebuilts and/or headers

af3ee56442f16029cb9b13537477c384226b22fc - CODE_OF_CONDUCT.md
ac7f91dfb6c5c469d2d8196c6baebe46ede5aee0 - CHANGELOG.md
1b03ad8c20ddb6d129ade64846377cc86ce4c1de - README.md
d13779dbbab1c776db15f462cd46b29f2c0f8c7c - Makefile
ec5f1eb408e0b650158e0310fb1ddd8e9b323a6f - CONTRIBUTING.md
5728867ce2e96b63b29367be6aa1c0e47bcafc8f - SECURITY.md
7d577fdb9594ae572ff38fdda682a4796ab832ca - COPYING
6b73bf6a534ddc0f64e8ba88739381c3b7fb4b5c - nv-compiler.sh
7ad4bb8aebd57a9be26329a611b14c5a70ccf2b7 - nouveau/extract-firmware-nouveau.py
36f9753dbbef7dd5610312d5b14bffac1a93cee4 - nouveau/nouveau_firmware_layout.ods
80545889e3c9967fd0ae12a65005be31bac354f2 - src/nvidia-modeset/Makefile
80c2c9a2a05beb0202239db8b0dd7080ff21c194 - src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h
36c20e9c111e66601b025802f840e7b87d09cdde - src/nvidia-modeset/kapi/interface/nvkms-kapi.h
27612b72a77ac67cd468ac7f15948d2ad78defed - src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h
727bd77cfbc9ac4989c2ab7eec171ceb516510aa - src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h
01d943d6edb0c647c2b8dbc44460948665b03e7a - src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c
ce42ceac4c4cf9d249d66ab57ae2f435cd9623fc - src/nvidia-modeset/kapi/src/nvkms-kapi-sync.c
67fe73dc7149daf807194bd9a0f96252cb452179 - src/nvidia-modeset/kapi/src/nvkms-kapi.c
2ea1436104463c5e3d177e8574c3b4298976d37e - src/nvidia-modeset/interface/nvkms-ioctl.h
8e3e74d2b3f45381e7b0012d930cf451cbd1728f - src/nvidia-modeset/interface/nvkms-sync.h
d51449fa2fd19748007f2e98f0233c92b45f9572 - src/nvidia-modeset/interface/nvkms-api-types.h
4da2125966732a80fc154cea4b18b2372b12501e - src/nvidia-modeset/interface/nvkms-api.h
b986bc6591ba17a74ad81ec4c93347564c6d5165 - src/nvidia-modeset/interface/nvkms-format.h
f5f3b11c78a8b0eef40c09e1751615a47f516edb - src/nvidia-modeset/include/nvkms-hal.h
ebafc51b2b274cd1818e471850a5efa9618eb17d - src/nvidia-modeset/include/nvkms-prealloc.h
118d0ea84ff81de16fbdc2c7daf249ee5c82ed6e - src/nvidia-modeset/include/nvkms-modepool.h
6e3681d5caa36312804c91630eaaf510eda897d2 - src/nvidia-modeset/include/nvkms-dma.h
1b75646c99c748f9070208eb58f0082812eabbd9 - src/nvidia-modeset/include/nvkms-private.h
412d8028a548e67e9ef85cb7d3f88385e70c56f9 - src/nvidia-modeset/include/nvkms-console-restore.h
6b21a68e254becdd2641bc456f194f54c23abe51 - src/nvidia-modeset/include/nvkms-framelock.h
4a33d410f090fd4f4dfc9a6de285f8e8fb1c9ced - src/nvidia-modeset/include/nvkms-surface.h
c90e4393f568d96bc98cb52a93bfc3fdea10658d - src/nvidia-modeset/include/nvkms-modeset-workarea.h
8c7e0e15c1038fe518e98d8f86fafb250b10a1d2 - src/nvidia-modeset/include/nvkms-stereo.h
fa829f1cd3b73f194f39879c48962b703f640b65 - src/nvidia-modeset/include/nvkms-vrr.h
c869ccfcda419d80b6691d3667c4e9196493065e - src/nvidia-modeset/include/nvkms-modeset-types.h
ec1374d339746b73bc7c7614695fde68c156074a - src/nvidia-modeset/include/nvkms-rm.h
07ac47b52b1b42c143501c4a95a88a3f86f5be03 - src/nvidia-modeset/include/nvkms-hdmi.h
11bae7c491bbb0ba4cad94b645d47c384191fa5c - src/nvidia-modeset/include/nvkms-flip.h
70d9251f331bbf28f5c5bbdf939ebad94db9362d - src/nvidia-modeset/include/nvkms-softfloat.h
cdf54b0d423f94f04d6f33b672c131125c13d260 - src/nvidia-modeset/include/nvkms-hw-flip.h
377dd4a29b2ea5937a9b8fc3fba0c9e4ef92992e - src/nvidia-modeset/include/nvkms-cursor.h
260b6ef87c755e55a803adad4ce49f2d57315f9a - src/nvidia-modeset/include/nvkms-event.h
8a0ced82697c32b97a80fa3366704014879610e7 - src/nvidia-modeset/include/nvkms-flip-workarea.h
b0d407b0413453ec71481f84cc448d090b90d609 - src/nvidia-modeset/include/nvkms-evo3.h
496b94af536dd912866a05f7b2da53050b50c2f5 - src/nvidia-modeset/include/nvkms-prealloc-types.h
35fa1444c57f7adbbddddc612237f3ad38cdd78f - src/nvidia-modeset/include/nvkms-rmapi.h
15dddd9307fa7ac201bd9ebc1e35e6ac0d2cf6c9 - src/nvidia-modeset/include/nvkms-evo.h
b8854261256a801af52d1201081afa9c17486a96 - src/nvidia-modeset/include/nvkms-3dvision.h
c1c7047929aafc849a924c7fa9f8bc206b8e7524 - src/nvidia-modeset/include/g_nvkms-evo-states.h
49af4a8fa95d0e595deacadbca5360f097722e7f - src/nvidia-modeset/include/nvkms-evo1.h
eb5248c4b0b51e7aecd2de87e496253b3b235c70 - src/nvidia-modeset/include/nvkms-utils-flip.h
4a94381bd8c24b09193577d3f05d6d61f178e1cf - src/nvidia-modeset/include/nvkms-ctxdma.h
be6e0e97c1e7ffc0daa2f14ef7b05b9f9c11dc16 - src/nvidia-modeset/include/nvkms-attributes.h
d05ef9a837f2927fe387e7d157ea76c7ef567807 - src/nvidia-modeset/include/nvkms-lut.h
d57ae79509c667e8d16a4756d85e3564c1b1ac34 - src/nvidia-modeset/include/nvkms-modeset.h
ae03509966df56d98fa72b7528ab43ec2b258381 - src/nvidia-modeset/include/nvkms-utils.h
81fcc817dfb8ae1f98b63d2c1acacc303fedb554 - src/nvidia-modeset/include/nvkms-dpy-override.h
fa8dbffe58d345634ab1ea8743ed29c9ec169f36 - src/nvidia-modeset/include/nvkms-dpy.h
691731826d6daa3bb5a3847a3dd2424d513113c4 - src/nvidia-modeset/include/nvkms-types.h
a79cfb74026085b0aa612c0ae6789083e196bbc2 - src/nvidia-modeset/include/nvkms-evo-states.h
a8fbb7a071c0e7b326f384fed7547e7b6ec81c3e - src/nvidia-modeset/include/dp/nvdp-timer.h
4625828efd425e1b29835ab91fcc3d2d85e92389 - src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h
ae43c46687d16b93189047d9eeed933a67e5571f - src/nvidia-modeset/include/dp/nvdp-connector.h
c386632dbdc0e89019d5618f132dbcb3dff4dafb - src/nvidia-modeset/include/dp/nvdp-device.h
bd2e4a6102432d4ac1faf92b5d3db29e9e3cfafc - src/nvidia-modeset/src/nvkms-utils.c
30ad7839985dea46e6b6d43499210a3056da51ad - src/nvidia-modeset/src/nvkms-utils-flip.c
2b304663f2a005b5ccdecfafb69a3407f2feeb18 - src/nvidia-modeset/src/nvkms-evo2.c
fd64ffbcc1efd446fb3352ceaa8bd4221b23a1d2 - src/nvidia-modeset/src/nvkms-modeset.c
3e723edf2a0a2f4f93032feb4aeaaf7fd0acddfa - src/nvidia-modeset/src/g_nvkms-evo-states.c
2fabe1c14116a2b07f24d01710394ee84a6e3914 - src/nvidia-modeset/src/nvkms-3dvision.c
488724910d9a3bf530303a4fa0889983d11ce5c0 - src/nvidia-modeset/src/nvkms-hdmi.c
761c8540278a1ffb9fe4aa0adb1b4ee95524787a - src/nvidia-modeset/src/nvkms-hal.c
54b41301663dc9fdc45d24c7a43ad4a980821f9d - src/nvidia-modeset/src/nvkms-attributes.c
3261fd9a1eb14f7f3fb0917757b1e2704d4abbd2 - src/nvidia-modeset/src/nvkms-hw-states.c
6d41c9f84cc9ce2d16812e94a3fba055b3fc7308 - src/nvidia-modeset/src/nvkms-conf.c
7d0e38f9d79e0c928bdc67276b8ecb0c18470b88 - src/nvidia-modeset/src/nvkms-hw-flip.c
03fb499633c485e0559da79500d4e66ea50e8d8f - src/nvidia-modeset/src/nvkms-framelock.c
05ca4acdfeb9b99eccc7e222846fc688473322ae - src/nvidia-modeset/src/nvkms-rmapi-dgpu.c
65b02b48caff2a9100b8c5614f91d42fb20da9c0 - src/nvidia-modeset/src/nvkms-dpy-override.c
dff88ceaf95239b51b60af915f92e389bb844425 - src/nvidia-modeset/src/nvkms-cursor.c
f754a27436fd1e1fa103de6110224c21ad7ea9f4 - src/nvidia-modeset/src/nvkms-pow.c
4d81c3052a0793d180642e3367b7870863015ef2 - src/nvidia-modeset/src/nvkms-rm.c
9a8746ee4a4e772b8ac13f06dc0de8a250fdb4c7 - src/nvidia-modeset/src/nvkms-ctxdma.c
403e6dbff0a607c2aecf3204c56633bd7b612ae2 - src/nvidia-modeset/src/nvkms-stereo.c
da726d20eea99a96af4c10aace88f419e8ee2a34 - src/nvidia-modeset/src/nvkms-event.c
b890da1d428f30483d6f69e662218f19c074d011 - src/nvidia-modeset/src/nvkms-evo3.c
c799d52bdc792efc377fb5cd307b0eb445c44d6a - src/nvidia-modeset/src/nvkms-cursor2.c
b7232f4b4b8f0d4c395c241c451fc17b6ab84d7f - src/nvidia-modeset/src/nvkms-evo.c
6f2eb25d57d2dc3c1e5db869cfbdf556878d3332 - src/nvidia-modeset/src/nvkms-console-restore.c
94e9c19b7b6a5e56fd46b0885e7dd6fe698fe2df - src/nvidia-modeset/src/nvkms-prealloc.c
bf1b007fceaa1c38771f9e7d1130f9c0c3eddd80 - src/nvidia-modeset/src/nvkms-lut.c
e13960662d872f84dd77f36f778aee0521b4ff54 - src/nvidia-modeset/src/nvkms-modepool.c
b13bd89b5ac60ceab56e9c2398cf7668375ab7ad - src/nvidia-modeset/src/nvkms-flip.c
9fea40b7b55d6ebf3f73b5d469751c873ffbe7c0 - src/nvidia-modeset/src/nvkms-dma.c
df59641109db4529eed62cf156b1815a3e67ba05 - src/nvidia-modeset/src/nvkms-vrr.c
f4a02d5b6cb1fa5d461514b21e13002ad9cfa1a4 - src/nvidia-modeset/src/nvkms-evo1.c
9e4d3e3505a84d8634a2ef2307628a8fe551a4c3 - src/nvidia-modeset/src/nvkms-surface.c
2fa9d9b3cbeeb9406f2dd51a4f4a5d53844a31c9 - src/nvidia-modeset/src/nvkms-dpy.c
a49319a235d8746b771a7c418277e168a291259f - src/nvidia-modeset/src/nvkms.c
6a35b80a6995777dc9500cac9659e6f0f0c12d23 - src/nvidia-modeset/src/nvkms-cursor3.c
a90b2c295271631b4c3abe6afb8dfd92d6b429c8 - src/nvidia-modeset/src/dp/nvdp-connector.cpp
c19775aebdaaaee3500378d47af6ff0b8eb486b8 - src/nvidia-modeset/src/dp/nvdp-device.cpp
51af3c1ee6b74ee0c9add3fb7d50cbc502980789 - src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp
69fed95ab3954dd5cb26590d02cd8ba09cdff1ac - src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp
f96cd982b4c05351faa31d04ac30d6fa7c866bcb - src/nvidia-modeset/src/dp/nvdp-timer.cpp
535ce9f743903eb83a341eef1be812f4e4b50887 - src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp
a2a4b7063fa903cc434163ebceb7c8d48f703c33 - src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp
6b985fc50b5040ce1a81418bed73a60edb5d3289 - src/nvidia-modeset/src/dp/nvdp-timer.hpp
110ac212ee8832c3fa3c4f45d6d33eed0301e992 - src/nvidia-modeset/src/dp/nvdp-host.cpp
252660f72b80add6f6071dd0b86288dda8dbb168 - src/nvidia-modeset/os-interface/include/nvkms.h
6e4ae13d024a1df676736752df805b6f91511009 - src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h
c3ab6005d7083e90145cac66addf815c4f93d9a0 - src/nvidia-modeset/lib/nvkms-format.c
7e1249c1d187aec5891eabe5bacae2189d33dc55 - src/nvidia-modeset/lib/nvkms-sync.c
b9fd15957f7ae5effeccb5d8adaa7434b43f44e1 - src/common/softfloat/source/s_roundToUI64.c
d0f8f08c225b60d88b6358d344404ba9df3038ec - src/common/softfloat/source/s_normSubnormalF32Sig.c
824383b03952c611154bea0a862da2b9e2a43827 - src/common/softfloat/source/s_subMagsF32.c
729e790328168c64d65a1355e990274c249bbb3a - src/common/softfloat/source/f32_to_i32_r_minMag.c
68843a93e1f46195243ef1164f611b759cf19d17 - src/common/softfloat/source/f32_le_quiet.c
4445b1fbbd507144f038fd939311ff95bc2cf5f1 - src/common/softfloat/source/ui64_to_f64.c
daeb408588738b3eb4c8b092d7f92ac597cf1fc6 - src/common/softfloat/source/f32_rem.c
aaf6ccb77a1a89fa055a0fb63513297b35e2e54b - src/common/softfloat/source/f64_le_quiet.c
0bf499c0e3a54186fa32b38b310cc9d98ccdcfe3 - src/common/softfloat/source/f32_eq.c
6fa7493285fe2f7fdc0ac056a6367e90327905c2 - src/common/softfloat/source/f32_sub.c
54cbeb5872a86e822bda852ec15d3dcdad4511ce - src/common/softfloat/source/f64_add.c
d4b26dc407a891e9ff5324853f1845a99c5d5cd2 - src/common/softfloat/source/f32_to_i32.c
e4930e155580a0f5aa7f3694a6205bc9aebfe7aa - src/common/softfloat/source/f32_to_f64.c
5a5e0d9f1ee7e8c0d1d4f9fbcf6eba330a5f1792 - src/common/softfloat/source/f32_isSignalingNaN.c
ce37cdce572a3b02d42120e81c4969b39d1a67b6 - src/common/softfloat/source/f64_to_i32.c
5e6f9e120a17cc73297a35e4d57e4b9cbce01780 - src/common/softfloat/source/s_mul64To128.c
b22876b0695f58ee56143c9f461f1dde32fefbf3 - src/common/softfloat/source/f64_to_ui64.c
23b76c1d0be64e27a6f7e2ea7b8919f1a45a8e7c - src/common/softfloat/source/f32_to_ui32_r_minMag.c
dde685423af544e5359efdb51b4bf9457c67fa3b - src/common/softfloat/source/f32_sqrt.c
21a6232d93734b01692689258a3fdfbbf4ff089d - src/common/softfloat/source/s_roundToUI32.c
0108fe6f0d394ad72083aff9bb58507f97a0b669 - src/common/softfloat/source/ui32_to_f64.c
871cb1a4037d7b4e73cb20ad18390736eea7ae36 - src/common/softfloat/source/f32_to_ui64_r_minMag.c
84b0a01ba2a667eb28b166d45bd91352ead83e69 - src/common/softfloat/source/i64_to_f32.c
d701741d8d6a92bb890e53deda1b795f5787f465 - src/common/softfloat/source/f64_le.c
1ff879eca2a273293b5cd6048419b2d2d8063b93 - src/common/softfloat/source/f64_mulAdd.c
00c612847b3bd227a006a4a2697df85866b80315 - src/common/softfloat/source/s_mulAddF32.c
da3b3f94a817909a3dc93ca5fa7675805c7979e0 - src/common/softfloat/source/f64_isSignalingNaN.c
bc992c88f3de09e3a82447cf06dbde7c6604f7f8 - src/common/softfloat/source/f64_to_f32.c
c29536f617d71fe30accac44b2f1df61c98a97dc - src/common/softfloat/source/f64_div.c
50b3147f8413f0595a4c3d6e6eeab84c1ffecada - src/common/softfloat/source/s_normRoundPackToF32.c
1484fc96d7731695bda674e99947280a86990997 - src/common/softfloat/source/f32_to_i64.c
b8c5ccc1e511637d8b2ba2657de4937b80c01c07 - src/common/softfloat/source/f32_le.c
6f83fa864007e8227ae09bb36a7fdc18832d4445 - src/common/softfloat/source/f32_mul.c
00ab2120f71117161d4f6daaa9b90a3036a99841 - src/common/softfloat/source/f32_to_ui32.c
86fdc2472526375539216461732d1db6a9f85b55 - src/common/softfloat/source/s_roundPackToF32.c
38bd00e9c4d2f1354c611404cca6209a6c417669 - src/common/softfloat/source/s_countLeadingZeros64.c
2960704c290f29aae36b8fe006884d5c4abcabb4 - src/common/softfloat/source/f32_div.c
fd40a71c7ebf9d632a384fadf9487cfef4f3ea98 - src/common/softfloat/source/s_shiftRightJam128.c
c3ce12c227d25bc0de48fbcf914fc208e2448741 - src/common/softfloat/source/f64_sub.c
29396b7c23941024a59d5ea06698d2fbc7e1a6ca - src/common/softfloat/source/f64_to_i64.c
5c4ee32cc78efc718aaa60ec31d0b00b1bee3c2c - src/common/softfloat/source/f64_to_ui64_r_minMag.c
c951c9dffa123e4f77ed235eca49ef9b67f9f3d2 - src/common/softfloat/source/s_subMagsF64.c
d19ff7dfece53875f2d6c6f7dd9e7772f7b0b7ec - src/common/softfloat/source/f32_to_i64_r_minMag.c
dafa667ee5dd52c97fc0c3b7144f6b619406c225 - src/common/softfloat/source/s_mulAddF64.c
ab19c6b50c40b8089cb915226d4553d1aa902b0e - src/common/softfloat/source/f64_to_i32_r_minMag.c
2e0fec421f4defd293cf55c5f3af7d91f4b7d2cc - src/common/softfloat/source/ui64_to_f32.c
fe06512577e642b09196d46430d038d027491e9f - src/common/softfloat/source/f32_eq_signaling.c
e7890082ce426d88b4ec93893da32e306478c0d1 - src/common/softfloat/source/s_approxRecipSqrt32_1.c
296c40b0589536cb9af3231ad3dcd7f2baaa6887 - src/common/softfloat/source/f64_lt.c
fb062ecbe62a1f5878fd47f0c61490f2bde279dd - src/common/softfloat/source/s_roundToI32.c
ec1a797b11f6e846928a4a49a8756f288bda1dfa - src/common/softfloat/source/i32_to_f64.c
0e9694d551848d88531f5461a9b3b91611652e9a - src/common/softfloat/source/f64_to_ui32_r_minMag.c
a94c8c2bd74633027e52e96f41d24714d8081eb4 - src/common/softfloat/source/s_approxRecipSqrt_1Ks.c
baa7af4eea226140c26ffe6ab02a863d07f729fb - src/common/softfloat/source/f64_eq_signaling.c
f6d98979ab2d1e2b0d664333104130af6abbcad5 - src/common/softfloat/source/f64_to_i64_r_minMag.c
5c1026617c588bcf5f1e59230bd5bb900600b9ac - src/common/softfloat/source/f64_mul.c
0d8e42636a3409a647291fdb388001c2b11bba07 - src/common/softfloat/source/f32_to_f16.c
9f4d355d85fbe998e243fe4c7bbf8ad23062b6e2 - src/common/softfloat/source/i64_to_f64.c
d9a86343e6cc75714f65f690082dd4b0ba724be9 - src/common/softfloat/source/s_roundPackToF16.c
1a86a6948bf6768bd23a19f1f05d40968c1d2b15 - src/common/softfloat/source/f64_rem.c
e0ad81cfb5d2c0e74dc4ece9518ca15ffc77beaf - src/common/softfloat/source/f32_roundToInt.c
50daf9186bc5d0180d1453c957164b136d5ffc89 - src/common/softfloat/source/f64_eq.c
2db07bbb8242bc55a24ef483af6d648db0660de0 - src/common/softfloat/source/f32_add.c
760fd7c257a1f915b61a1089b2acb143c18a082e - src/common/softfloat/source/s_addMagsF64.c
4b37be398b3e73ae59245f03b2ba2394fc902b4d - src/common/softfloat/source/s_normSubnormalF64Sig.c
69dc4cc63b2a9873a6eb636ee7cb704cbd502001 - src/common/softfloat/source/f64_to_ui32.c
9266c83f3e50093cc45d7be6ab993a0e72af1685 - src/common/softfloat/source/s_roundPackToF64.c
7c8e5ab3f9bf6b2764ce5fffe80b2674be566a12 - src/common/softfloat/source/softfloat_state.c
9a5b93459ace2da23964da98617d6b18006fab86 - src/common/softfloat/source/s_countLeadingZeros8.c
108eec2abf1cddb397ce9f652465c2e52f7c143b - src/common/softfloat/source/f64_roundToInt.c
09cb0cdb90eb23b53cd9c1a76ba26021084710d1 - src/common/softfloat/source/s_addMagsF32.c
ae25eea499b3ea5bdd96c905fd0542da11083048 - src/common/softfloat/source/s_normRoundPackToF64.c
2e5c29d842a8ebc5fbf987068dc9394cee609cc7 - src/common/softfloat/source/f32_to_ui64.c
7bc81f5bc894118c08bfd52b59e010bc068ed762 - src/common/softfloat/source/ui32_to_f32.c
513a7d1c3053fc119efcd8ae1bcc9652edc45315 - src/common/softfloat/source/f32_lt.c
bbc70102b30f152a560eb98e7a1a4b11b9ede85e - src/common/softfloat/source/f64_sqrt.c
0adfa7e174cdb488bb22b06642e14e7fc6f49c67 - src/common/softfloat/source/s_roundToI64.c
ebb4f674b6213fec29761fc4e05c1e3ddeda6d17 - src/common/softfloat/source/f32_mulAdd.c
29321080baa7eab86947ac825561fdcff54a0e43 - src/common/softfloat/source/i32_to_f32.c
8e58f0258218475616ff4e6317516d40ad475626 - src/common/softfloat/source/f32_lt_quiet.c
054b23a974fc8d0bab232be433c4e516e6c1250a - src/common/softfloat/source/f64_lt_quiet.c
4cd1d6cfca3936a39aab9bc0eb622f5c7c848be1 - src/common/softfloat/source/include/softfloat_types.h
de09949a0ca5cd2a84b882b5b5c874d01d3ae11a - src/common/softfloat/source/include/primitives.h
f36c896cfa01f1de9f9420189319e4e00c7fc52a - src/common/softfloat/source/include/internals.h
1ded4df85ff5fa904fa54c27d681265425be1658 - src/common/softfloat/source/include/primitiveTypes.h
9645e179cf888bcd0e3836e8126b204b4b42b315 - src/common/softfloat/source/include/softfloat.h
21a11759ed2afd746a47c4d78b67640c2d052165 - src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c
252c816378fddab616b1f2a61e9fedd549224483 - src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
0cbae7a5abc336331d460cbd3640d2cda02af434 - src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
a6d5c83f6a0542b33ac9c23ac65ef69002cfff9d - src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
86cda6550cb02bbf595d1667573e4be83702a95e - src/common/softfloat/source/8086-SSE/specialize.h
d8b0c55a49c4fa0b040541db6d5ff634d7d103e7 - src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
3d0dbc0a672d039a6346e1c21ddf87ffc9181978 - src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
d152bc457b655725185bdff42b36bb96d6e6715e - src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c
1dd1b424087d9c872684df0c1b4063b077992d5f - src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
b882497ae393bf66a728dae395b64ac53602a1a5 - src/common/softfloat/nvidia/nv-softfloat.h
be9407a273620c0ba619b53ed72d59d52620c3e4 - src/common/softfloat/nvidia/platform.h
70b155b0da07a92ede884a9cec715f67e6b5c3e8 - src/common/displayport/src/dp_list.cpp
9f31213ab8037d7bb18c96a67d2630d61546544a - src/common/displayport/src/dp_mst_edid.cpp
818efd113374de206a36ccf2bf594b4e433a0b85 - src/common/displayport/src/dp_evoadapter.cpp
e874ffeaeb6deec57605bf91eaa2af116a9762bd - src/common/displayport/src/dp_bitstream.cpp
de264916d0e3e873a4c624f237ea228469d0a980 - src/common/displayport/src/dp_watermark.cpp
554e6b7dadbb68ac0f3d2e368ca3fd90832ea254 - src/common/displayport/src/dp_discovery.cpp
fea946e5320e7de8e9229bca8d4a6a14b9e8db59 - src/common/displayport/src/dp_crc.cpp
4803cde0fffcf89fed46d6deaeba5c96c669a908 - src/common/displayport/src/dp_messageheader.cpp
ca92fed27d4c5ca5e9495df08e63d5f446e7f24b - src/common/displayport/src/dp_deviceimpl.cpp
d2f8d43d650d9c0b4a8d9b8070087f13efdaac79 - src/common/displayport/src/dp_connectorimpl.cpp
b18924b1d50232b92223355f608fcca1b6d7ff46 - src/common/displayport/src/dp_configcaps.cpp
f4493ab7efc7030b4cd17bf792981a9dca497e29 - src/common/displayport/src/dp_groupimpl.cpp
37eabb1ab51cb38660eb24e294c63c8320750b96 - src/common/displayport/src/dp_sst_edid.cpp
fa4f4869d3d63c0180f30ae3736600a6627284c6 - src/common/displayport/src/dp_merger.cpp
98cec6b663cf630c789e9823675cbb4948e1ba5e - src/common/displayport/src/dp_edid.cpp
fbd877bac2efc8ee33e4e108e61c961e1fc42f44 - src/common/displayport/src/dp_messagecodings.cpp
aa2e56f6c66bf91c2b4a6030de2d29480f69710e - src/common/displayport/src/dp_wardatabase.cpp
1543bbaba8f3e149239cf44be3c0d080c624d5ba - src/common/displayport/src/dp_buffer.cpp
f56f92e32710b0342805b785d34ba1a9f2a54ed3 - src/common/displayport/src/dp_guid.cpp
45da2aabdaf6b5b2bf17a3deeb045feed1545415 - src/common/displayport/src/dp_messages.cpp
f83b3c17e9f26651f12c8835a682abdd66aed3a2 - src/common/displayport/src/dp_splitter.cpp
56ee9318a7b51a04baa1d25d7d9a798c733dc1bc - src/common/displayport/src/dp_vrr.cpp
d991afdb694634e9df756184b5951739fc3fd0ab - src/common/displayport/src/dp_auxretry.cpp
719d2ddbfb8555636496cb5dd74ee6776059db92 - src/common/displayport/src/dp_timer.cpp
fe8007b3d98dad71b17595ecb67af77b198827a0 - src/common/displayport/src/dptestutil/dp_testmessage.cpp
36e80dd13c5adc64c3adc9a931d5ebbf922e9502 - src/common/displayport/inc/dp_groupimpl.h
d876d77caef3541ae05f310857f3d32e642fba04 - src/common/displayport/inc/dp_auxdefs.h
070b4f6216f19feebb6a67cbb9c3eb22dc60cf74 - src/common/displayport/inc/dp_buffer.h
cca426d571c6b01f7953180e2e550e55c629f0f4 - src/common/displayport/inc/dp_auxretry.h
e2075486b392d6b231f2f133922ac096ca4bc095 - src/common/displayport/inc/dp_ringbuffer.h
80380945c76c58648756446435d615f74630f2da - src/common/displayport/inc/dp_timeout.h
2f134665b274bb223c3f74e0ec5c6a0392fa6387 - src/common/displayport/inc/dp_discovery.h
72f91aac76264d34ce778489f5ce839e03833db8 - src/common/displayport/inc/dp_messages.h
2067e2ca3b86014c3e6dfc51d6574d87ae12d907 - src/common/displayport/inc/dp_timer.h
325818d0a4d1b15447923e2ed92c938d293dc079 - src/common/displayport/inc/dp_hostimp.h
9a0aa25938adf3bda9451aeab67fb04e266d771d - src/common/displayport/inc/dp_deviceimpl.h
df11366a5bcfb641025f12cddf9b5e8c2ed008de - src/common/displayport/inc/dp_watermark.h
78595e6262d5ab0e6232392dc0852feaf83c7585 - src/common/displayport/inc/dp_auxbus.h
c2f5f82ddf1d0b5c976264ceb14fe9b67bf12851 - src/common/displayport/inc/dp_messagecodings.h
cdb1e7797c250b0a7c0449e2df5ce71e42b83432 - src/common/displayport/inc/dp_merger.h
f6e1b0850f5ed0f23f263d4104523d9290bb8669 - src/common/displayport/inc/dp_vrr.h
07d22f84e6a386dad251761278a828dab64b6dd5 - src/common/displayport/inc/dp_bitstream.h
8d8a5f0160922b6630fa796789c5d59cce94d9e0 - src/common/displayport/inc/dp_configcaps.h
7b7d9a137027fbbedfc041465987fa4ed4198ce4 - src/common/displayport/inc/dp_edid.h
34e808f745eaaff13aeb4e6cde1a8ce35f7b9def - src/common/displayport/inc/dp_connector.h
36d3c602cbbf0a52d574f841ba1b75125ec3b24a - src/common/displayport/inc/dp_linkconfig.h
29ee5f4ef6670f06e96c07b36c11e3bad8bee6aa - src/common/displayport/inc/dp_address.h
02b65d96a7a345eaa87042faf6dd94052235009c - src/common/displayport/inc/dp_messageheader.h
e02e5621eaea52a2266a86dcd587f4714680caf4 - src/common/displayport/inc/dp_linkedlist.h
d0b72ca2db108478bba75393c7255356da0e8233 - src/common/displayport/inc/dp_regkeydatabase.h
a3fc03562a3fa0968ab8d4a50424465174392f0e - src/common/displayport/inc/dp_connectorimpl.h
eb9cdbb0a907926b1afd2a551ec19830f06ae205 - src/common/displayport/inc/dp_splitter.h
750ecc85242882a9e428d5a5cf1a64f418d59c5f - src/common/displayport/inc/dp_object.h
4a098c4d09dedc33b86748d5fe9a30d097675e9f - src/common/displayport/inc/dp_list.h
e70068249ebb59040a3e3be1fc4248d714550e61 - src/common/displayport/inc/dp_evoadapter.h
2a81681efef7ffced62c6d64cfdbc455d85fdb0a - src/common/displayport/inc/dp_mainlink.h
11487c992494f502d1c48ff00982998504336800 - src/common/displayport/inc/dp_internal.h
01f1dd58ed5bb12503fa45be7a6657cde0a857e2 - src/common/displayport/inc/dp_guid.h
e27519c72e533a69f7433638a1d292fb9df8772e - src/common/displayport/inc/dp_crc.h
379d3933c90eaf9c35a0bad2bd6af960a321465f - src/common/displayport/inc/dp_wardatabase.h
5bd3706ceea585df76a75dda7f9581b91ee8f998 - src/common/displayport/inc/dp_tracing.h
020194b85245bad5de4dfe372a7ccb0c247d6ede - src/common/displayport/inc/dptestutil/dp_testmessage.h
edded9ca3d455444372fe6c497b2d61bd0cc3f96 - src/common/unix/common/utils/nv_memory_tracker.c
26f2a36442266c5d2664d509ecfd31094a83e152 - src/common/unix/common/utils/nv_vasprintf.c
e903bbbecf4fb3085aaccca0628f0a0e4aba3e58 - src/common/unix/common/utils/nv_mode_timings_utils.c
bda08c8398f68ffc2866ebc390dc63a09a16b0b9 - src/common/unix/common/utils/unix_rm_handle.c
07c675d22c4f0f4be6647b65b6487e2d6927c347 - src/common/unix/common/utils/interface/nv_memory_tracker.h
667b361db93e35d12d979c47e4d7a68be9aa93b6 - src/common/unix/common/utils/interface/nv_mode_timings_utils.h
8d9c4d69394b23d689a4aa6727eb3da1d383765a - src/common/unix/common/utils/interface/unix_rm_handle.h
9e008270f277e243f9167ab50401602378a2a6e8 - src/common/unix/common/utils/interface/nv_vasprintf.h
881cbcc7ed39ea9198279136205dbe40142be35e - src/common/unix/common/inc/nv_assert.h
2476f128437c0520204e13a4ddd2239ff3f40c21 - src/common/unix/common/inc/nv-float.h
d5253e7e4abd3ad8d72375260aa80037adcd8973 - src/common/unix/common/inc/nv_dpy_id.h
1c947cfc8a133b00727104684764e5bb900c9d28 - src/common/unix/common/inc/nv_mode_timings.h
995d8447f8539bd736cc09d62983ae8ebc7e3436 - src/common/unix/common/inc/nv_common_utils.h
befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - src/common/sdk/nvidia/inc/nv_stdarg.h
78a4b6b19a38de41527ef8b290754deca5906817 - src/common/sdk/nvidia/inc/nvcd.h
5cec5038e1f4a395a08b765c8361a9560f3312b7 - src/common/sdk/nvidia/inc/nvdisptypes.h
751abf80513898b35a6449725e27724b1e23ac50 - src/common/sdk/nvidia/inc/nvmisc.h
1e7eec6561b04d2d21c3515987aaa116e9401c1f - src/common/sdk/nvidia/inc/nv-kernel-interface-api.h
fa267c903e9c449e62dbb6945906400d43417eff - src/common/sdk/nvidia/inc/nvlimits.h
3b12d770f8592b94a8c7774c372e80ad08c5774c - src/common/sdk/nvidia/inc/nvi2c.h
f5a682339a89d2b119b43e5b9263dd67346ed3bc - src/common/sdk/nvidia/inc/cpuopsys.h
4a97d807a225d792544578f8112c9a3f90cc38f6 - src/common/sdk/nvidia/inc/nvstatuscodes.h
5cf4b517c9bd8f14593c1a6450078a774a39dd08 - src/common/sdk/nvidia/inc/nv-hypervisor.h
56cca793dd7bcbc4a3681677a822fc9f7a11a091 - src/common/sdk/nvidia/inc/nvos.h
7de14a0c3cc8460a9c41e1ee32fda5409c5b9988 - src/common/sdk/nvidia/inc/mmu_fmt_types.h
e7a5fa74517ecd7f617860f01c5523bc5acd6432 - src/common/sdk/nvidia/inc/rs_access.h
b3de92f4edb1fcc856fd62b74359c9cd447519a8 - src/common/sdk/nvidia/inc/nverror.h
c8b96af9d498f87cb9acde064648f9e84d789055 - src/common/sdk/nvidia/inc/nv_vgpu_types.h
af0bc90b3ad4767de53b8ff91e246fdab0146e8b - src/common/sdk/nvidia/inc/nvsecurityinfo.h
b249abc0a7d0c9889008e98cb2f8515a9d310b85 - src/common/sdk/nvidia/inc/nvgputypes.h
edf1f7d1457b015aa92c12f74f9ffa1e2f86a821 - src/common/sdk/nvidia/inc/nvtypes.h
b5dedeada189123f1965650827bf8a8193383a92 - src/common/sdk/nvidia/inc/nvimpshared.h
95bf694a98ba78d5a19e66463b8adda631e6ce4c - src/common/sdk/nvidia/inc/nvstatus.h
9bca638f5832d831880f090c583fac6fc8cf6ee6 - src/common/sdk/nvidia/inc/dpringbuffertypes.h
7c03663f5e12754572e6efcbe09f51ec2c5f6502 - src/common/sdk/nvidia/inc/g_finn_rm_api.h
a506a41b8dcf657fb39a740ffc1dfd83835d6c89 - src/common/sdk/nvidia/inc/nvcfg_sdk.h
95b0de4e76d9cc1bf49ef953fc00aa47e238ccd2 - src/common/sdk/nvidia/inc/nvfixedtypes.h
0edffddbe7764b268f724abc4ac84924767d1bf2 - src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
8607fdd8ecaa5140bac6643a3f715610ed391d67 - src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h
352825959d98fe9b47a474cfdd154d380c80d24e - src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h
5b573deb4d68ccb67d9cccc11b28203c5db3d2f7 - src/common/sdk/nvidia/inc/ctrl/ctrl0002.h
bfee287b190fd698735c5660592741ba5c25a8ea - src/common/sdk/nvidia/inc/ctrl/ctrl0020.h
1cd4acc266f26dba813ac8802dba4e7ab381f753 - src/common/sdk/nvidia/inc/ctrl/ctrl0080.h
175ad4d300fa40b960d07fee059b51c6b8639f01 - src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h
b35f86170f27005bc714b37edc96dffb97691bd4 - src/common/sdk/nvidia/inc/ctrl/ctrla081.h
f64c19679dc9a20e62ef86d01878a006b505ed93 - src/common/sdk/nvidia/inc/ctrl/ctrl906f.h
72164895b0055a1942e1190a05d5090753af95a1 - src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h
360ed7fefcd6f8f4370b3cf88d43a9f8eec1e86d - src/common/sdk/nvidia/inc/ctrl/ctrl00da.h
90843f8173a341deb7f1466cd69a17114c6b9e4f - src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
c8b2e0e64bb3cf3c562dee5fa7913035f82d8247 - src/common/sdk/nvidia/inc/ctrl/ctrl402c.h
7433f9674e36f120671d6e1802f2cdbcaadc58c3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080.h
4fc1dd23cbfdb4ce49f1722f6e282cd21f33b7f5 - src/common/sdk/nvidia/inc/ctrl/ctrla06f.h
a75a0a693d5742c8aecd788dc204a69863cfaf39 - src/common/sdk/nvidia/inc/ctrl/ctrl00de.h
3fcf5dbb82508d88a040981a7ab21eac1466bb2b - src/common/sdk/nvidia/inc/ctrl/ctrl0073.h
fcdf7b331c3f7744d296918e68d44dfb114b9461 - src/common/sdk/nvidia/inc/ctrl/ctrl00fe.h
b4cecb527cdc3ba4e68ca0031ac2179756108cb0 - src/common/sdk/nvidia/inc/ctrl/ctrl003e.h
ade4a731f59c7cd16b4a60d318a19147b9918bb9 - src/common/sdk/nvidia/inc/ctrl/ctrl0004.h
625af1df5c9453bd35a9e873ee5c77e73d5fd195 - src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
6627bf1716c0e06e870c083d264753d6a0abb439 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
a002a436f77b9544041a259405dddba90301df01 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h
07f82ae90cde3c6e2e6c5af135c40e01660c39a3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h
aa86ffd04a55436ecacbedb1626f6187bbddedf7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h
59340a74f26b92f689fe99f8303775c87a4bbd58 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h
48691dd2c8d93fbd162e207cdb5d27ea30741d36 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h
0cd5e883dfafb74ce2ec9bccca6e688a27e6cfa9 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h
96f72ec608cd198be995f3acd9c04afe7c7e6dc8 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h
97bb79e74b25134fa02a60d310b3e81170df6fd6 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h
496c7a1a0c283b25a637a996995d3987c9045346 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
d5cdbcd10e049e8daf48feb5347f070d4ef85f8b - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spdm.h
347efee37fa9404ce1933f01a7aa8a43b229db44 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h
27341c2b0ad4eb10044fdf9fc2377024b4c63297 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
e8d117ea0d596ed6415324bd136de337f1a36ff1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h
359c6b06f2712a527d1ef08465179c14a8b4a751 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h
ae428e2b33fd058eeaffbbd4fbcd42178345883c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h
18ed4b62c824c252abdd89a6616e3cc325ffa7fa - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h
1990d0c4fa84c6d078282d4d7d0624ccb0325ce7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h
5ac6c9a299256935259eaf94323ae58995a97ad7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h
d4ba227a522423503e5044c774dbcca692c48247 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h
e4441458a7914414a2092f36a9f93389ed65154a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h
ecd312fabb249a25655e151cee3615c5ab61ffa7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h
c1e506bd4bb6ad792c802961a9e03b371abb6919 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h
5013ec94fa6311100818efb422b013ed77cffe82 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
86737d12192b2e7dc878bbeb8e57a41dcc1a655e - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
920f69f6d8386a107160da834545f71172cc2f0f - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h
bf976b3c428ccb9cb80d2f84f80b2c33d96e6ce1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h
8b622186edb156e980d02bd59a71c01923d1aa23 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h
fed713e236b4fbc1e71dcf6747182ebea5836318 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
b2eecbca32d87b939858bf0b22f93c06b49b3a04 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h
3db5bcbcae4063f2356ec76924b4bcc1d0df1a05 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h
55cee85b56cb6ed5d017bab55c40cc8799789c8b - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h
66aa4e08f838e1f87e4babacb42d3d59cb6837ff - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmu.h
42dc8204c0f6da47c5f741344032fc02702cfac5 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h
59254e4bdc475b70cfd0b445ef496f27c20faab0 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h
93a9fa93eb3d1099991e4682b6228124220ca293 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
cfa32c37f373eeef53aedc3f4dffff1634c122e8 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h
4f31fe752e050953a0f87d04063dc152bba261fe - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h
01a6a431e8aeffeec97755009b4e9575bdf0de7b - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h
b8e8c5ccab01d7997d1fd5579a690cb3279a8ab3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h
22b8cc6c4677e664904659c726425a62aa24124e - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
aa0f685b94bdae99a58aa1a45735b0593a2e6f5a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h
4c2af959d06536294d62b2366a6ba61ca744bd50 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h
5c7b955ef5e6f6ca9c0944e8a2b2c4a1ae760e04 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h
898fa08818b657c27b456d952e7a4e09d8d197ee - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h
4fa54b01cd70c3ca3b5cac93bade62dd09641b97 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vgpumgrinternal.h
74f1abf45a2a0f60c82e4825b9abfa6c57cab648 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h
c30b5995d353e68623b32fea398f461351e3b8f1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h
6c467ece3508071c2b3a296afffedd592726f8de - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h
d411633fdeae66035e8c018ec8f6f25a9d5dd462 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
cfe695da65835f26c82399db0e44a56c7162c180 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h
5f70c2eb6a144bc4d7ca8be63fa46391909e8201 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h
2a00952f0f3988c5425fec957a19d926ae75ba28 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h
6975ff971c7ed1ac1a429896a3be1d95353fa4bd - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h
0710ae87ce40008bea9181310b755ed74c397bfe - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h
a8384da236fdd365d15d26daeb7db1c117ce1072 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h
e8d883de767aa995a374d8da56b5c9da8787cb1d - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h
3ab2fc007f2c76ddc89caf14c4db0ab530515d4a - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h
5f4b08b9ee7853eb33269ef7b415050eac2d702a - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h
50f2ef0c01ab81077bd0e313d9ff168faae91670 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h
ce4e42c8e73047ae03f835f9d3655dda1eb44452 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h
53134475c1fd9c228a2c607051b34c28a5a80b03 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h
a138379dd76c468072f1862b8fc6ae79ee876b4e - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h
e3fb93f0ff3469ec76cecdc6f0bf1c296551a2b1 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h
0acaf597e0fc8f59a99b1772b7370395513492ed - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070event.h
fa763827e4359b2deb6307ef742474f8f6f960dd - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h
67a911b3177b75243e2fceef821ebcfd3668235e - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h
9279520e7dec45516d5339d82d35eb60b88f7300 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h
ce19b7278c6720b3bee62bcaa763ebb322d91957 - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
e919b586a0e44cfe96b819deeab2c21c6af34f55 - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06finternal.h
cebcfa209648731e86af526834717b19d5b24159 - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fevent.h
83d495dfe528167aa8ddbf45091051a89bd1a262 - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h
6bc78fd963e407de843598786bdbcd1653005328 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h
a33a1c1173962183793d84276e46c61d27ca867e - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h
ebf415ac7d55643fa24493f27b69a843ea05f6c7 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h
dd49db523d761d6f14e3890549cd8186c25f1d62 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h
4e7733c378eb6f7924e43ff111017ae0e433800d - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h
668e6d37c0a556a70ae003569fe237b1024d6e6b - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h
c341344b0879c5e9c7ba9ac0005eb28b347eaa63 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h
1b594c39d1439c3d1ecc24c4325b2ea8c2724548 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h
1cef17e1833c002d968a2255726a4f785e4e66e7 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h
899e3bc9a551ca0b181d1c8dd7ef8d779a66ecc4 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h
d08ef822e97ee56984618d52ed3ed55ee395eadb - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h
5782a19aeaf9695c13940cf4532e41523a8460e3 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h
0146d2b3ecec8760e76dacd8ce6bb75c343c6cac - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h
92ff82d1045933baa79958a9f6efd451b0123e95 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h
316494234df96c6af34cc0bd2b1c791dc42ac92b - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h
4f0ccb0667bd3e3070e40f3f83bede7849bc78e4 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h
7edd8cdb8061ec137bc29d0dbbfbb5d169c0fd35 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h
a3328cf6633f9b04258eff05ce30e66cc6930310 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h
68bdc682ee42784c09409cd581bb991f7fc1bf41 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h
12f1e560480dafde75646fb41aa349d9d729ca7d - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h
ec7b09fe14c31c175e0abfcfa85dee20d57d02b4 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h
12f1d3bb13c72fb1b52b62cf2a21f1b15619c06d - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h
e238d87a94267f62141c413d0c44f03f27204b33 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
28b06c8f8152dce2b2e684a4ba84acd25a8b8c26 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h
ea6d95de011af0039b1adc209733e524bc583c92 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h
e0c551dc47bc06f8dff5884affdeb05eb118609f - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h
ccba5f12df1bce4b4235eed5a1c7a0cd2612c2ce - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h
8e85550f24771c87138a973cd8cd714e419a14e8 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
6fb840928970cf39919f2f415932bcc3e0764b25 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
33716a49ba4f7fcc0faa889d535e370a14edd582 - src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h
c74ac448c3382d92e662804b56e73edd748e2678 - src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83debase.h
31534360d235be6dfdf4c1cf3854ce1e97be8fe2 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
f9db227bd1cefe92e4f35b52cafcb15266630582 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h
52f251090780737f14eb993150f3ae73be303921 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h
456707a5de78815fc6a33f2da7e2a2a45ccc4884 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h
d2992c1a9aac5b1b5cfefcca72e9a2401190158c - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
77eb4fab61225663a3f49b868c983d5d532ca184 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h
f9f404124a718ace14803ebe84efe752fcef816b - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
022feef64678b2f71ab70dc67d5d604054990957 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h
6ca26c7149455e43f32e8b83b74f4a34a24a2d29 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h
ff78c1bb58b1946f3e75e053be9f2b5de443e2f4 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
abed22b35137e2d40399eb4ed01724aa789cb635 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h
505860d3cd6f7d5144f97195b9fb32dd5b8f74aa - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
1066e2e0a0633b0dd1b9114f31079c30178a5ac8 - src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h
3f747a4fc98291329e0245a971248cf2c28a1b60 - src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h
41a588413e1b13f0f3eec6647ffc7023dfaf651f - src/common/sdk/nvidia/inc/alloc/alloc_channel.h
04ab1761d913030cb7485149ecd365f2f9c0f7da - src/common/sdk/nvidia/inc/class/cl0005_notification.h
ddbffcce44afa7c07924fd64a608f7f3fe608ccc - src/common/sdk/nvidia/inc/class/cl0071.h
e6818f1728a66a70080e87dac15a6f92dd875b4e - src/common/sdk/nvidia/inc/class/cl927d.h
d90649c6a6c491bf086958426b56c697222e10bc - src/common/sdk/nvidia/inc/class/cl00fe.h
e1bfd0c78f397e7c924c9521f87da8286bebe3f1 - src/common/sdk/nvidia/inc/class/cl84a0_deprecated.h
dd4f75c438d19c27e52f25b36fc8ded1ce02133c - src/common/sdk/nvidia/inc/class/cl917cswspare.h
28867d69a6ceac83da53a11a5e1ef87d9476f0be - src/common/sdk/nvidia/inc/class/clc57d.h
c61f8348c2978eef0a07191aaf92bd73e935f7bd - src/common/sdk/nvidia/inc/class/clc67e.h
2614a83d383b540f23ef721ec49af1dfde629098 - src/common/sdk/nvidia/inc/class/cl0080.h
05605d914edda157385e430ccdbeb3fcd8ad3c36 - src/common/sdk/nvidia/inc/class/cl9171.h
f968cd35ce1d1d8e3bc2f669025e6b1042b35354 - src/common/sdk/nvidia/inc/class/cl00de.h
7c8e1f1055f9522cfb2935ea0aae612ef172c26e - src/common/sdk/nvidia/inc/class/clc370_notification.h
1efc9d4aa038f208cd19533f6188ac3a629bf31a - src/common/sdk/nvidia/inc/class/cl917a.h
435a34753d445eb9711c7132d70bd26df2b8bdab - src/common/sdk/nvidia/inc/class/cl917d.h
4fc2133935b8e560c9a1048bc0b1f1c2f0a4464c - src/common/sdk/nvidia/inc/class/cl00c1.h
326dbbeb275b4fc29f6a7e2e42b32736474fec04 - src/common/sdk/nvidia/inc/class/cl9571.h
31939808cd46382b1c63bc1e0bd4af953302773f - src/common/sdk/nvidia/inc/class/cl977d.h
e0c9a155f829c158c02c21b49c083168f8b00cbe - src/common/sdk/nvidia/inc/class/clc37dswspare.h
d301edef2d1dd42382670e5a6ceef0d8caf67d28 - src/common/sdk/nvidia/inc/class/cl90cd.h
7c7406d40a09372dcae2aaf3fcad225c3dd2cf3f - src/common/sdk/nvidia/inc/class/cl9010_callback.h
941a031920c0b3bb16473a6a3d4ba8c52c1259d7 - src/common/sdk/nvidia/inc/class/cl917e.h
fb5ef3d6734a2ee6baba7981cdf6419d013cee85 - src/common/sdk/nvidia/inc/class/clc671.h
38265d86eb7c771d2d3fc5102d53e6a170a7f560 - src/common/sdk/nvidia/inc/class/cl0041.h
bb79bbd1b0a37283802bc59f184abe0f9ced08a5 - src/common/sdk/nvidia/inc/class/cl0040.h
a26ddc6c62faac1ecd5c5f43499aab32c70f32cb - src/common/sdk/nvidia/inc/class/clc67b.h
b7a5b31a8c3606aa98ba823e37e21520b55ba95c - src/common/sdk/nvidia/inc/class/cl402c.h
9e1d2f90d77e23f1d2163a8f8d8d747058e21947 - src/common/sdk/nvidia/inc/class/cl9010.h
02ff42b6686954e4571b8a318575372239db623b - src/common/sdk/nvidia/inc/class/cl30f1_notification.h
6db83e33cb3432f34d4b55c3de222eaf793a90f0 - src/common/sdk/nvidia/inc/class/cl00b1.h
fe7484d17bc643ad61faabee5419ddc81cf9bfd6 - src/common/sdk/nvidia/inc/class/cl9570.h
13f8e49349460ef0480b74a7043d0591cf3eb68f - src/common/sdk/nvidia/inc/class/clc57b.h
9f8a45cb986e3ad2bd4a8900469fe5f8b0c9463a - src/common/sdk/nvidia/inc/class/cl9870.h
c40fd87fa6293d483b5bf510e2e331143ded9fa4 - src/common/sdk/nvidia/inc/class/cl9470.h
bd9f406625e6c0cce816a5ddfb9078723e7f7fb5 - src/common/sdk/nvidia/inc/class/clb0b5sw.h
5416c871e8d50a4e76cbad446030dbedbe1644fd - src/common/sdk/nvidia/inc/class/cl00f2.h
e63ed2e1ff3fe2a5b29cfc334d3da611db2aadf6 - src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h
cb610aaae807d182b4a2ee46b9b43ebfa4a49a08 - src/common/sdk/nvidia/inc/class/clc57e.h
513c505274565fa25c5a80f88a7d361ffbcb08c3 - src/common/sdk/nvidia/inc/class/cl0005.h
dec74b9cf8062f1a0a8bbeca58b4f98722fd94b0 - src/common/sdk/nvidia/inc/class/cl0076.h
5df0ce4eb733554e963eb3c7938396f58f2dd4d5 - src/common/sdk/nvidia/inc/class/cl2081.h
8b75d2586151302d181f59d314b6b3f9f80b8986 - src/common/sdk/nvidia/inc/class/clc573.h
ab27db8414f1400a3f4d9011e83ac49628b4fe91 - src/common/sdk/nvidia/inc/class/cl987d.h
02906b5ba8aab0736a38fd1f6d7b4f6026a5185b - src/common/sdk/nvidia/inc/class/clc57esw.h
5556b1c2e267d1fda7dee49abec983e5e4a93bff - src/common/sdk/nvidia/inc/class/cl2080_notification.h
ff47d8a4b4bdb3b9cd04ddb7666005ac7fcf2231 - src/common/sdk/nvidia/inc/class/cl003e.h
cef74c734fc7d2f32ff74095c59212d9e1d4cafc - src/common/sdk/nvidia/inc/class/cl84a0.h
ef173136a93cdd2e02ec82d7db05dc223b93c0e1 - src/common/sdk/nvidia/inc/class/clc770.h
4a6444c347825e06bdd62401120553469f79c188 - src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h
78259dc2a70da76ef222ac2dc460fe3caa32457a - src/common/sdk/nvidia/inc/class/clc37e.h
053e3c0de24348d3f7e7fe9cbd1743f46be7a978 - src/common/sdk/nvidia/inc/class/cl0004.h
9b2d08d7a37beea802642f807d40413c7f9a8212 - src/common/sdk/nvidia/inc/class/clc37d.h
89d4eeb421fc2be3b9717e333e9ff67bfffa24e8 - src/common/sdk/nvidia/inc/class/cl2080.h
2e3d5c71793820d90973d547d8afdf41ff989f89 - src/common/sdk/nvidia/inc/class/clc67a.h
2d76476dba432ffc1292d2d5dd2a84ff3a359568 - src/common/sdk/nvidia/inc/class/cl0092.h
60d0c7923699599a5a4732decfbcb89e1d77b69e - src/common/sdk/nvidia/inc/class/cl9770.h
f5760f5054538f4ecf04d94fb1582a80a930bc29 - src/common/sdk/nvidia/inc/class/clc673.h
99a34eee22f584d5dfb49c3018a8cb9a7b1035ed - src/common/sdk/nvidia/inc/class/cl5070_notification.h
0285aed652c6aedd392092cdf2c7b28fde13a263 - src/common/sdk/nvidia/inc/class/cl00fc.h
95ca0b08eed54d1c6dd76fdf9cf4715007df1b20 - src/common/sdk/nvidia/inc/class/cl0020.h
992b395855033b4a1fa7536d0de6ab2d071a5f82 - src/common/sdk/nvidia/inc/class/clc77d.h
36c6162356ac39346c8900b1e0074e4b614d4b5a - src/common/sdk/nvidia/inc/class/clc370.h
204feb997ba42deab327d570e5f12235d5160f00 - src/common/sdk/nvidia/inc/class/clc57a.h
b685769b5f3fed613227498866d06cc3c1caca28 - src/common/sdk/nvidia/inc/class/cl2082.h
83427e3172c64c3b9ef393205ccc3b961ec65190 - src/common/sdk/nvidia/inc/class/cl5070.h
e72a7871d872b2eb823cc67c0a7d4cafb3d0ca18 - src/common/sdk/nvidia/inc/class/cl90ec.h
127f78d2bb92ef3f74effd00c2c67cf7db5382fe - src/common/sdk/nvidia/inc/class/clc67d.h
158c98c8721d558ab64a025e6fdd04ce7a16ba9e - src/common/sdk/nvidia/inc/class/cl947d.h
bae36cac0a8d83003ded2305409192995d264d04 - src/common/sdk/nvidia/inc/class/cl0001.h
da8d312d2fdc6012e354df4fa71ed62ae4aac369 - src/common/sdk/nvidia/inc/class/cl927c.h
c2600834921f8a6aad6a0404076fa76f9bc1c04d - src/common/sdk/nvidia/inc/class/clc37b.h
eac86d7180236683b86f980f89ec7ebfe6c85791 - src/common/sdk/nvidia/inc/class/cl957d.h
026f66c4cc7baad36f1af740ae885dae58498e07 - src/common/sdk/nvidia/inc/class/clc371.h
2f87e87bcf9f38017ad84417d332a6aa7022c88f - src/common/sdk/nvidia/inc/class/cl9471.h
0d8975eec1e3222694e98eb69ddb2c01accf1ba6 - src/common/sdk/nvidia/inc/class/cl0000_notification.h
b29ba657f62f8d8d28a8bdd2976ef3ac8aa6075f - src/common/sdk/nvidia/inc/class/cl0073.h
b71d1f698a3e3c4ac9db1f5824db983cf136981a - src/common/sdk/nvidia/inc/class/cl9170.h
15d1f928a9b3f36065e377e29367577ae92ab065 - src/common/sdk/nvidia/inc/class/cl0080_notification.h
11b19cb8d722146044ad5a12ae96c13ed5b122b6 - src/common/sdk/nvidia/inc/class/cl917b.h
a23967cf3b15eefe0cc37fef5d03dfc716770d85 - src/common/sdk/nvidia/inc/class/clc372sw.h
f3f33f70ec85c983acec8862ccaabf5b186de2bb - src/common/sdk/nvidia/inc/class/cl9270.h
20894d974d1f8f993c290463f1c97c71fd2e40b1 - src/common/sdk/nvidia/inc/class/cl30f1.h
9db39be032023bff165cd9d36bee2466617015a5 - src/common/sdk/nvidia/inc/class/cl0002.h
593384ce8938ceeec46c782d6869eda3c7b8c274 - src/common/sdk/nvidia/inc/class/cl900e.h
31ac68401e642baf44effb681d42374f42cf86b1 - src/common/sdk/nvidia/inc/class/cl00c3.h
a3e011723b5863277a453bfcfb59ce967cee0673 - src/common/sdk/nvidia/inc/class/clc670.h
78efa8d42f828c89cd2a62b8c3931ebd0b0a6476 - src/common/sdk/nvidia/inc/class/clc771.h
9797f4758d534181eeaa6bc88d576de43ba56045 - src/common/sdk/nvidia/inc/class/clc574.h
060722ac6a529a379375bb399785cbf2380db4fd - src/common/sdk/nvidia/inc/class/clc373.h
022e8405220e482f83629dd482efee81cc49f665 - src/common/sdk/nvidia/inc/class/clc77f.h
a7c7899429766c092ee3ecf5f672b75bef55216c - src/common/sdk/nvidia/inc/class/cl9271.h
95d99f0805c8451f0f221483b3618e4dbd1e1dd8 - src/common/sdk/nvidia/inc/class/cl90f1.h
74c75472658eea77d031bf3979dd7fe695b4293f - src/common/sdk/nvidia/inc/class/cl0092_callback.h
a75d43f7b84d4cb39f8a2be35c12b2d2735f0ad9 - src/common/sdk/nvidia/inc/class/cl0000.h
16f9950a48c4e670b939a89724b547c5be9938bf - src/common/sdk/nvidia/inc/class/clc570.h
c2d8bb02052e80cd0d11695e734f5e05ab7faeb5 - src/common/sdk/nvidia/inc/class/cl907dswspare.h
a9503a5558b08071f35b11df9a917310947c378b - src/common/sdk/nvidia/inc/class/cl00da.h
b1133e9abe15cf7b22c04d9627afa2027e781b81 - src/common/sdk/nvidia/inc/class/cl917c.h
866977d299eac812b41eb702a517e27bdc56e875 - src/common/sdk/nvidia/inc/class/clc37a.h
556d925de1e686243db36090cc35927f6d53c8bc - src/common/inc/nvUnixVersion.h
b4c5d759f035b540648117b1bff6b1701476a398 - src/common/inc/nvCpuUuid.h
8c41b32c479f0de04df38798c56fd180514736fc - src/common/inc/nvBldVer.h
d877f4b99ae7d18cc5c78b85e89c0a7e3f3e8418 - src/common/inc/nvPNPVendorIds.h
ebccc5c2af2863509e957fe98b01d9a14d8b0367 - src/common/inc/nv_list.h
e1fbb040ea9d3c773ed07deb9ef5d63c8c8cab7a - src/common/inc/nvSha1.h
62e510fa46465f69e9c55fabf1c8124bee3091c4 - src/common/inc/nvHdmiFrlCommon.h
4282574b39d1bcaf394b63aca8769bb52462b89b - src/common/inc/nvBinSegment.h
56f837b06862884abb82686948cafc024f210126 - src/common/inc/nvlog_defs.h
e670ffdd499c13e5025aceae5541426ab2ab0925 - src/common/inc/gps.h
87bb66c50d1301edb50140e9896e1f67aaaa7175 - src/common/inc/nvVer.h
d9c0905f374db0b9cc164ce42eab457d1ba28c53 - src/common/inc/nvop.h
6fa5359ffe91b624548c226b6139f241771a9289 - src/common/inc/jt.h
b58ed1b4372a5c84d5f3755b7090b196179a2729 - src/common/inc/nv_speculation_barrier.h
8f0d91e1a8f0d3474fb91dc3e6234e55d2c79fcc - src/common/inc/rmosxfac.h
4df0a4ae78271bb5b295288798d5be7866242adc - src/common/inc/nvctassert.h
963aebc9ec7bcb9c445eee419f72289b21680cdd - src/common/inc/hdmi_spec.h
a346380cebac17412b4efc0aef2fad27c33b8fb5 - src/common/inc/nvlog_inc2.h
5257e84f2048b01258c78cec70987f158f6b0c44 - src/common/inc/nvlog_inc.h
714db3678cd564170ec05022de6c37686da9df23 - src/common/inc/pex.h
90998aac8685a403fdec9ff875f7436373d76f71 - src/common/inc/displayport/dpcd14.h
ee0105d1113ce6330939c7e8d597d899daae662e - src/common/inc/displayport/dpcd.h
1fc95a17ddb619570063f6707d6a395684bfa884 - src/common/inc/displayport/dpcd20.h
669268ea1660e9e5b876f90da003599ba01356bb - src/common/inc/displayport/displayport.h
bbcecae47807b4578baa460da4147328140ecfcd - src/common/inc/swref/published/nv_ref.h
1efbc285d851a4430776a945d8c250b6a7019ab5 - src/common/inc/swref/published/nv_arch.h
38edc89fd4148b5b013b9e07081ba1e9b34516ac - src/common/inc/swref/published/turing/tu102/kind_macros.h
86a59440492fd6f869aef3509f0e64a492b4550d - src/common/inc/swref/published/turing/tu102/dev_mmu.h
64c123c90018c5ee122b02b02cbccfcd5ec32cab - src/common/inc/swref/published/t23x/t234/dev_fuse.h
3cddaacf90bbbefedf500e6af7eaefb0f007813c - src/common/inc/swref/published/disp/v03_00/dev_disp.h
1ea0c3d6ea0c79c01accc7b25d15b421ab49a55d - src/common/inc/swref/published/disp/v04_02/dev_disp.h
54c516f23671ec703a4e000f700c16dce640367a - src/common/modeset/timing/nvt_dmt.c
cc04c12ebe4e2f7e31d0619ddd16db0c46b9db9e - src/common/modeset/timing/nvtiming.h
1997adbf2f6f5be7eb6c7a88e6660391a85d891b - src/common/modeset/timing/nvt_gtf.c
cb1923187030de8ad82780663eb7151b68c3b735 - src/common/modeset/timing/displayid20.h
80063c05e3961073d23f76822bc9b55be533a6ee - src/common/modeset/timing/nvt_edid.c
58b68f1272b069bb7819cbe86fd9e19d8acd0571 - src/common/modeset/timing/edid.h
446e1044fcc8f7711111fca6a49d2776dba6e24c - src/common/modeset/timing/nvt_edidext_displayid.c
aad5d6f2b238b9582a63ba1e467da13d86ee4ded - src/common/modeset/timing/dpsdp.h
49df9034c1634d0a9588e5588efa832a71750a37 - src/common/modeset/timing/nvt_cvt.c
f75b1d98895bdccda0db2d8dd8feba53b88180c5 - src/common/modeset/timing/displayid.h
2868a1ecc76e5dd57535929890b922028522f4b5 - src/common/modeset/timing/nvt_edidext_861.c
5b1ce39d595dfb88141f698e73b0a64d26e9b31d - src/common/modeset/timing/nvt_dsc_pps.c
04693ced0777456f6b7005f19a4b7c39a6d20ee6 - src/common/modeset/timing/nvtiming_pvt.h
28d7b753825d5f4a9402aff14488c125453e95c5 - src/common/modeset/timing/nvt_tv.c
849309f12f14d685acf548f9eed35fadea10c4e7 - src/common/modeset/timing/nvt_edidext_displayid20.c
890d8c2898a3277b0fed360301c2dc2688724f47 - src/common/modeset/timing/nvt_util.c
783bd7a92ca178ca396b15e8027561c8b61c09a3 - src/common/modeset/timing/nvt_displayid20.c
974f52eb92bda6186510c71a2b6ae25cb0514141 - src/common/modeset/timing/nvt_dsc_pps.h
67db549636b67a32d646fb7fc6c8db2f13689ecc - src/common/modeset/hdmipacket/nvhdmipkt_9271.c
f2b434ed8bdd7624143654b7b3953d8c92e5a8e2 - src/common/modeset/hdmipacket/nvhdmipkt_common.h
e6d500269128cbd93790fe68fbcad5ba45c2ba7d - src/common/modeset/hdmipacket/nvhdmipkt_C371.c
60ee78d72d4d6b03932b7111508784538f35381a - src/common/modeset/hdmipacket/nvhdmipkt.c
443c0a4b17a0019e4de3032c93c5cac258529f01 - src/common/modeset/hdmipacket/nvhdmipkt_internal.h
bb634bc2517a2653be2534602ab0f4712e0b1363 - src/common/modeset/hdmipacket/nvhdmipkt_9171.c
9fbe6313ee438f301ac75f5ca2228e27b785c4f4 - src/common/modeset/hdmipacket/nvhdmipkt_0073.c
54a1b5e5aaf0848a72befc896ed12f1de433ad4f - src/common/modeset/hdmipacket/nvhdmipkt_9471.c
5e12a290fc91202e4ba9e823b6d8457594ed72d3 - src/common/modeset/hdmipacket/nvhdmi_frlInterface.h
381e1b8aeaa8bd586c51db1f9b37d3634285c16a - src/common/modeset/hdmipacket/nvhdmipkt_class.h
9be7b7be94a35d1d9a04f269ff560dbbb7860a2a - src/common/modeset/hdmipacket/nvhdmipkt_9571.c
1babb2c7f11b95fd69bcbc9dcffeefea29d61118 - src/common/modeset/hdmipacket/nvhdmipkt_C671.c
a1f52f0f78eec1d98b30b0f08bc1c5e88ae3d396 - src/common/modeset/hdmipacket/nvhdmipkt.h
4de33a60116ce3fa3f440db105561eddc21ce375 - src/common/shared/nvstatus/nvstatus.c
a71d2c98bc2dc5445436cd96ac5c7e6a57efcf84 - src/nvidia/Makefile
c5f16fdf43ca3d2845d120c219d1da11257072b0 - src/nvidia/nv-kernel.ld
3f7b20e27e6576ee1f2f0557d269697a0b8af7ec - src/nvidia/interface/nv-firmware-registry.h
bff92c9767308a13df1d0858d5f9c82af155679a - src/nvidia/interface/nvacpitypes.h
d02ee5bb3f19dffd8b5c30dc852cea243bcdf399 - src/nvidia/interface/acpidsmguids.h
75d3a4e35230b114a2a233be8235f19220d953a4 - src/nvidia/interface/nvrm_registry.h
60c7cafce7bd5240e8409e3c5b71214262347efc - src/nvidia/interface/acpigenfuncs.h
7dec210405c35d200be24bd1c0c81fcc6c3f93bf - src/nvidia/interface/deprecated/rmapi_deprecated.h
f7b69924dbdf53be6cd184583145726aa65d3acd - src/nvidia/interface/deprecated/rmapi_deprecated_utils.c
d81ef382635d0c4de47dfa3d709e0702f371ceb7 - src/nvidia/interface/rmapi/src/g_finn_rm_api.c
253baf641e4e29ede6a49129c2dd1415b7e5d9bd - src/nvidia/kernel/inc/nvpcf.h
6f9edcff7ad34c4e85ec7c0b8d79c175009d438c - src/nvidia/kernel/inc/objrpc.h
1feab39692ea8796ac7675f4780dfd51e6e16326 - src/nvidia/kernel/inc/objtmr.h
0cff83f4fdcc8d025cd68e0a12faaeead09fa03b - src/nvidia/kernel/inc/tmr.h
b5f3932b9f6e7223e8c755155b60be98fd0a21df - src/nvidia/kernel/inc/vgpu/rpc_global_enums.h
961ed81de50e67eadf163a3a8008ce1fde1d880c - src/nvidia/kernel/inc/vgpu/rpc_hal_stubs.h
31deee778df2651d3d21b4d9c8ab180b8dc1ff14 - src/nvidia/kernel/inc/vgpu/rpc_vgpu.h
6006a612fcd546de794676da19fc431ddd0410e5 - src/nvidia/kernel/inc/vgpu/rpc.h
3477a139633890d3fdd2e5e02044e1a293566e3d - src/nvidia/kernel/inc/vgpu/rpc_headers.h
9b8e6b29a48ff022dda092cc8139dbe5ac6dedd8 - src/nvidia/generated/g_rs_client_nvoc.c
d0a43a5d4941392b3c6c1b5a0d156edc26559ded - src/nvidia/generated/g_disp_inst_mem_nvoc.c
f1e98f21f75eaba821fe16f2410921a4fd7c54ee - src/nvidia/generated/g_mem_mgr_nvoc.h
b0089bee11caa0d8994b39eaecfb42ca3507de37 - src/nvidia/generated/g_syncpoint_mem_nvoc.h
c2eae693c1b8d8502db368048f3b1c45d0576dc5 - src/nvidia/generated/g_chips2halspec_nvoc.h
0b2233e5cb68257231dd94310559bc09635c8279 - src/nvidia/generated/g_generic_engine_nvoc.c
e41a55d75416e6d9978d2cf788553acdb9336afd - src/nvidia/generated/g_resource_nvoc.c
14336cd31573538728e0bf17941681b9d91d2b12 - src/nvidia/generated/g_gpu_access_nvoc.c
b18ed7a5d71571b57266995f0d30317814e8bd6e - src/nvidia/generated/g_gpu_access_nvoc.h
76b1f545e3712a2f8e7c31b101acd9dd682c52f8 - src/nvidia/generated/g_traceable_nvoc.c
42fac2ccb00006825e7d42a6b23264870365ace6 - src/nvidia/generated/g_gpu_user_shared_data_nvoc.h
b3b3ee6b514249e553187dc14a98f74bdd9fa6c6 - src/nvidia/generated/g_virt_mem_mgr_nvoc.h
fcb89aff81d5e2b0a4a39069356ee4644bf53b2b - src/nvidia/generated/g_os_nvoc.c
eefa27872e4acde78a18211b8ab51bc5436b6cfe - src/nvidia/generated/g_nv_debug_dump_nvoc.h
493a547850d9e7cdf74350de0e42aef2f66869a9 - src/nvidia/generated/g_client_resource_nvoc.h
3b08d4bb1612bb193cd2f26229b119cc43284879 - src/nvidia/generated/g_rs_server_nvoc.h
73a37ad59b9b13b61eb944748b6c2ba3cad7b630 - src/nvidia/generated/g_traceable_nvoc.h
19d73b04597bca6d3a7dd82d327e6cbf4a591a65 - src/nvidia/generated/g_eng_state_nvoc.c
0eb34617fea0cc6843d317ba7cea287483e39703 - src/nvidia/generated/rmconfig.h
17c69e14076324c230bbe68b55141089c1f4d10e - src/nvidia/generated/g_os_desc_mem_nvoc.h
1268ee54592c8ae1078b72bfaff882549efbcd3c - src/nvidia/generated/g_disp_capabilities_nvoc.c
bdb198b18c700dc396f73191a8e696d106a1f716 - src/nvidia/generated/g_resource_nvoc.h
b0f47afbc6aefce339db95801f48823989abad8a - src/nvidia/generated/g_mem_desc_nvoc.h
779103a57f68832641a7616ea8c5608780cfc155 - src/nvidia/generated/g_disp_objs_nvoc.h
2a3476812057692ef35f9658d24c275a1576f498 - src/nvidia/generated/g_sdk-structures.h
125b688444f16d9cb3902a9f79959c05c12397e3 - src/nvidia/generated/g_disp_sf_user_nvoc.c
dbf11a9f931cfac248c3e6006bedeadb3d062670 - src/nvidia/generated/g_gpu_group_nvoc.c
ecb4db5b676f0541c851ba9454577812e1a07023 - src/nvidia/generated/g_object_nvoc.c
9b4cf69383d0a7b7492b2fa28983cfe4d88c3263 - src/nvidia/generated/g_vaspace_nvoc.h
d3b89f97bb0f4c5c0ca44e74040aab24c70ae06f - src/nvidia/generated/g_generic_engine_nvoc.h
c1652e6cc404f23660ee440b61c6d0b9149ff593 - src/nvidia/generated/g_gpu_resource_nvoc.c
85580813dbcf78bf4aeecf5e55054447396dcfe3 - src/nvidia/generated/g_gpu_db_nvoc.c
a97bf85ce6681aae086e0415aecaebf0208bfebb - src/nvidia/generated/g_tmr_nvoc.h
31270057a91fcd2dc7dbf1abed9e3f67d8db1787 - src/nvidia/generated/g_rmconfig_private.h
e181d568b36f4d6e717d6d26c7bbe4b4ed968f4f - src/nvidia/generated/g_gpu_mgmt_api_nvoc.c
3b0e038829647cfe0d8807579db33416a420d1d2 - src/nvidia/generated/g_chips2halspec.h
4302502637f5c4146cb963801258444f2d8173e1 - src/nvidia/generated/g_allclasses.h
61cb019a28b25479d65022226623be2d20f32429 - src/nvidia/generated/g_nv_name_released.h
7f89931ecb53fb0b88da1be5489fe50e3d7897c3 - src/nvidia/generated/g_resserv_nvoc.h
ac3965eea078f1998c3a3041f14212578682e599 - src/nvidia/generated/g_vaspace_nvoc.c
a44899c21c77899b3b8deb7b2613b16841bbf397 - src/nvidia/generated/g_gpu_mgr_nvoc.c
631ac1d7bfa00f66e699937b8cabc0cbbc26d151 - src/nvidia/generated/g_rs_server_nvoc.c
67df2bc381609f290f173ea73f3e8125ac073888 - src/nvidia/generated/g_gpu_mgmt_api_nvoc.h
0e15fddc0426c42f3d22e5cb5609b5193adb7145 - src/nvidia/generated/g_standard_mem_nvoc.h
0a6b27d74e5e4ba872d77bfd369ddb5772abd8f8 - src/nvidia/generated/g_event_buffer_nvoc.h
9934a21ca6169499e471a2fc000c3eaee348391e - src/nvidia/generated/g_resource_fwd_decls_nvoc.h
aac0c7df733e179f2a5906ab66b302a5bee82cbe - src/nvidia/generated/g_gpu_db_nvoc.h
47ced25e3252d402b9a5c30115705d16651ab460 - src/nvidia/generated/g_object_nvoc.h
81f915ae199df67c1884bfc18f3d23f20941af6a - src/nvidia/generated/g_dce_client_nvoc.c
c8d6ddc934e0c4ae3fd2d2dc81d0d1a91c8b8d52 - src/nvidia/generated/g_disp_inst_mem_nvoc.h
b30dc7b4114007f7649e18a7be2d829a3752447a - src/nvidia/generated/g_mem_nvoc.c
33932ed2752329a63bcafd88f00e69203c3621c0 - src/nvidia/generated/g_gpu_mgr_nvoc.h
2156c006acf83494e55de3d5604e9234f73b2867 - src/nvidia/generated/g_eng_desc_nvoc.h
6742231d4f59cc03ed822b80fb3995d1821de488 - src/nvidia/generated/g_standard_mem_nvoc.c
a42b32adb0533fafb2de6b127c7e1939029cdeb5 - src/nvidia/generated/g_system_nvoc.c
a044b01f708a5690f1796579904539791e24d5a3 - src/nvidia/generated/g_hda_codec_api_nvoc.h
ddc0ac4e1d8b8aef15e147f1f85f8df37c196763 - src/nvidia/generated/g_hal_register.h
fc7f913eab7ef26b877606e0593928784c3121ec - src/nvidia/generated/g_device_nvoc.c
9c03069f964e4d628b68a4ab0cff3b44aee82bdd - src/nvidia/generated/g_rpc-structures.h
8db5b2345278ce409562ca35754447d353dd54d7 - src/nvidia/generated/g_rs_resource_nvoc.h
ad695d35b837b970b8f50a280d400ffed5067c0f - src/nvidia/generated/g_os_desc_mem_nvoc.c
14450b18d002d4e1786d4630ef4f1994c07ef188 - src/nvidia/generated/g_odb.h
93f9738c0e8aa715592306ddf023adf6b548dcc4 - src/nvidia/generated/g_nvh_state.h
dad5def7d6c24268ac1e1a75038cbf33900745ff - src/nvidia/generated/g_binary_api_nvoc.h
06094e14a41e58c8a687bc8b64197a73c0c2b61a - src/nvidia/generated/g_system_nvoc.h
92c99fd64caa9f78664ed1fd54313ee82e2cf9c7 - src/nvidia/generated/g_disp_channel_nvoc.h
e70cc806acae6fc1c3f4ffc283ded8351f3482c4 - src/nvidia/generated/g_hda_codec_api_nvoc.c
2239839c8a780a87e786439a49ab63e25d25001a - src/nvidia/generated/g_rmconfig_util.h
e3078050c80bf14c9f91f12b43eab48af94c9ec5 - src/nvidia/generated/g_disp_objs_nvoc.c
f9bdef39159a8475626a0edcbc3a53505a0ff80a - src/nvidia/generated/g_os_hal.h
57431742e2f1bbefc9142db49a84f4e8264e4673 - src/nvidia/generated/g_mem_list_nvoc.h
12cb2f4228fe81762587413c7f346f3d271d9b6b - src/nvidia/generated/g_eng_state_nvoc.h
bfb7c703aa0e55ed5df9310a233861e43ef5c828 - src/nvidia/generated/g_prereq_tracker_nvoc.h
734ea4782083e4a7b940722577dc75177446eed1 - src/nvidia/generated/g_io_vaspace_nvoc.c
8b5821085e5aabc00408e7a90e78b2471de6797e - src/nvidia/generated/g_os_nvoc.h
5c65c680b77a501fd98460c4ce8fecd7ed95be14 - src/nvidia/generated/g_mem_mgr_nvoc.c
cf2a81f40855ceb13b0dc18fb1ee790ba939bfb2 - src/nvidia/generated/g_event_buffer_nvoc.c
d47bc1508583e02dc8234efce85fb7803dbd3d97 - src/nvidia/generated/g_hypervisor_nvoc.h
35889e5f6bdc996fa95c76d05e7b8902328d450b - src/nvidia/generated/g_rs_client_nvoc.h
61d09dd789fc4159344cec4c02ff9db13cd246eb - src/nvidia/generated/g_hal_mgr_nvoc.h
af86a67a1c33acc193efa6dba8bc46ebe5dbb5eb - src/nvidia/generated/g_gpu_class_list.c
aac848bd48955659eb5e07fcac70e6fe3c3a137a - src/nvidia/generated/g_hal_nvoc.c
1ca8ad4d9216aef1df145358c48e7ca533927e25 - src/nvidia/generated/g_objtmr_nvoc.c
b35821f54f7ec965edd25a60e58d7639cd19df19 - src/nvidia/generated/g_hal_archimpl.h
97ce053e6b047ecd0803a7571d061516de9d95ff - src/nvidia/generated/g_hal_mgr_nvoc.c
972e9ba00890776dc3a4f51300cbcd73c1691c1d - src/nvidia/generated/g_rpc-message-header.h
906af83650985c58b63fe3e1f24b75b5ac62d90d - src/nvidia/generated/g_gpu_nvoc.c
431796f7485743a0848883a204676424b4a3b65f - src/nvidia/generated/g_hal.h
44bcd3503d90703a33a7bb9c75b41111d092c5f8 - src/nvidia/generated/g_client_resource_nvoc.c
142a5e1b07a3bbe2952b27f4a65a133f5a100dc3 - src/nvidia/generated/g_prereq_tracker_nvoc.c
3c7d16d75ef53c09d7076c55976e71fd17a3f483 - src/nvidia/generated/g_subdevice_nvoc.h
7c698deeb69b4e92af3c7c4e6fc6274b75dab05c - src/nvidia/generated/g_disp_channel_nvoc.c
9b0d4695e84ec959790dd553944cb44685c5c251 - src/nvidia/generated/g_event_nvoc.h
803eb8b520597468e3dc99ecd29ffc1027dfe4be - src/nvidia/generated/g_context_dma_nvoc.h
09597f23d6a5440258656be81e7e6709390128f8 - src/nvidia/generated/g_hal_private.h
b459db8ccf299f7bda0fa9fa18ef1e3aeb2996eb - src/nvidia/generated/g_gpu_user_shared_data_nvoc.c
170a42c047d0085873a48db0d83d59feb8dc327f - src/nvidia/generated/g_binary_api_nvoc.c
47f006ce959471f8ecd2a7b05d83d854610a521b - src/nvidia/generated/g_system_mem_nvoc.c
b9f25e208f5ea6f566dbd9cbcaaa30cd0786c31b - src/nvidia/generated/g_client_nvoc.h
31ee3939e0830f960aeb854827af0aace0dddb93 - src/nvidia/generated/g_kern_disp_nvoc.h
eb95c379eec668bfd697bcd4977d4f18da0b56bb - src/nvidia/generated/g_device_nvoc.h
1d66bab50a7d39faa2b0fec469a4512d2c7610d5 - src/nvidia/generated/g_rmconfig_util.c
a1bfb789c1e23bac2b7a31255b7d738e40a290f2 - src/nvidia/generated/g_mem_nvoc.h
b5d4219786bd77483ce70a770caac52db51566cc - src/nvidia/generated/g_ioaccess_nvoc.c
97bab26b95f21f4618fd023284b20dd4d5a76ad4 - src/nvidia/generated/g_disp_capabilities_nvoc.h
b378d336af4d5cb4b1fb13b85042fad1fe02f4cc - src/nvidia/generated/g_journal_nvoc.h
16c7821c01a4e728d66a25ca6eb824ce85ff908e - src/nvidia/generated/g_rs_resource_nvoc.c
6771b718fe182d524864f55fa23f145012205d5b - src/nvidia/generated/g_objtmr_nvoc.h
87c14e1c1a8f37f139f6a99efaf7752d6db48db5 - src/nvidia/generated/g_kern_disp_nvoc.c
4f3ff51033e4ef9491e8b345ffea36dfb5122055 - src/nvidia/generated/g_chips2halspec_nvoc.c
8a76494ebc5809ed30c31a9afa2a46bf2463e6e5 - src/nvidia/generated/g_dce_client_nvoc.h
e4ccb216aafed837a37fca90284b0a0413b3080d - src/nvidia/generated/g_kernel_head_nvoc.c
262192e794cba0bb120cbfe75ee037e868e34ef3 - src/nvidia/generated/g_subdevice_nvoc.c
71185f1534d3c53954c271566b610045aef3ed98 - src/nvidia/generated/g_system_mem_nvoc.h
549314acf103e21a4cab113114f719626202a19f - src/nvidia/generated/g_tmr_nvoc.c
c010d93fd293ec399a0cd05662a177e7251c7b1e - src/nvidia/generated/g_event_nvoc.c
693cd3e7b93e9377634800ff2b3669939ba10603 - src/nvidia/generated/g_kernel_head_nvoc.h
0097015ef25011bee849966ef5248d206ab0f816 - src/nvidia/generated/g_gpu_resource_nvoc.h
dc922421b0f41b7b8f0219caa623c099fc3f083d - src/nvidia/generated/g_ioaccess_nvoc.h
5a46be3060122eca672dc3bf11bdb6e68700b5e4 - src/nvidia/generated/g_gpu_halspec_nvoc.h
10645f82dd031d0aa6f4a3dfc039ef776f2fdee9 - src/nvidia/generated/g_hal_nvoc.h
574adefb17ee3e2a7d85262f8ce4d8b4bc4367b4 - src/nvidia/generated/g_gpu_halspec_nvoc.c
653b72892f7c3ce7fd3e28690863ef89826b5314 - src/nvidia/generated/g_context_dma_nvoc.c
3b1586e0aebb66d31190be64b1109232ee3467bf - src/nvidia/generated/g_ref_count_nvoc.h
155b6249c4fd472218cef640fa0a665cec10bfa4 - src/nvidia/generated/g_disp_sf_user_nvoc.h
2cac1d138a8bcf99e70068f50698f6cdd3dc57dd - src/nvidia/generated/g_syncpoint_mem_nvoc.c
6aea089965620df057ab6b900496590ca26772b2 - src/nvidia/generated/g_virt_mem_mgr_nvoc.c
8e8c58d6e99de01acf926026506ab91499109dd4 - src/nvidia/generated/g_gpu_nvoc.h
8f1b0c4a6b75280b5155aef8490c95237bbf6f97 - src/nvidia/generated/g_gpu_group_nvoc.h
d960a819d29d7e968eaab0e7a29897426b7ba646 - src/nvidia/generated/g_io_vaspace_nvoc.h
47bed9b41213c837c4ca08aaaefe079b84dfd52f - src/nvidia/generated/g_client_nvoc.c
36b3993cc05598590bc6356bab5ea7c0a2efd2f0 - src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c
719d890f8160efe57e4c3267db65885ebb66cd03 - src/nvidia/src/kernel/gpu_mgr/gpu_db.c
37d1e3dd86e6409b8e461f90386e013194c9e4d1 - src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c
d5d8ff429d3bda7103bafcb2dca94678efc8ddd8 - src/nvidia/src/kernel/gpu_mgr/gpu_group.c
4e1be780ac696a61f056933e5550040a2d42c6bd - src/nvidia/src/kernel/gpu/gpu_device_mapping.c
381cbcd5c362e5c5563806bfff2fb60eec80eda2 - src/nvidia/src/kernel/gpu/gpu.c
cb9af9dcd3931eb62bfdb4872c4e3001ff9def26 - src/nvidia/src/kernel/gpu/gpu_rmapi.c
bfcdb98c6541f95c3a37aaa25e9ca51ec2a0b9c1 - src/nvidia/src/kernel/gpu/eng_state.c
6fa4ba2da905692cd39ec09054f2bd6621aa2a7a - src/nvidia/src/kernel/gpu/gpu_resource_desc.c
ceb3639a86578b9d823a00a9a6553f278acb558f - src/nvidia/src/kernel/gpu/gpu_resource.c
bca16e8ff1697e953a54a3a3de4273f5584ac0df - src/nvidia/src/kernel/gpu/device_ctrl.c
493e90398cb78a3f24d2f271bbedebd8c682d7c1 - src/nvidia/src/kernel/gpu/gpu_gspclient.c
1653c7b99cfc86db6692d9d8d6de19f1b24b9071 - src/nvidia/src/kernel/gpu/gpu_uuid.c
a4225e0074c1aee00d082f69231d1d8e7d812347 - src/nvidia/src/kernel/gpu/gpu_access.c
207b32d1423f3666feeedb85d38fa7a924c1f7a9 - src/nvidia/src/kernel/gpu/device_share.c
29458992dabff6c2550e0202b11dc47cd7f66cd5 - src/nvidia/src/kernel/gpu/gpu_engine_type.c
89543f7085fbc2ca01b5a8baae33b5de921c79e9 - src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c
3229e9f5d2779147d337e9c6a7b6f518079f1709 - src/nvidia/src/kernel/gpu/gpu_timeout.c
c2228fbf8366e197aec9bb75ad2c01b267aedeb7 - src/nvidia/src/kernel/gpu/gpu_user_shared_data.c
cf85f6ecacf40fa649de2c443595e2313fa364d6 - src/nvidia/src/kernel/gpu/device.c
cffbdcaacd4fd5be809fc81bd76a384920781391 - src/nvidia/src/kernel/gpu/timer/timer.c
17e9f2af953c3cf96d0eee9cfea3aad6e540c3cf - src/nvidia/src/kernel/gpu/timer/timer_ostimer.c
5a053caaa8eb655d9e0f7ab42ec1b3f0b72fb787 - src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c
7f9874d9af6b937dac888a3ebb55a82c2a5de71b - src/nvidia/src/kernel/gpu/dce_client/dce_client.c
1f4d15f959df38f4f6ea48c7b10fc859c6e04b12 - src/nvidia/src/kernel/gpu/audio/hda_codec_api.c
d852ad5a6af96e173832833379ae9d38baaed47f - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c
086e9a51757c3989dfe0bf89ca6c0b9c7734104a - src/nvidia/src/kernel/gpu/subdevice/generic_engine.c
c9ec73f6e2f2e87371b97ec47a65c3874dd4949a - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c
3d0b8b3dabe8aab7884f1ddec7ef4f9715de31ad - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c
ba49fc89b1a453aca3a79f51d3250c7c0a667327 - src/nvidia/src/kernel/gpu/subdevice/subdevice.c
8ce824bfdb06f08567a29ee5e175106c32611182 - src/nvidia/src/kernel/gpu/disp/disp_channel.c
6437dd659a38c62cd81fb59f229bd94e59f37e71 - src/nvidia/src/kernel/gpu/disp/disp_sf_user.c
c3d94d9a49e1c0dffd8987d9b007a9cef91be561 - src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c
4b783bc279ea35c4b7e101a668d136f1a12d9030 - src/nvidia/src/kernel/gpu/disp/kern_disp.c
681499b2c86582cd110ede079d757c5797c4b458 - src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c
1533c870f3e6521f180eb967f7144a62a727d125 - src/nvidia/src/kernel/gpu/disp/disp_objs.c
ceb516c8064e1df2d18897f98f5c8ea58e907973 - src/nvidia/src/kernel/gpu/disp/disp_capabilities.c
84fdcdf90d9a656a572774fb8330f7a1fa9f59e2 - src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c
629566bf98be863b12e6dc6aab53d8f5ea13988c - src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c
0156d5407cf877b8f5c79823d3c83ead54b6385c - src/nvidia/src/kernel/gpu/disp/head/kernel_head.c
8a418dce9fbeb99d5d6e175ed8c88811866f3450 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c
e7f143390807f3f4d4bf6586068378a9f5a75d57 - src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c
611098328a114b66c6dcea4a8ea710887db006c4 - src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c
3c463773f2f970b1764edb231d349164fe4341fc - src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c
c6e78a54a1b8d4ca6fe4b01d83e3199ea41606d7 - src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c
f30ae0e8e1e32d0adb7e52b8995c277637b6bc2a - src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c
2bb921b462c4b50d1f42b39b4728374c7433c8cb - src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c
c8c4af5a28740f1e66ff4e6e9c47fc6c981ce46b - src/nvidia/src/kernel/os/os_timer.c
0e0c1b862bdba245297ffd4f725001fa2439cddf - src/nvidia/src/kernel/os/os_sanity.c
1dc0be7577b4f7914743379943bcf0d5e236eb0b - src/nvidia/src/kernel/os/os_stubs.c
1fad27934185df50c1d91b5536d0df437618382f - src/nvidia/src/kernel/os/os_init.c
df7ac5873dc42eafc335a1ddba095fbc8cd1d708 - src/nvidia/src/kernel/core/locks_common.c
61691e21cdabc8919d7b41142c97f510db9c0cc6 - src/nvidia/src/kernel/core/locks_minimal.c
8adbda67510ec9fab31edd681c51ddfb7b190d7d - src/nvidia/src/kernel/core/thread_state.c
db40522057f29afe6624e33468879e5e9813f07c - src/nvidia/src/kernel/core/system.c
afbf166f49a964873a13e19b787cae33813f9de5 - src/nvidia/src/kernel/core/hal_mgr.c
8eac3ea49f9a53063f7106211e5236372d87bdaf - src/nvidia/src/kernel/core/hal/info_block.c
afa03f17393b28b9fc791bf09c4d35833447808d - src/nvidia/src/kernel/core/hal/hal.c
c38181e1361a59e3252ae446a0e8761363db35e7 - src/nvidia/src/kernel/core/hal/hals_all.c
b3a29311cc22e2dae686f8ed2df6bc828aa826cf - src/nvidia/src/kernel/diagnostics/profiler.c
fc39cb6ac6e9d73bd1ab98890e6b253217d6cc90 - src/nvidia/src/kernel/diagnostics/nvlog_printf.c
8192d2364dc63171b51f6ced5b1726125f1a8ff6 - src/nvidia/src/kernel/diagnostics/nvlog.c
2aa207714971c97d9486c1ed48a3123e40b6c4ff - src/nvidia/src/kernel/rmapi/rmapi_cache.c
79a130d1e1e10881ea1e5f5d8dfcb84ceb53b0f2 - src/nvidia/src/kernel/rmapi/client_resource.c
0bded8ce6e3e81de589c4e6fbb611085c705dfcd - src/nvidia/src/kernel/rmapi/event_notification.c
7fdf8e379fd2a5eeae0981bf7328163379279c29 - src/nvidia/src/kernel/rmapi/rmapi_stubs.c
fb2a191dc60c1232c198b1ff9a302883302ca526 - src/nvidia/src/kernel/rmapi/resource_list_required_includes.h
bac6ef63d11e87f9a4af3318d5be6860f861a0b9 - src/nvidia/src/kernel/rmapi/rpc_common.c
25ac4188ba55b098321700828a9386a8a6e9f80b - src/nvidia/src/kernel/rmapi/event_buffer.c
a418377318e121a2b2f83f3961da74f09a2123d0 - src/nvidia/src/kernel/rmapi/event.c
5166298f09865066535a3e04c111354ceaefbcbc - src/nvidia/src/kernel/rmapi/control.c
ac6a5b3adf15eac4a7bd9ae24981f6f5fc727097 - src/nvidia/src/kernel/rmapi/deprecated_context.h
a2ad052692006f70e97fd3d186f19c7ddfe80c4c - src/nvidia/src/kernel/rmapi/deprecated_context.c
19d3213dc7471e7a7d4ff379494f724869638d28 - src/nvidia/src/kernel/rmapi/mapping_cpu.c
8cc578a1e5f534e911ba4b49b58352ef9ea57772 - src/nvidia/src/kernel/rmapi/client.c
cb6835f318c0d871d72185e0ac410d03d788654a - src/nvidia/src/kernel/rmapi/binary_api.c
c59a08852553b5843beec8138caa8e2141d3d759 - src/nvidia/src/kernel/rmapi/resource_desc_flags.h
d964061679e6f3da0e6e6c3b8e0eb93eb31fd3dc - src/nvidia/src/kernel/rmapi/resource.c
96f763eef08f1954d3f07639053db2cde2a01e39 - src/nvidia/src/kernel/rmapi/rmapi.c
b4dc306ae4d4f8850571e2fbbed0114d63f1ba93 - src/nvidia/src/kernel/rmapi/entry_points.c
3b53d6b8ef183702327b4bc3a96aa06f67475ddc - src/nvidia/src/kernel/rmapi/param_copy.c
7a4e3a3369efd50c9d80eaa73c48852edd6e6966 - src/nvidia/src/kernel/rmapi/rs_utils.c
f04faaeeeda2d799207fd7e0877a2bb6d5363c13 - src/nvidia/src/kernel/rmapi/mapping.c
b001f31a373973b7a4568c411e261aa8f7487441 - src/nvidia/src/kernel/rmapi/alloc_free.c
d6b3b8ac45ede7530028848749820d2cbe0f5d55 - src/nvidia/src/kernel/rmapi/resource_desc.h
ea7b6b816ca16af62c0b2040b8a76c6c10a16053 - src/nvidia/src/kernel/rmapi/resource_list.h
b28d140f1bfe0aac770127e8391400d44d5582e3 - src/nvidia/src/kernel/rmapi/rmapi_finn.c
682977753c878ccee6279e539cf11bee2b548752 - src/nvidia/src/kernel/rmapi/resource_desc.c
9b1453ed00d80034a0d2e3e918d31dbe939177b0 - src/nvidia/src/kernel/rmapi/rmapi_utils.c
bb67ea7ef87ff0148473ebf1165e3afd59d63b20 - src/nvidia/src/kernel/rmapi/sharing.c
c4eeb6d566366ab2b9532f109632d4e14539332c - src/nvidia/src/kernel/rmapi/entry_points.h
a14b8d9a6e029d8a5c571283b520645a562b5c2c - src/nvidia/src/kernel/mem_mgr/vaspace.c
5b9048e62581a3fbb0227d1a46c4ee8d8397bf5b - src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h
623dad3ec0172ed7b3818caece0db5687d587ff3 - src/nvidia/src/kernel/mem_mgr/os_desc_mem.c
38b2ed45dc7d7d7172f6d0fd2be31b43e49e41d5 - src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c
ed8316b9cbfe13336af1f8e4cd0b492a21af44b9 - src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c
e75d8a0eb4c22e11ececd24a43ad034bb76f12ce - src/nvidia/src/kernel/mem_mgr/standard_mem.c
630200d06b6588d7fa8c5b1ea16146e8281163d7 - src/nvidia/src/kernel/mem_mgr/io_vaspace.c
223b7541c7904067914a01e4aa3e589fd1690cb6 - src/nvidia/src/kernel/mem_mgr/system_mem.c
3080c8404e554eba5eac3f6482ed6094d25ccdef - src/nvidia/src/kernel/mem_mgr/mem.c
24928c8b4e8b238f1921a1699f3af59bcff994ed - src/nvidia/src/lib/base_utils.c
a6134d6f5f3e3b0b4c274eb3b2d0a146644c842b - src/nvidia/src/lib/zlib/inflate.c
c8f4cf70923179b7c2aaa6bd6b3eedc195655abe - src/nvidia/src/libraries/containers/vector.c
8991136ccb86f511f60254955ac3d86072b071f2 - src/nvidia/src/libraries/containers/map.c
864bd314450490b687a652335a44fb407835152c - src/nvidia/src/libraries/containers/ringbuf.c
6553a1c368e9d9709fb89b5e43524757f786c58b - src/nvidia/src/libraries/containers/queue.c
5940d69147d1376b03cd96fa69796360b279ae97 - src/nvidia/src/libraries/containers/list.c
23c328fc27ad0317efe6ccd2da71cfd9db9da236 - src/nvidia/src/libraries/containers/multimap.c
ea3254ebd278d9efb7dd348e52370d780c23cd94 - src/nvidia/src/libraries/containers/eheap/eheap_old.c
9c80df385a47834da4f92dc11053ca40a37a7fe7 - src/nvidia/src/libraries/containers/btree/btree.c
cccb1fedee02a240692688090e00ac1e289dec9e - src/nvidia/src/libraries/tls/tls.c
0e7a9b9c697f260438ca5fda8527b0f4edc2de13 - src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c
619f9f6df576ad20d32c30fd9a69733dc5c19da8 - src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c
ee7ea17829dfbbf9e6cd8d6c6fb2ada086b5d36e - src/nvidia/src/libraries/ioaccess/ioaccess.c
702c73446bba35f88249cfe609ac0ca39dbd80ff - src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c
f0c486c1ad0f7d9516b13a02d52b4d857d8865b1 - src/nvidia/src/libraries/nvport/util/util_compiler_switch.c
a045a19d750d48387640ab659bb30f724c34b8c8 - src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c
d047abe66dd8a459c15224cc056fc6f2176b0c6a - src/nvidia/src/libraries/nvport/util/util_gcc_clang.c
b387005657f81538fab5962d4aabbc5dc681aa1b - src/nvidia/src/libraries/nvport/core/core.c
9b69fbf3efea6ba58f9ba7cb0189c9264c994657 - src/nvidia/src/libraries/nvport/sync/sync_common.h
eb8b5fcab51c47f58a37958ddb38ff90991bcbbe - src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c
b2ae1406c94779f575d3e2233a7ab248ac10e74f - src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h
099c17e5931d5d881d8248ec68041fa0bbc2a9bc - src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c
a305654bafc883ad28a134a04e83bbd409e0fc06 - src/nvidia/src/libraries/nvport/cpu/cpu_common.h
9ca28a5af5663dec54b4cd35f48a8a3d8e52e25f - src/nvidia/src/libraries/nvport/cpu/cpu_common.c
8f41e7127a65102f0035c03536c701b7ecdaa909 - src/nvidia/src/libraries/nvport/string/string_generic.c
caff00b37e7f58fde886abcc2737c08526fa089e - src/nvidia/src/libraries/nvport/memory/memory_generic.h
c5a16e5bb7d304ffe5e83d7b27226cbecdbc7ce1 - src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c
3e3ab114d56dfcecc2886d8f9cdb8f365c5093c7 - src/nvidia/src/libraries/nvport/memory/memory_tracking.c
522da5465e5596d48cf6393c329811f3c708be19 - src/nvidia/src/libraries/resserv/src/rs_resource.c
1f2e9d09e658474b36d0b0ecd9380d0d2bcc86b2 - src/nvidia/src/libraries/resserv/src/rs_domain.c
0c9581aa68a77cb9977a7fbcfd2077ccb618206e - src/nvidia/src/libraries/resserv/src/rs_access_rights.c
f55556cd2392f55f2609ef69fca1caf2dd348e3f - src/nvidia/src/libraries/resserv/src/rs_server.c
310a8d3442285113f4ba672ba7fcc7f2aa295c6a - src/nvidia/src/libraries/resserv/src/rs_client.c
dac54d97b38ad722198ec918668f175dc5122e4e - src/nvidia/src/libraries/resserv/src/rs_access_map.c
cf48c6335eb7ff27cd7cae0faad77dd98669ad95 - src/nvidia/src/libraries/utils/nvassert.c
d3e5f13be70c8e458401ec9bdad007dfadedcc11 - src/nvidia/src/libraries/nvbitvector/nvbitvector.c
4cfe1ebd2ad6968ed513025aed61ecf2127aa683 - src/nvidia/src/libraries/nvoc/src/runtime.c
b417d06ed1845f5ed69181d8eb9de6b6a87fa973 - src/nvidia/arch/nvalloc/common/inc/nv-firmware.h
d1089d8ee0ffcdbf73a42d7c4edb90769aa79d8c - src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h
499e72dad20bcc283ee307471f8539b315211da4 - src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h
1e89b4a52a5cdc6cac511ff148c7448d53cf5d5c - src/nvidia/arch/nvalloc/unix/include/os_custom.h
507d35d1d4c5ba94ef975f75e16c63244d6cd650 - src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h
2d644a3f78bcda50e813b25156e9df07ec6da7b8 - src/nvidia/arch/nvalloc/unix/include/nv.h
3c61881e9730a8a1686e422358cdfff59616b670 - src/nvidia/arch/nvalloc/unix/include/nv_escape.h
e69045379ed58dc0110d16d17eb39a6f600f0d1d - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-lockless-diag.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h
1a98a2aaf386cd3d03b4b5513d6a511c60f71c2c - src/nvidia/arch/nvalloc/unix/include/nv-reg.h
5f2a30347378f2ed028c9fb7c8abea9b6032141c - src/nvidia/arch/nvalloc/unix/include/osapi.h
4750735d6f3b334499c81d499a06a654a052713d - src/nvidia/arch/nvalloc/unix/include/nv-caps.h
ae7d5cb2c57beeea12724e09d957e233a71c12a1 - src/nvidia/arch/nvalloc/unix/include/nv-priv.h
1d8b347e4b92c340a0e9eac77e0f63b9fb4ae977 - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h
9c7b09c55aabbd670c860bdaf8ec9e8ff254b5e9 - src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h
7188b83b28051b40cda60f05cacfa12b94ade4dc - src/nvidia/arch/nvalloc/unix/include/osfuncs.h
de6913c5e5092a417530ac9f818497824eab7946 - src/nvidia/arch/nvalloc/unix/include/os-interface.h
ddfedb3b81feb09ea9daadf1a7f63f6309ee6e3b - src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h
a28937330829b4f27a9da5e2c3776ceb293b6085 - src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c
6ca29f3d6b38fb5d05ff222cd1b79ade811a74b2 - src/nvidia/arch/nvalloc/unix/src/osunix.c
8f725a01c2a29658580936a87bdd33308030a332 - src/nvidia/arch/nvalloc/unix/src/os.c
866073d8caaa58055268aa5b3548eec6e1168d04 - src/nvidia/arch/nvalloc/unix/src/exports-stubs.c
63edc719390a814eb70290e709634d133ad198cc - src/nvidia/arch/nvalloc/unix/src/osmemdesc.c
690927567b5344c8030e2c52d91f824bb94e956c - src/nvidia/arch/nvalloc/unix/src/registry.c
eccfc4f261fd8531254eb2961120073aac9847db - src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c
4971626589ae66cc273ad11b80f0ab875fb39c05 - src/nvidia/arch/nvalloc/unix/src/osapi.c
68d80083483bf4976d6d83153a3880e5949e0824 - src/nvidia/arch/nvalloc/unix/src/osinit.c
69d2719c759456a22ccc4de470e5d15cf0c3d26c - src/nvidia/arch/nvalloc/unix/src/escape.c
b5b409625fde1b640e4e93276e35248f0fccfa4c - src/nvidia/arch/nvalloc/unix/src/gcc_helper.c
11c6d988bccbdf49ac241d77e6363c7843a0191f - src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c
8ef620afdf720259cead00d20fae73d31e59c2f7 - src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h
5b151d0d97b83c9fb76b76c476947f9e15e774ad - src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h
ea32018e3464bb1ac792e39227badf482fa2dc67 - src/nvidia/inc/kernel/gpu_mgr/gpu_group.h
2c48d7335bdb0b7ea88b78216c0aeab2e11e00c1 - src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h
e188d9f2d042ffe029b96d8fbb16c79a0fc0fb01 - src/nvidia/inc/kernel/gpu_mgr/gpu_db.h
ac5842e58bf82bb8f0b738695f9b459709f03b92 - src/nvidia/inc/kernel/gpu/gpu_shared_data_map.h
f2947fefcaf0611cd80c2c88ce3fdea70953c1ed - src/nvidia/inc/kernel/gpu/gpu_child_class_defs.h
a9c2b16261b46eb0f86fc611b8b3b5118e2b4e59 - src/nvidia/inc/kernel/gpu/gpu_acpi_data.h
76b24227c65570898c19e16bf35b2cad143f3d05 - src/nvidia/inc/kernel/gpu/gpu.h
ce5439e2066933d7d1045b7813ef0195b55e78fc - src/nvidia/inc/kernel/gpu/gpu_engine_type.h
7010ff346c27b6453c091f5577672b8b1821808d - src/nvidia/inc/kernel/gpu/gpu_access.h
ce3302c1890e2f7990434f7335cb619b12dee854 - src/nvidia/inc/kernel/gpu/gpu_resource_desc.h
28d0d82b58ef13662e8896d3bbc42d340836294e - src/nvidia/inc/kernel/gpu/gpu_user_shared_data.h
10ba0b9d4c67c8027b391073dab8dc4388f32fd7 - src/nvidia/inc/kernel/gpu/nvbitmask.h
bf894a769c46d5d173e3875cd9667bb3fe82feb9 - src/nvidia/inc/kernel/gpu/gpu_timeout.h
c33ab6494c9423c327707fce2bcb771328984a3c - src/nvidia/inc/kernel/gpu/gpu_halspec.h
6b27c9edf93f29a31787d9acaaefb2cefc31e7d4 - src/nvidia/inc/kernel/gpu/gpu_device_mapping.h
f17b704f2489ffedcc057d4a6da77c42ece42923 - src/nvidia/inc/kernel/gpu/gpu_resource.h
1938fd2511213c8003864d879cf1c41ae1169a5f - src/nvidia/inc/kernel/gpu/gpu_uuid.h
0d29e997f13d314ea320898ffb40b7a3a58898e2 - src/nvidia/inc/kernel/gpu/gpu_child_list.h
0e8353854e837f0ef0fbf0d5ff5d7a25aa1eef7c - src/nvidia/inc/kernel/gpu/eng_state.h
57a4a0d006588395c0b8b6d447acd7b4a9eeeb30 - src/nvidia/inc/kernel/gpu/kern_gpu_power.h
426c6ab6cecc3b1ba540b01309d1603301a86db1 - src/nvidia/inc/kernel/gpu/eng_desc.h
ce4e0f7177f46f4fc507a68b635e5395a3f7dde6 - src/nvidia/inc/kernel/gpu/dce_client/dce_client.h
97d0a067e89251672f191788abe81cf26dcb335f - src/nvidia/inc/kernel/gpu/device/device.h
1e3bebe46b7f2f542eedace554a4156b3afb51f1 - src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h
24d01769b39a6dd62574a95fad64443b05872151 - src/nvidia/inc/kernel/gpu/subdevice/subdevice.h
efc50bb2ff6ccf1b7715fd413ca680034920758e - src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h
61711ed293ee6974a6ed9a8a3732ae5fedcdc666 - src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h
576216219d27aa887beeccefc22bcead4d1234d7 - src/nvidia/inc/kernel/gpu/disp/kern_disp.h
51a209575d3e3fe8feb7269ece7df0846e18ca2a - src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h
74bc902cd00b17da3a1dfa7fd3ebc058de439b76 - src/nvidia/inc/kernel/gpu/disp/disp_channel.h
be7da8d1106ee14ff808d86abffb86794299b2df - src/nvidia/inc/kernel/gpu/disp/disp_objs.h
b39826404d84e0850aa3385691d8dde6e30d70d4 - src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h
277a2719f8c063037c6a9ed55ade2b1cb17f48ae - src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h
5179f01acf7e9e251552dc17c0dcd84f7d341d82 - src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h
f758ea5f9cbd23a678290ef0b8d98d470e3499e0 - src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h
9a33a37c6cea9bad513aa14c942c689f28f7c0d8 - src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h
7b7cf3b6459711065d1b849bf5acaea10b6400ca - src/nvidia/inc/kernel/gpu/intr/intr_common.h
889ba18a43cc2b5c5e970a90ddcb770ce873b785 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h
c2957c7f40cc454ba12fd954397fcea5d95ccae5 - src/nvidia/inc/kernel/gpu/mem_mgr/rm_page_size.h
e4c67260b5cb693d695ad3d8aa96aaed45688322 - src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h
6756126ddd616d6393037bebf371fceacaf3a9f1 - src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h
983bf02af93d39384c8b3ef0306193b63d8e82d9 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h
20416f7239833dcaa743bbf988702610e9251289 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h
9cef17543abaa167299c57e8f043cb4b975cf640 - src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h
70c31f5c6997542d0a4693b4ad7a6539cc3ec421 - src/nvidia/inc/kernel/gpu/gsp/message_queue.h
408c0340350b813c3cba17fd36171075e156df72 - src/nvidia/inc/kernel/os/os.h
f60f647bcf307f7639bccb99cb0244c7314115a1 - src/nvidia/inc/kernel/os/os_stub.h
c8496199cd808ed4c79d8e149961e721ad96714e - src/nvidia/inc/kernel/os/capability.h
cda75171ca7d8bf920aab6d56ef9aadec16fd15d - src/nvidia/inc/kernel/os/nv_memory_type.h
5e9928552086947b10092792db4a8c4c57a84adf - src/nvidia/inc/kernel/platform/acpi_common.h
3e11362627f9ad55e7d657da7929562230220591 - src/nvidia/inc/kernel/platform/sli/sli.h
b5859c7862fb3eeb266f7213845885789801194a - src/nvidia/inc/kernel/core/system.h
42596ff1ef62df0b439e8a1e73c71b495dcf311a - src/nvidia/inc/kernel/core/printf.h
37f267155ddfc3db38f110dbb0397f0463d055ff - src/nvidia/inc/kernel/core/strict.h
bdc4ab675c6f6c4bd77c3aaf08aa5c865b186802 - src/nvidia/inc/kernel/core/hal.h
93f40859dc710fd965a643da1d176790cc8886d5 - src/nvidia/inc/kernel/core/locks.h
b00302aec7e4f4e3b89a2f699f8b1f18fc17b1ba - src/nvidia/inc/kernel/core/hal_mgr.h
bffae4da6a1f9b7dc7c879587fd674b49b46dac1 - src/nvidia/inc/kernel/core/core.h
457c02092adfc1587d6e3cd866e28c567acbc43a - src/nvidia/inc/kernel/core/info_block.h
ce992cb08e286a88c491ee8e64019ad5f8493d1b - src/nvidia/inc/kernel/core/thread_state.h
2b41b4346b7d07ca8d505574ea0f9aad6910dd69 - src/nvidia/inc/kernel/core/prelude.h
3a28bf1692efb34d2161907c3781401951cc2d4f - src/nvidia/inc/kernel/diagnostics/journal_structs.h
7e75b5d99376fba058b31996d49449f8fe62d3f0 - src/nvidia/inc/kernel/diagnostics/profiler.h
7615ac3a83d0ad23b2160ff8ad90bec9eb1f3c6c - src/nvidia/inc/kernel/diagnostics/journal.h
b259f23312abe56d34a8f0da36ef549ef60ba5b0 - src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h
fd780f85cb1cd0fd3914fa31d1bd4933437b791d - src/nvidia/inc/kernel/diagnostics/tracer.h
c6efd51b8b8447829a0867cd7fb7a5a5a2fb1e3d - src/nvidia/inc/kernel/diagnostics/traceable.h
2b23f2dbd8f3f63a17a1b63ebb40a2fd7fd8801a - src/nvidia/inc/kernel/rmapi/alloc_size.h
b4bae9ea958b4d014908459e08c93319784c47dd - src/nvidia/inc/kernel/rmapi/event.h
99a27d87c7f1487f8df5781d284c2e9a83525892 - src/nvidia/inc/kernel/rmapi/binary_api.h
2baec15f4c68a9c59dd107a0db288e39914e6737 - src/nvidia/inc/kernel/rmapi/client.h
aab23ad58777406fa75b55778adc747f17c1afdb - src/nvidia/inc/kernel/rmapi/rs_utils.h
7646fc9f1d17b29747b457655d65f7cae80ccc33 - src/nvidia/inc/kernel/rmapi/control.h
4453fe6463e3155063f2bdbf36f44697606a80a5 - src/nvidia/inc/kernel/rmapi/client_resource.h
497492340cea19a93b62da69ca2000b811c8f5d6 - src/nvidia/inc/kernel/rmapi/event_buffer.h
ac9288d75555180c1d5dd6dd7e0e11fb57a967f2 - src/nvidia/inc/kernel/rmapi/exports.h
7e1200e609082316ed4bc2d0d925e15396b695a5 - src/nvidia/inc/kernel/rmapi/mapping_list.h
6f0f62525d2b966a24adaaabf19e79e6efc4e572 - src/nvidia/inc/kernel/rmapi/rmapi_utils.h
2724476b61b1790f1b7c293cc86e8a268125e11c - src/nvidia/inc/kernel/rmapi/param_copy.h
1399c6dc08b96577bb778e66730e7f4bcf8e7256 - src/nvidia/inc/kernel/rmapi/rmapi.h
61e3704cd51161c9804cb168d5ce4553b7311973 - src/nvidia/inc/kernel/rmapi/resource.h
a92dbf2870fe0df245ea8967f2f6a68f5075ecaf - src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h
a5f49a031db4171228a27482d091283e84632ace - src/nvidia/inc/kernel/mem_mgr/system_mem.h
0ce5d6370c086d2944b2e8d31ff72a510d98dc8f - src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h
d15991bc770c5ab41fe746995294c5213efa056b - src/nvidia/inc/kernel/mem_mgr/io_vaspace.h
02d6a37ef1bb057604cb98a905fa02429f200c96 - src/nvidia/inc/kernel/mem_mgr/mem.h
5ae08b2077506cbc41e40e1b3672e615ce9d910f - src/nvidia/inc/kernel/mem_mgr/vaspace.h
4c386104eaead66c66df11258c3f1182b46e96ee - src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h
2d4afabd63699feec3aea5e89601db009fc51a08 - src/nvidia/inc/kernel/mem_mgr/standard_mem.h
1a08e83fd6f0a072d6887c60c529e29211bcd007 - src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h
5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - src/nvidia/inc/os/dce_rm_client_ipc.h
ec26741397ebd68078e8b5e34da3b3c889681b70 - src/nvidia/inc/lib/base_utils.h
fff3ebc8527b34f8c463daad4d20ee5e33321344 - src/nvidia/inc/lib/ref_count.h
f8d9eb5f6a6883de962b63b4b7de35c01b20182f - src/nvidia/inc/lib/protobuf/prb.h
601edb7333b87349d791d430f1cac84fb6fbb919 - src/nvidia/inc/lib/zlib/inflate.h
083667047714a008219fa41b3a7deb9803bbe48a - src/nvidia/inc/libraries/poolalloc.h
8dd7f2d9956278ed036bbc288bff4dde86a9b509 - src/nvidia/inc/libraries/eventbufferproducer.h
67ecfa8adcb2b5bb5eb8e425bc5889390fd77ca8 - src/nvidia/inc/libraries/containers/list.h
fc211c8276ebcee194080140b5f3c30fba3dfe49 - src/nvidia/inc/libraries/containers/queue.h
4c8c52993d4a99f7552cd10e8c1fc8aea0330a4a - src/nvidia/inc/libraries/containers/vector.h
5cabf8b70c3bb188022db16f6ff96bcae7d7fe21 - src/nvidia/inc/libraries/containers/multimap.h
9f76ab27650b137566bf49202857c3195674d44a - src/nvidia/inc/libraries/containers/map.h
1dacc1c1efc757c12e4c64eac171474a798b86fd - src/nvidia/inc/libraries/containers/eheap_old.h
63a8244e13f9217461f624ab46281716ef42b20f - src/nvidia/inc/libraries/containers/ringbuf.h
5f116730f8b7a46e9875850e9b6ffb2a908ad6c2 - src/nvidia/inc/libraries/containers/btree.h
a23790cded20fe2347c19083f2b7430aeb26ab27 - src/nvidia/inc/libraries/containers/type_safety.h
2eb9b0121765c0a3e1085f41a3d47c89e7d5dcb0 - src/nvidia/inc/libraries/nvlog/nvlog.h
f97ea1dce9d593ecc599df510c98054db2b2d1a2 - src/nvidia/inc/libraries/nvlog/nvlog_printf.h
d2c035e67e295b8f33f0fc52d9c30e43c5d7c2ba - src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h
7f623508b3f3631ce89dad6d8762f593b1ac0d71 - src/nvidia/inc/libraries/tls/tls.h
56b8bae7756ed36d0831f76f95033f74eaab01db - src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h
a5e6f98ac5fb53fd26ee429c65b73fa1a4715631 - src/nvidia/inc/libraries/ioaccess/ioaccess.h
7d8efe42c402cbbdd1710ef1f7498bf3e883a743 - src/nvidia/inc/libraries/nvport/string.h
6065fa9a525d80f9b61acb19e476066823df0700 - src/nvidia/inc/libraries/nvport/sync.h
a1d93b6ec8ff01a3c2651e772a826ee11a7781d7 - src/nvidia/inc/libraries/nvport/util.h
4e25b80a74aad3f6403d7c34cd55f0ed58824888 - src/nvidia/inc/libraries/nvport/cpu.h
0fe8c0bd2791b105baf7cad7a90797ed9f743115 - src/nvidia/inc/libraries/nvport/memory.h
147d47ef4bd860394d1d8ae82c68d97887e2898b - src/nvidia/inc/libraries/nvport/core.h
87a130551593551380ac3e408f8044cc0423c01a - src/nvidia/inc/libraries/nvport/nvport.h
f31ed19d0588861b8c2b1489dd4e70d430110db5 - src/nvidia/inc/libraries/nvport/crypto.h
fb5a011275328b7c1edc55abc62e604462b37673 - src/nvidia/inc/libraries/nvport/atomic.h
199df020beb31a865f19ceec20f8f758e757c39a - src/nvidia/inc/libraries/nvport/debug.h
6d698ca4fc5e48c525f214a57e1de0cc4aa9e36b - src/nvidia/inc/libraries/nvport/thread.h
2487ffc1eb1e50b27ba07e0581da543d80bdaa72 - src/nvidia/inc/libraries/nvport/safe.h
f267235fd8690e1b1d7485d3a815841607683671 - src/nvidia/inc/libraries/nvport/inline/safe_generic.h
254e86ee0c1d5c0ad652bc1f3182b46f6d5c0f3b - src/nvidia/inc/libraries/nvport/inline/memory_tracking.h
ba267abed142db81efe7807b53c26ab4345da286 - src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h
9596b274389ea56acff6ca81db8201f41f2dd39d - src/nvidia/inc/libraries/nvport/inline/atomic_clang.h
23afbd04f4e4b3301edcfdec003c8e936d898e38 - src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h
a8c9b83169aceb5f97d9f7a411db449496dc18f6 - src/nvidia/inc/libraries/nvport/inline/util_generic.h
1d6a239ed6c8dab1397f056a81ff456141ec7f9c - src/nvidia/inc/libraries/nvport/inline/util_valist.h
bbece45965ffbc85fbd383a8a7c30890c6074b21 - src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h
645734ed505a4d977490e54b26cdf49657e20506 - src/nvidia/inc/libraries/nvport/inline/sync_tracking.h
2dec1c73507f66736674d203cc4a00813ccb11bc - src/nvidia/inc/libraries/resserv/rs_domain.h
cd033fe116a41285a979e629a2ee7b11ec99369f - src/nvidia/inc/libraries/resserv/rs_access_rights.h
1d04abec9438189995cb2a675f4e35a79599aae4 - src/nvidia/inc/libraries/resserv/rs_client.h
98fa7e07b6b41d1ba4ace1de93b7d7ddfd1d7c20 - src/nvidia/inc/libraries/resserv/rs_resource.h
df174d6b4f718ef699ca6f38c16aaeffa111ad3c - src/nvidia/inc/libraries/resserv/rs_access_map.h
290f84ec0b699931373eea3cd84437faf578e4a3 - src/nvidia/inc/libraries/resserv/resserv.h
3e431d72308a8b5fc423901a09079904a644b96e - src/nvidia/inc/libraries/resserv/rs_server.h
c314121149d3b28e58a62e2ccf81bf6904d1e4bc - src/nvidia/inc/libraries/utils/nvmacro.h
1aabd992631089ec24621835e046ddf2e2fd4232 - src/nvidia/inc/libraries/utils/nvbitvector.h
77db350059fa3326500af4269f09e1f02c1ab07b - src/nvidia/inc/libraries/utils/nvassert.h
d229861edca62007af83b86aa7fc1c77e957aa6f - src/nvidia/inc/libraries/utils/nvprintf.h
d0458cdc61eb650d57429f9ae58e60a62ab93025 - src/nvidia/inc/libraries/utils/nvrange.h
9aa5870d052a45c2489a6ea1a4f2e30fbc52d6be - src/nvidia/inc/libraries/utils/nv_enum.h
e35ff9733ea7fbffe0641399ccb0fd92a492e30d - src/nvidia/inc/libraries/nvoc/runtime.h
85b30b26f790b55f5370bbe9bb07349c62353841 - src/nvidia/inc/libraries/nvoc/object.h
664ff0e10e893923b70425fa49c9c48ed0735573 - src/nvidia/inc/libraries/nvoc/rtti.h
1b28bd0ee2e560ca2854a73a3ee5fb1cf713d013 - src/nvidia/inc/libraries/nvoc/utility.h
3919368b5b4cdd72d7da49801232048b5e786845 - src/nvidia/inc/libraries/nvoc/prelude.h
0b1508742a1c5a04b6c3a4be1b48b506f4180848 - kernel-open/dkms.conf
0a6f3c96043c01acbbb789874a7579728b89fcfd - kernel-open/Kbuild
4f4410c3c8db46e5a98d7a35f7d909a49de6cb43 - kernel-open/Makefile
17b99572fb2564a0817477e8b3da810ab6f6e477 - kernel-open/conftest.sh
646e6b03521587cc1a02617afd697183e5d1a83a - kernel-open/nvidia-modeset/nv-kthread-q.c
2ea1436104463c5e3d177e8574c3b4298976d37e - kernel-open/nvidia-modeset/nvkms-ioctl.h
7dbe6f8405e47c1380c6151c7c7d12b0b02ef7f4 - kernel-open/nvidia-modeset/nvidia-modeset.Kbuild
252660f72b80add6f6071dd0b86288dda8dbb168 - kernel-open/nvidia-modeset/nvkms.h
6e4ae13d024a1df676736752df805b6f91511009 - kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h
487d949cacad8a734bab459c962a157fe56d373f - kernel-open/nvidia-modeset/nvidia-modeset-linux.c
b02c378ac0521c380fc2403f0520949f785b1db6 - kernel-open/common/inc/nv-dmabuf.h
befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - kernel-open/common/inc/nv_stdarg.h
57937fb42f6fb312f7c3cf63aa399e43bad13c8c - kernel-open/common/inc/nv-proto.h
751abf80513898b35a6449725e27724b1e23ac50 - kernel-open/common/inc/nvmisc.h
5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - kernel-open/common/inc/dce_rm_client_ipc.h
b417d06ed1845f5ed69181d8eb9de6b6a87fa973 - kernel-open/common/inc/nv-firmware.h
1e7eec6561b04d2d21c3515987aaa116e9401c1f - kernel-open/common/inc/nv-kernel-interface-api.h
b4c5d759f035b540648117b1bff6b1701476a398 - kernel-open/common/inc/nvCpuUuid.h
507d35d1d4c5ba94ef975f75e16c63244d6cd650 - kernel-open/common/inc/nv-ioctl.h
fc319569799d54944cd09b0e170e29d67b33072d - kernel-open/common/inc/nv.h
fa267c903e9c449e62dbb6945906400d43417eff - kernel-open/common/inc/nvlimits.h
891192c9aabdb45fb4a798cc24cd89d205972d3f - kernel-open/common/inc/nv_uvm_types.h
a0c57e8ffbe1ae12de70e56b740737dae5394a18 - kernel-open/common/inc/nv-linux.h
689d6be9302d488000e57a329373feeb14e93798 - kernel-open/common/inc/nv-procfs-utils.h
3b12d770f8592b94a8c7774c372e80ad08c5774c - kernel-open/common/inc/nvi2c.h
0e70d16576584082ee4c7f3ff9944f3bd107b1c1 - kernel-open/common/inc/cpuopsys.h
b7f5d125ca0cbd4631012894b635a58cfc9f8e06 - kernel-open/common/inc/nv-pgprot.h
b15c5fe5d969414640a2cb374b707c230e7597e4 - kernel-open/common/inc/nv-hash.h
e1144f5bd643d24f67b7577c16c687294cb50d39 - kernel-open/common/inc/rm-gpu-ops.h
4a97d807a225d792544578f8112c9a3f90cc38f6 - kernel-open/common/inc/nvstatuscodes.h
d51449fa2fd19748007f2e98f0233c92b45f9572 - kernel-open/common/inc/nvkms-api-types.h
880e45b68b19fdb91ac94991f0e6d7fc3b406b1f - kernel-open/common/inc/nv-pci-types.h
bf4fdaa93deed0b110d5ca954a1f9678ffaabc6e - kernel-open/common/inc/nv-platform.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - kernel-open/common/inc/nv-gpu-info.h
3f7b20e27e6576ee1f2f0557d269697a0b8af7ec - kernel-open/common/inc/nv-firmware-registry.h
5cf4b517c9bd8f14593c1a6450078a774a39dd08 - kernel-open/common/inc/nv-hypervisor.h
ceac0fe7333f3a67b8fb63de42ab567dd905949f - kernel-open/common/inc/nv-ioctl-numa.h
8eae29f78efd73bf16437e439294d21ae71db9f3 - kernel-open/common/inc/nv-mm.h
36c20e9c111e66601b025802f840e7b87d09cdde - kernel-open/common/inc/nvkms-kapi.h
b986bc6591ba17a74ad81ec4c93347564c6d5165 - kernel-open/common/inc/nvkms-format.h
19a5da412ce1557b721b8550a4a80196f6162ba6 - kernel-open/common/inc/os_dsi_panel_props.h
1d17329caf26cdf931122b3c3b7edf4932f43c38 - kernel-open/common/inc/nv-msi.h
e4a4f57abb8769d204468b2f5000c81f5ea7c92f - kernel-open/common/inc/nv-procfs.h
4a8b7f3cc65fa530670f510796bef51cf8c4bb6b - kernel-open/common/inc/nv-register-module.h
4750735d6f3b334499c81d499a06a654a052713d - kernel-open/common/inc/nv-caps.h
b249abc0a7d0c9889008e98cb2f8515a9d310b85 - kernel-open/common/inc/nvgputypes.h
1d8b347e4b92c340a0e9eac77e0f63b9fb4ae977 - kernel-open/common/inc/nv-ioctl-numbers.h
03257213e55fff1c07c75c6dcf69afa920372822 - kernel-open/common/inc/nvtypes.h
d25291d32caef187daf3589ce4976e4fa6bec70d - kernel-open/common/inc/nv-time.h
1c49c1642d44ec347f82ff0aa06d0fca6213bad2 - kernel-open/common/inc/nvimpshared.h
e20882a9b14f2bf887e7465d3f238e5ac17bc2f5 - kernel-open/common/inc/nv_speculation_barrier.h
95bf694a98ba78d5a19e66463b8adda631e6ce4c - kernel-open/common/inc/nvstatus.h
c75bfc368c6ce3fc2c1a0c5062834e90d822b365 - kernel-open/common/inc/nv-memdbg.h
b642fb649ce2ba17f37c8aa73f61b38f99a74986 - kernel-open/common/inc/nv-retpoline.h
143051f69a53db0e7c5d2f846a9c14d666e264b4 - kernel-open/common/inc/nv-kref.h
60ef64c0f15526ae2d786e5cec07f28570f0663b - kernel-open/common/inc/conftest.h
4856fe869a5f3141e5d7f7d1b0a6affad94cbc31 - kernel-open/common/inc/nv-pci.h
3603c631c6cf784ec862e4e45f05939d98679002 - kernel-open/common/inc/nv-kthread-q.h
d7ab0ee225361daacd280ff98848851933a10a98 - kernel-open/common/inc/nv-list-helpers.h
906329ae5773732896e6fe94948f7674d0b04c17 - kernel-open/common/inc/os_gpio.h
c45b2faf17ca2a205c56daa11e3cb9d864be2238 - kernel-open/common/inc/nv-modeset-interface.h
7b2e2e6ff278acddc6980b330f68e374f38e0a6c - kernel-open/common/inc/nv-timer.h
f428218ee6f5d0289602495a1cfb287db4fb0823 - kernel-open/common/inc/nv_uvm_interface.h
de6913c5e5092a417530ac9f818497824eab7946 - kernel-open/common/inc/os-interface.h
e42d91cd7e6c17796fa89a172146950261f45d42 - kernel-open/common/inc/nv-lock.h
cda75171ca7d8bf920aab6d56ef9aadec16fd15d - kernel-open/common/inc/os/nv_memory_type.h
86443277db67b64c70260e5668bb4140bc90165c - kernel-open/nvidia/nv-clk.c
ef8fd76c55625aeaa71c9b789c4cf519ef6116b2 - kernel-open/nvidia/libspdm_hkdf.c
4c64885083621f5f313a7dee72e14eee8abed2a0 - kernel-open/nvidia/nvidia-sources.Kbuild
f701fb148bda4eb03332ab45194a4824e499cab7 - kernel-open/nvidia/nv-platform.c
e5cd40b060a69cf71220c910e9428d7f261892f7 - kernel-open/nvidia/internal_crypt_lib.h
646e6b03521587cc1a02617afd697183e5d1a83a - kernel-open/nvidia/nv-kthread-q.c
4e5a330fa40dab218821976ac1b530c649d48994 - kernel-open/nvidia/libspdm_ecc.c
0a3ad5cdacfe156b02f53c0087bdc0ec9509cd6a - kernel-open/nvidia/nv-ipc-soc.c
6e669fe32e4b69dcdbc9739dc8a45fb800547d53 - kernel-open/nvidia/nv-p2p.c
ab04c42e0e8e7f48f1a7074885278bbb6006d65f - kernel-open/nvidia/nv-bpmp.c
95ae148b016e4111122c2d9f8f004b53e78998f3 - kernel-open/nvidia/nv-memdbg.c
fbae5663e3c278d8206d07ec6446ca4c2781795f - kernel-open/nvidia/nv-ibmnpu.h
ec3055aa73c6c65b601ea040989f0b638a847e86 - kernel-open/nvidia/os-interface.c
dd819a875c584bc469082fcf519779ea00b1d952 - kernel-open/nvidia/libspdm_aead_aes_gcm.c
980556d84bc56e819955b9338a43a9d970dba11d - kernel-open/nvidia/nv_gpu_ops.h
4eee7319202366822e17d29ecec9f662c075e7ac - kernel-open/nvidia/nv-rsync.c
2f6e4c6ee6f809097c8b07a7b698e8614bf25e57 - kernel-open/nvidia/nv-caps.c
d11ab03a617b29efcf00f85e24ebce60f91cf82c - kernel-open/nvidia/nv-backlight.c
57a06cab892f111b0fb1ebe182c0c688560e750e - kernel-open/nvidia/nvspdm_cryptlib_extensions.h
189eebce734b698f0fd0b60290eca7922b865888 - kernel-open/nvidia/nv-imp.c
8bedc7374d7a43250e49fb09139c511b489d45e3 - kernel-open/nvidia/nv-pci-table.h
68d781e929d103e6fa55fa92b5d4f933fbfb6526 - kernel-open/nvidia/nv-report-err.h
94c406f36836c3396b0ca08b4ff71496666b9c43 - kernel-open/nvidia/os-usermap.c
dc39c4ee87f4dc5f5ccc179a98e07ddb82bb8bce - kernel-open/nvidia/nv-modeset-interface.c
06e7ec77cd21c43f900984553a4960064753e444 - kernel-open/nvidia/nv-platform-pm.c
cf98395acb4430a7c105218f7a4b5f7e810b39cf - kernel-open/nvidia/os-registry.c
7b1bd10726481626dd51f4eebb693794561c20f6 - kernel-open/nvidia/nv-host1x.c
1a98a2aaf386cd3d03b4b5513d6a511c60f71c2c - kernel-open/nvidia/nv-reg.h
42b9924aa348e9b23dffba9b613108d58f3a671e - kernel-open/nvidia/nv.c
37654472e65659be229b5e35c6f25c0724929511 - kernel-open/nvidia/nv-frontend.c
d9221522e02e18b037b8929fbc075dc3c1e58654 - kernel-open/nvidia/nv-pci-table.c
94344ec0af21bd9c7c7ab912f7bd3a8668a3e0aa - kernel-open/nvidia/os-pci.c
b8d361216db85fe897cbced2a9600507b7708c61 - kernel-open/nvidia/libspdm_hkdf_sha.c
70a9117dce7471a07178d9456b146a033d6b544b - kernel-open/nvidia/nv-dma.c
946fb049ca50c9bb39897eca4b8443278043eea2 - kernel-open/nvidia/nv-vm.c
6710f4603a9d3e14bcaefdf415b1cfff9ec9b7ec - kernel-open/nvidia/libspdm_aead.c
e0aff92ee8ddec261d8f0d81c41f837503c4b571 - kernel-open/nvidia/nv-dsi-parse-panel-props.c
cf90d9ea3abced81d182ab3c4161e1b5d3ad280d - kernel-open/nvidia/nv-rsync.h
6d4fbea733fdcd92fc6a8a5884e8bb359f9e8abd - kernel-open/nvidia/rmp2pdefines.h
dd9e367cba9e0672c998ec6d570be38084a365ab - kernel-open/nvidia/libspdm_rand.c
2fab5ae911554508e6e7a3b25824e8b2c27e85c2 - kernel-open/nvidia/nv-ibmnpu.c
ce537a7d786bd11a4429bf7c59836d5373a66f61 - kernel-open/nvidia/nv-i2c.c
b71bf4426322ab59e78e2a1500509a5f4b2b71ab - kernel-open/nvidia/nv-pat.h
64f1c96761f6d9e7e02ab049dd0c810196568036 - kernel-open/nvidia/nv-pat.c
9104dc5f36a825aaf1208b54b167965625d4a433 - kernel-open/nvidia/nv_uvm_interface.c
9b701fe42a0e87d62c58b15c553086a608e89f7b - kernel-open/nvidia/nv-frontend.h
02b1936dd9a9e30141245209d79b8304b7f12eb9 - kernel-open/nvidia/nv-cray.c
11778961efc78ef488be5387fa3de0c1b761c0d9 - kernel-open/nvidia/libspdm_sha.c
5ac10d9b20ccd37e1e24d4a81b8ac8f83db981e4 - kernel-open/nvidia/nv-vtophys.c
9883eb32e5d4377c3dce1c7cb54d0e05c05e128b - kernel-open/nvidia/nv-mmap.c
01d4701e8302e345275f1ec60b9718e645b5663c - kernel-open/nvidia/libspdm_x509.c
e8daae4e6106429378673988293aaa1fcd80f0eb - kernel-open/nvidia/nv-pci.c
69f203ad21e643f7b7c85e7e86bd4b674a3536de - kernel-open/nvidia/nv-acpi.c
8c9fd9590d7e3ad333ae03d5f22b72ffbdbe6e70 - kernel-open/nvidia/nv-dmabuf.c
c7f1aaa6a5f3a3cdf1e5f80adf40b3c9f185fb94 - kernel-open/nvidia/nv-report-err.c
c1ebcfec42f7898dd9d909eacd439d288b80523f - kernel-open/nvidia/os-mlock.c
d68af9144d3d487308e73d0a52f4474f8047d6ca - kernel-open/nvidia/nv-gpio.c
7ac10bc4b3b1c5a261388c3f5f9ce0e9b35d7b44 - kernel-open/nvidia/nv-usermap.c
e0a37b715684ae0f434327e4ce1b5832caf7ea4e - kernel-open/nvidia/nv-nano-timer.c
3b27e4eaa97bd6fa71f1a075b50af69b1ec16454 - kernel-open/nvidia/libspdm_ec.c
fc22bea3040ae178492cb9c7a62f1d0012b1c113 - kernel-open/nvidia/nv-procfs.c
a46f27be57870c7669f3e43fffb7e1fdaff5a3d6 - kernel-open/nvidia/nvidia.Kbuild
6060392eec4e707ac61ebca3995b6a966eba7fc1 - kernel-open/nvidia/nv-p2p.h
642c3a7d10b263ab9a63073f83ad843566927b58 - kernel-open/nvidia/libspdm_hmac_sha.c
8f87a475c202458948025d1521968677fc11dd50 - kernel-open/nvidia/nv-msi.c
e2da77ff1bc25c0b1de69af7c09e0bde26c34e30 - kernel-open/nvidia/libspdm_shash.c
9a6e164ec60c2feb1eb8782e3028afbffe420927 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_mac.h
95b97f5a3ddcf73ed5d7fa0be9e27aec776d7c13 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_rsa.h
7ff12b437215b77c920a845943e4101dcde289c4 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_hkdf.h
34de62da6f880ba8022299c77eddbb11d7fc68d2 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_hash.h
fa178a7209f56008e67b553a2c5ad1b2dd383aac - kernel-open/nvidia/hal/library/cryptlib/cryptlib_rng.h
cf94004b7b5729982806f7d6ef7cc6db53e3de56 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_aead.h
c276be3eb63bb451edfe9ed13859c251530743e6 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_ecd.h
5b79fbc90502b1ba8d1f9966fc7b9a6fd7ef07b4 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_cert.h
0dcb1fd3982e6307b07c917cb453cddbcd1d2f43 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_dh.h
92ab7c0bf545029c4c1d9a0ab68b53eedc655f9c - kernel-open/nvidia/hal/library/cryptlib/cryptlib_ec.h
d007df1d642e836595331598ca0313084922f3ee - kernel-open/nvidia/hal/library/cryptlib/cryptlib_sm2.h
7398ff33b24fa58315cc40776bc3451e090aa437 - kernel-open/nvidia/internal/libspdm_lib_config.h
19b5d633f4560d545f622ada0dd352d5aa02c651 - kernel-open/nvidia/library/cryptlib.h
d5ddc354e191d6178625b0df8e8b34e8c3e4c474 - kernel-open/nvidia/library/spdm_lib_config.h
99642b76e9a84b5a1d2e2f4a8c7fb7bcd77a44fd - kernel-open/nvidia-drm/nvidia-drm.h
66b33e4ac9abe09835635f6776c1222deefad741 - kernel-open/nvidia-drm/nvidia-drm-fb.h
23586447526d9ffedd7878b6cf5ba00139fadb5e - kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
6d65ea9f067e09831a8196022bfe00a145bec270 - kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h
646e6b03521587cc1a02617afd697183e5d1a83a - kernel-open/nvidia-drm/nv-kthread-q.c
c1af941dd5144b05995dcf5721652a4f126e175f - kernel-open/nvidia-drm/nvidia-drm-priv.h
c52acdbc07f16aa78570d9e6a7f62e493264fde1 - kernel-open/nvidia-drm/nvidia-drm-helper.c
337d6b7013c2527d7efdb7ef87f335024ed140a8 - kernel-open/nvidia-drm/nvidia-drm-drv.c
511ea7cd9e7778c6adc028ae13377c1a8856b72a - kernel-open/nvidia-drm/nvidia-drm-format.c
e362c64aa67b47becdbf5c8ba2a245e135adeedf - kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c
492a1b0b02dcd2d60f05ac670daeeddcaa4b0da5 - kernel-open/nvidia-drm/nvidia-dma-resv-helper.h
55e26337c0d52b5ec4f6ab403e9306417d2893f8 - kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c
672afea77ca2c2575f278d9e182ba1188e35e971 - kernel-open/nvidia-drm/nvidia-drm-encoder.c
40b5613d1fbbe6b74bff67a5d07974ad321f75f0 - kernel-open/nvidia-drm/nvidia-drm-utils.h
2c0518192eac1a1877eef0dbf7b668e8450d0821 - kernel-open/nvidia-drm/nvidia-drm-helper.h
273d0cafeb0f21bf9b7d189f2dc6278e1a3c9672 - kernel-open/nvidia-drm/nvidia-drm-os-interface.h
eb98761cdc99141ad937966e5533c57189db376a - kernel-open/nvidia-drm/nvidia-drm-fence.h
8bedc7374d7a43250e49fb09139c511b489d45e3 - kernel-open/nvidia-drm/nv-pci-table.h
8da06bd922850e840c94ed380e3b92c63aecbf70 - kernel-open/nvidia-drm/nvidia-drm-fb.c
044071d60c8cc8ea66c6caaf1b70fe01c4081ad3 - kernel-open/nvidia-drm/nvidia-drm-conftest.h
8b2063f0cc2e328f4f986c2ce556cfb626c89810 - kernel-open/nvidia-drm/nvidia-drm-utils.c
487db563f4e5153ffc976fc2aa26636ebb4cd534 - kernel-open/nvidia-drm/nvidia-drm-crtc.h
deb00fa4d1de972d93d8e72355d81ba87044c86f - kernel-open/nvidia-drm/nvidia-drm-fence.c
dc0fe38909e2f38e919495b7b4f21652a035a3ee - kernel-open/nvidia-drm/nvidia-drm.c
203295380efca7e422746805437b05ce22505424 - kernel-open/nvidia-drm/nvidia-drm-gem.c
1f0cdee2468f842c06bb84aceef60e0723023084 - kernel-open/nvidia-drm/nvidia-drm-linux.c
97b6c56b1407de976898e0a8b5a8f38a5211f8bb - kernel-open/nvidia-drm/nvidia-drm-format.h
d9221522e02e18b037b8929fbc075dc3c1e58654 - kernel-open/nvidia-drm/nv-pci-table.c
ec550cba2bebff2c5054b6e12fc43d81e37ade48 - kernel-open/nvidia-drm/nvidia-dma-fence-helper.h
8a8b431f45bd0fe477759c1527d792cb9a1fa3f5 - kernel-open/nvidia-drm/nvidia-drm-gem.h
79bcf373ff7d728740716acde5e2d44e924efefa - kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c
734f8ad9fbbe2e07b7d8c38455f66be9f75de127 - kernel-open/nvidia-drm/nvidia-drm-crtc.c
6528efa1f8061678b8543c5c0be8761cab860858 - kernel-open/nvidia-drm/nvidia-drm-modeset.h
b91df730fba3c2f9401321557bb1bc2e64bbf980 - kernel-open/nvidia-drm/nvidia-drm-connector.h
eca70b3b8146903ec678a60eebb0462e6ccf4569 - kernel-open/nvidia-drm/nvidia-drm-encoder.h
efeac7919ce5bbdab479fe5489e9ea84d2cee0a2 - kernel-open/nvidia-drm/nvidia-drm.Kbuild
4b68b6cb0f98116376be36733f5ae60eec85d78d - kernel-open/nvidia-drm/nvidia-drm-ioctl.h
61c61f91d1a29d6f7794a67eac337152b58aaac0 - kernel-open/nvidia-drm/nvidia-drm-connector.c
fe9132110f104ff7ebba922ce6dd66a2d08a998d - kernel-open/nvidia-drm/nvidia-drm-modeset.c
2eba218d75f3802d7bab34d0dd6320f872b2d604 - kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h
9a882b31b2acc9e1ad3909c0061eee536e648aae - kernel-open/nvidia-drm/nvidia-drm-drv.h

Change-Id: I4c402d345459fbad7b568857262f5190b68f4ead
Committed by svcmobrel-release on 2023-12-05 20:47:41 -08:00
parent d92e92ae48
commit 94a016aa32
1276 changed files with 1275 additions and 1275 deletions

@@ -0,0 +1,329 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nv-kthread-q.h"
#include "nv-list-helpers.h"
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/mm.h>
#if defined(NV_LINUX_BUG_H_PRESENT)
#include <linux/bug.h>
#else
#include <asm/bug.h>
#endif
// Today's implementation is a little simpler and more limited than the
// API description allows for in nv-kthread-q.h. Details include:
//
// 1. Each nv_kthread_q instance is a first-in, first-out queue.
//
// 2. Each nv_kthread_q instance is serviced by exactly one kthread.
//
// You can create any number of queues, each of which gets its own
// named kernel thread (kthread). You can then insert arbitrary functions
// into the queue, and those functions will be run in the context of the
// queue's kthread.
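
// ---------------------------------------------------------------------------
// Editor's sketch, not part of the original file: minimal client usage of the
// API described above, assuming the declarations in nv-kthread-q.h
// (nv_kthread_q_init, nv_kthread_q_item_init, nv_kthread_q_schedule_q_item,
// nv_kthread_q_stop). All "example_*" names are hypothetical.

static nv_kthread_q_t example_q;
static nv_kthread_q_item_t example_item;

static void example_work(void *args)
{
    // Runs in example_q's dedicated kthread, so it is allowed to sleep.
    (void)args;
}

static int example_setup(void)
{
    // Create the queue and the single kthread that services it:
    int ret = nv_kthread_q_init(&example_q, "example_q");
    if (ret != 0)
        return ret;

    // Insert a function into the FIFO; it runs in the queue's kthread:
    nv_kthread_q_item_init(&example_item, example_work, NULL);
    nv_kthread_q_schedule_q_item(&example_q, &example_item);
    return 0;
}

static void example_teardown(void)
{
    // Flushes pending items, then stops and reaps the kthread:
    nv_kthread_q_stop(&example_q);
}
// ---------------------------------------------------------------------------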

#ifndef WARN
    // Only *really* old kernels (2.6.9) end up here. Just use a simple printk
    // to implement this, because such kernels won't be supported much longer.
    #define WARN(condition, format...) ({                   \
        int __ret_warn_on = !!(condition);                  \
        if (unlikely(__ret_warn_on))                        \
            printk(KERN_ERR format);                        \
        unlikely(__ret_warn_on);                            \
    })
#endif

#define NVQ_WARN(fmt, ...)                                  \
    do {                                                    \
        if (in_interrupt()) {                               \
            WARN(1, "nv_kthread_q: [in interrupt]: " fmt,   \
                 ##__VA_ARGS__);                            \
        }                                                   \
        else {                                              \
            WARN(1, "nv_kthread_q: task: %s: " fmt,         \
                 current->comm,                             \
                 ##__VA_ARGS__);                            \
        }                                                   \
    } while (0)

static int _main_loop(void *args)
{
    nv_kthread_q_t *q = (nv_kthread_q_t *)args;
    nv_kthread_q_item_t *q_item = NULL;
    unsigned long flags;

    while (1) {
        // Normally this thread is never interrupted. However,
        // down_interruptible (instead of down) is called here,
        // in order to avoid being classified as a potentially
        // hung task, by the kernel watchdog.
        while (down_interruptible(&q->q_sem))
            NVQ_WARN("Interrupted during semaphore wait\n");

        if (atomic_read(&q->main_loop_should_exit))
            break;

        spin_lock_irqsave(&q->q_lock, flags);

        // The q_sem semaphore prevents us from getting here unless there is
        // at least one item in the list, so an empty list indicates a bug.
        if (unlikely(list_empty(&q->q_list_head))) {
            spin_unlock_irqrestore(&q->q_lock, flags);
            NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q);
            continue;
        }

        // Consume one item from the queue
        q_item = list_first_entry(&q->q_list_head,
                                  nv_kthread_q_item_t,
                                  q_list_node);

        list_del_init(&q_item->q_list_node);

        spin_unlock_irqrestore(&q->q_lock, flags);

        // Run the item
        q_item->function_to_run(q_item->function_args);

        // Make debugging a little simpler by clearing this between runs:
        q_item = NULL;
    }

    while (!kthread_should_stop())
        schedule();

    return 0;
}

void nv_kthread_q_stop(nv_kthread_q_t *q)
{
    // check if queue has been properly initialized
    if (unlikely(!q->q_kthread))
        return;

    nv_kthread_q_flush(q);

    // If this assertion fires, then a caller likely either broke the API rules,
    // by adding items after calling nv_kthread_q_stop, or possibly messed up
    // with inadequate flushing of self-rescheduling q_items.
    if (unlikely(!list_empty(&q->q_list_head)))
        NVQ_WARN("list not empty after flushing\n");

    if (likely(!atomic_read(&q->main_loop_should_exit))) {
        atomic_set(&q->main_loop_should_exit, 1);

        // Wake up the kthread so that it can see that it needs to stop:
        up(&q->q_sem);

        kthread_stop(q->q_kthread);
        q->q_kthread = NULL;
    }
}
// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by
// kthread_create_on_node relies on a 2 entry, per-core cache to minimize
// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the
// stack location ends up being a function of the core assigned to the current
// thread, instead of being a function of the specified NUMA node. The cache was
// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0
// ("fork: Optimize task creation by caching two thread stacks per CPU if
// CONFIG_VMAP_STACK=y")
//
// To work around the problematic cache, we create up to three kernel threads:
// - If the first thread's stack is resident on the preferred node, return this
//   thread.
// - Otherwise, create a second thread. If its stack is resident on the
//   preferred node, stop the first thread and return this one.
// - Otherwise, create a third thread. The stack allocator does not find a
//   cached stack, and so falls back to vmalloc, which takes the NUMA hint into
//   consideration. The first two threads are then stopped.
//
// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned.
//
// This function is never invoked when there is no NUMA preference (preferred
// node is NUMA_NO_NODE).
static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
nv_kthread_q_t *q,
int preferred_node,
const char *q_name)
{
unsigned i, j;
static const unsigned attempts = 3;
struct task_struct *thread[3];
for (i = 0;; i++) {
struct page *stack;
thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name);
if (unlikely(IS_ERR(thread[i]))) {
// Instead of failing, pick the previous thread, even if its
// stack is not allocated on the preferred node.
if (i > 0)
i--;
break;
}
// vmalloc is not used to allocate the stack, so simply return the
// thread, even if its stack may not be allocated on the preferred node
if (!is_vmalloc_addr(thread[i]->stack))
break;
// Ran out of attempts - return thread even if its stack may not be
// allocated on the preferred node
if (i == (attempts - 1))
break;
// Get the NUMA node where the first page of the stack is resident. If
// it is the preferred node, select this thread.
stack = vmalloc_to_page(thread[i]->stack);
if (page_to_nid(stack) == preferred_node)
break;
}
for (j = i; j > 0; j--)
kthread_stop(thread[j - 1]);
return thread[i];
}
int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node)
{
memset(q, 0, sizeof(*q));
INIT_LIST_HEAD(&q->q_list_head);
spin_lock_init(&q->q_lock);
sema_init(&q->q_sem, 0);
if (preferred_node == NV_KTHREAD_NO_NODE) {
q->q_kthread = kthread_create(_main_loop, q, q_name);
}
else {
q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name);
}
if (IS_ERR(q->q_kthread)) {
int err = PTR_ERR(q->q_kthread);
// Clear q_kthread before returning so that nv_kthread_q_stop() can be
// safely called on it, making error handling easier.
q->q_kthread = NULL;
return err;
}
wake_up_process(q->q_kthread);
return 0;
}
// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&q->q_lock, flags);
if (likely(list_empty(&q_item->q_list_node)))
list_add_tail(&q_item->q_list_node, &q->q_list_head);
else
ret = 0;
spin_unlock_irqrestore(&q->q_lock, flags);
if (likely(ret))
up(&q->q_sem);
return ret;
}
void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item,
nv_q_func_t function_to_run,
void *function_args)
{
INIT_LIST_HEAD(&q_item->q_list_node);
q_item->function_to_run = function_to_run;
q_item->function_args = function_args;
}
// Returns true (non-zero) if the q_item got scheduled, false otherwise.
int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q,
nv_kthread_q_item_t *q_item)
{
if (unlikely(atomic_read(&q->main_loop_should_exit))) {
NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was "
"called with a non-alive q: 0x%p\n", q);
return 0;
}
return _raw_q_schedule(q, q_item);
}
static void _q_flush_function(void *args)
{
struct completion *completion = (struct completion *)args;
complete(completion);
}
static void _raw_q_flush(nv_kthread_q_t *q)
{
nv_kthread_q_item_t q_item;
DECLARE_COMPLETION_ONSTACK(completion);
nv_kthread_q_item_init(&q_item, _q_flush_function, &completion);
_raw_q_schedule(q, &q_item);
// Wait for the flush item to run. Once it has run, then all of the
// previously queued items in front of it will have run, so that means
// the flush is complete.
wait_for_completion(&completion);
}
void nv_kthread_q_flush(nv_kthread_q_t *q)
{
if (unlikely(atomic_read(&q->main_loop_should_exit))) {
NVQ_WARN("Not allowed: nv_kthread_q_flush was called after "
"nv_kthread_q_stop. q: 0x%p\n", q);
return;
}
// This 2x flush is not a typing mistake. The queue really does have to be
// flushed twice, in order to take care of the case of a q_item that
// reschedules itself.
_raw_q_flush(q);
_raw_q_flush(q);
}
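A minimal usage sketch of the queue API implemented above; the queue name, the callback, and the calling context are illustrative, and error handling is elided:

// Sketch only: demonstrates init -> schedule -> flush -> stop with the
// functions defined in this file.
static void example_work(void *args)
{
    printk(KERN_INFO "nv_kthread_q example: %s\n", (const char *)args);
}

static int example_use_queue(void)
{
    static nv_kthread_q_t q;
    static nv_kthread_q_item_t item;
    int ret;

    // Create the queue and its backing kthread, with no NUMA preference.
    ret = nv_kthread_q_init_on_node(&q, "example-q", NV_KTHREAD_NO_NODE);
    if (ret != 0)
        return ret;

    // Package a function pointer and argument, then hand it to the kthread.
    nv_kthread_q_item_init(&item, example_work, "hello");
    nv_kthread_q_schedule_q_item(&q, &item);

    // Wait for everything scheduled so far to run, then tear down.
    nv_kthread_q_flush(&q);
    nv_kthread_q_stop(&q);
    return 0;
}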

View File

@@ -0,0 +1,79 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "nv-pci-table.h"
/* Devices supported by RM */
struct pci_device_id nv_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_DISPLAY_VGA << 8),
.class_mask = ~0
},
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_DISPLAY_3D << 8),
.class_mask = ~0
},
{ }
};
/* Devices supported by all drivers in nvidia.ko */
struct pci_device_id nv_module_device_table[] = {
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_DISPLAY_VGA << 8),
.class_mask = ~0
},
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_DISPLAY_3D << 8),
.class_mask = ~0
},
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_BRIDGE_OTHER << 8),
.class_mask = ~0
},
{ }
};
MODULE_DEVICE_TABLE(pci, nv_module_device_table);
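Note that the .class values here are 24-bit PCI class codes: PCI_CLASS_DISPLAY_VGA is the 16-bit base-class/subclass pair (0x0300), so shifting it left by 8 and using a full class_mask matches devices whose complete class code is exactly 0x030000 (VGA-compatible display controller, prog-if 0). Below is a sketch of how a driver might bind to nv_pci_table; the driver name and probe/remove callbacks are hypothetical:

/* Sketch: binding a pci_driver to the table above. */
static int nv_example_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
    return 0; /* claim the device */
}

static void nv_example_remove(struct pci_dev *dev)
{
}

static struct pci_driver nv_example_pci_driver = {
    .name     = "nvidia-example",
    .id_table = nv_pci_table,
    .probe    = nv_example_probe,
    .remove   = nv_example_remove,
};
/* Registered with pci_register_driver(&nv_example_pci_driver) at load time. */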

View File

@@ -0,0 +1,31 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PCI_TABLE_H_
#define _NV_PCI_TABLE_H_
#include <linux/pci.h>
extern struct pci_device_id nv_pci_table[];
#endif /* _NV_PCI_TABLE_H_ */

View File

@@ -0,0 +1,154 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DMA_FENCE_HELPER_H__
#define __NVIDIA_DMA_FENCE_HELPER_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_FENCE_AVAILABLE)
/*
* Fence headers are moved to file dma-fence.h and struct fence has
* been renamed to dma_fence by commit -
*
* 2016-10-25 : f54d1867005c3323f5d8ad83eed823e84226c429
*/
#if defined(NV_LINUX_FENCE_H_PRESENT)
#include <linux/fence.h>
#else
#include <linux/dma-fence.h>
#endif
#if defined(NV_LINUX_FENCE_H_PRESENT)
typedef struct fence nv_dma_fence_t;
typedef struct fence_ops nv_dma_fence_ops_t;
typedef struct fence_cb nv_dma_fence_cb_t;
typedef fence_func_t nv_dma_fence_func_t;
#else
typedef struct dma_fence nv_dma_fence_t;
typedef struct dma_fence_ops nv_dma_fence_ops_t;
typedef struct dma_fence_cb nv_dma_fence_cb_t;
typedef dma_fence_func_t nv_dma_fence_func_t;
#endif
#if defined(NV_LINUX_FENCE_H_PRESENT)
#define NV_DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT
#else
#define NV_DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT
#endif
static inline bool nv_dma_fence_is_signaled(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_is_signaled(fence);
#else
return dma_fence_is_signaled(fence);
#endif
}
static inline nv_dma_fence_t *nv_dma_fence_get(nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_get(fence);
#else
return dma_fence_get(fence);
#endif
}
static inline void nv_dma_fence_put(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
fence_put(fence);
#else
dma_fence_put(fence);
#endif
}
static inline signed long
nv_dma_fence_default_wait(nv_dma_fence_t *fence,
bool intr, signed long timeout) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_default_wait(fence, intr, timeout);
#else
return dma_fence_default_wait(fence, intr, timeout);
#endif
}
static inline int nv_dma_fence_signal(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_signal(fence);
#else
return dma_fence_signal(fence);
#endif
}
static inline int nv_dma_fence_signal_locked(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_signal_locked(fence);
#else
return dma_fence_signal_locked(fence);
#endif
}
static inline u64 nv_dma_fence_context_alloc(unsigned num) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_context_alloc(num);
#else
return dma_fence_context_alloc(num);
#endif
}
static inline void
nv_dma_fence_init(nv_dma_fence_t *fence,
const nv_dma_fence_ops_t *ops,
spinlock_t *lock, u64 context, uint64_t seqno) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
fence_init(fence, ops, lock, context, seqno);
#else
dma_fence_init(fence, ops, lock, context, seqno);
#endif
}
static inline void
nv_dma_fence_set_error(nv_dma_fence_t *fence,
int error) {
#if defined(NV_DMA_FENCE_SET_ERROR_PRESENT)
return dma_fence_set_error(fence, error);
#else
fence->status = error;
#endif
}
static inline int
nv_dma_fence_add_callback(nv_dma_fence_t *fence,
nv_dma_fence_cb_t *cb,
nv_dma_fence_func_t func) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_add_callback(fence, cb, func);
#else
return dma_fence_add_callback(fence, cb, func);
#endif
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
#endif /* __NVIDIA_DMA_FENCE_HELPER_H__ */
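A sketch of a typical caller of these wrappers; the ops table and lock are assumed to be supplied by the caller, and the sequencing is illustrative:

/* Sketch: allocate a fence context, initialize a fence on it, and signal it
 * once the associated work completes. */
static void example_fence_lifecycle(nv_dma_fence_t *fence,
                                    const nv_dma_fence_ops_t *ops,
                                    spinlock_t *lock)
{
    /* Reserve one timeline; fences sharing a context are ordered by seqno. */
    u64 context = nv_dma_fence_context_alloc(1);

    nv_dma_fence_init(fence, ops, lock, context, 1 /* seqno */);

    /* ... hardware or software work completes ... */

    nv_dma_fence_signal(fence); /* wakes waiters and runs callbacks */
    nv_dma_fence_put(fence);    /* drop this reference */
}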

View File

@@ -0,0 +1,126 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DMA_RESV_HELPER_H__
#define __NVIDIA_DMA_RESV_HELPER_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_FENCE_AVAILABLE)
/*
* linux/reservation.h is renamed to linux/dma-resv.h, by commit
* 52791eeec1d9 (dma-buf: rename reservation_object to dma_resv)
* in v5.4.
*/
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
#include <linux/dma-resv.h>
#else
#include <linux/reservation.h>
#endif
#include <nvidia-dma-fence-helper.h>
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
typedef struct dma_resv nv_dma_resv_t;
#else
typedef struct reservation_object nv_dma_resv_t;
#endif
static inline void nv_dma_resv_init(nv_dma_resv_t *obj)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
dma_resv_init(obj);
#else
reservation_object_init(obj);
#endif
}
static inline void nv_dma_resv_fini(nv_dma_resv_t *obj)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
dma_resv_fini(obj);
#else
reservation_object_fini(obj);
#endif
}
static inline void nv_dma_resv_lock(nv_dma_resv_t *obj,
struct ww_acquire_ctx *ctx)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
dma_resv_lock(obj, ctx);
#else
ww_mutex_lock(&obj->lock, ctx);
#endif
}
static inline void nv_dma_resv_unlock(nv_dma_resv_t *obj)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
dma_resv_unlock(obj);
#else
ww_mutex_unlock(&obj->lock);
#endif
}
static inline int nv_dma_resv_reserve_fences(nv_dma_resv_t *obj,
unsigned int num_fences,
NvBool shared)
{
#if defined(NV_DMA_RESV_RESERVE_FENCES_PRESENT)
return dma_resv_reserve_fences(obj, num_fences);
#else
if (shared) {
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
return dma_resv_reserve_shared(obj, num_fences);
#elif defined(NV_RESERVATION_OBJECT_RESERVE_SHARED_HAS_NUM_FENCES_ARG)
return reservation_object_reserve_shared(obj, num_fences);
#else
unsigned int i;
for (i = 0; i < num_fences; i++) {
reservation_object_reserve_shared(obj);
}
#endif
}
return 0;
#endif
}
static inline void nv_dma_resv_add_excl_fence(nv_dma_resv_t *obj,
nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
#if defined(NV_DMA_RESV_ADD_FENCE_PRESENT)
dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
#else
dma_resv_add_excl_fence(obj, fence);
#endif
#else
reservation_object_add_excl_fence(obj, fence);
#endif
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
#endif /* __NVIDIA_DMA_RESV_HELPER_H__ */
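A sketch of attaching an exclusive (write) fence through the wrappers above; 'obj' and 'fence' are assumed to exist and hold valid references:

/* Sketch: lock, reserve a fence slot, attach, unlock. */
static void example_attach_excl_fence(nv_dma_resv_t *obj,
                                      nv_dma_fence_t *fence)
{
    nv_dma_resv_lock(obj, NULL); /* no ww_acquire_ctx for a single object */

    /* Newer kernels require reserving fence slots before adding a fence;
     * on older kernels the wrapper reduces to the shared-slot reservation,
     * or a no-op for the exclusive slot. */
    if (nv_dma_resv_reserve_fences(obj, 1, NV_FALSE) == 0) {
        nv_dma_resv_add_excl_fence(obj, fence);
    }

    nv_dma_resv_unlock(obj);
}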

View File

@@ -0,0 +1,64 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_CONFTEST_H__
#define __NVIDIA_DRM_CONFTEST_H__
#include "conftest.h"
/*
* NOTE: This file is expected to get included at the top before including any
* of linux/drm headers.
*
* The goal is to redefine refcount_dec_and_test and refcount_inc before
* including drm header files, so that the drm macro/inline calls to
* refcount_dec_and_test* and refcount_inc get redirected to
* alternate implementation in this file.
*/
#if NV_IS_EXPORT_SYMBOL_GPL_refcount_inc
#include <linux/refcount.h>
#define refcount_inc(__ptr) \
do { \
atomic_inc(&(__ptr)->refs); \
} while(0)
#endif
#if NV_IS_EXPORT_SYMBOL_GPL_refcount_dec_and_test
#include <linux/refcount.h>
#define refcount_dec_and_test(__ptr) atomic_dec_and_test(&(__ptr)->refs)
#endif
#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) || \
defined(NV_DRM_GEM_OBJECT_HAS_RESV)
#define NV_DRM_FENCE_AVAILABLE
#else
#undef NV_DRM_FENCE_AVAILABLE
#endif
#endif /* defined(__NVIDIA_DRM_CONFTEST_H__) */
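For illustration, once the redefinition above is in effect, any refcount_inc() call parsed later in the translation unit operates directly on the atomic_t embedded in refcount_t instead of referencing the GPL-exported out-of-line helper. The struct below is hypothetical:

/* Illustration only. */
struct example_obj {
    refcount_t refcount;
};

static void example_get(struct example_obj *obj)
{
    /* With NV_IS_EXPORT_SYMBOL_GPL_refcount_inc set, this expands to
     * atomic_inc(&obj->refcount.refs) rather than calling the
     * GPL-exported refcount_inc(). */
    refcount_inc(&obj->refcount);
}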

View File

@@ -0,0 +1,507 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-helper.h"
#include "nvidia-drm-priv.h"
#include "nvidia-drm-connector.h"
#include "nvidia-drm-crtc.h"
#include "nvidia-drm-utils.h"
#include "nvidia-drm-encoder.h"
/*
* Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
* moves a number of helper function definitions from
* drm/drm_crtc_helper.h to a new drm_probe_helper.h.
*/
#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT)
#include <drm/drm_probe_helper.h>
#endif
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
static void nv_drm_connector_destroy(struct drm_connector *connector)
{
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
if (nv_connector->edid != NULL) {
nv_drm_free(nv_connector->edid);
}
nv_drm_free(nv_connector);
}
static bool
__nv_drm_detect_encoder(struct NvKmsKapiDynamicDisplayParams *pDetectParams,
struct drm_connector *connector,
struct drm_encoder *encoder)
{
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
struct drm_device *dev = connector->dev;
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_encoder *nv_encoder;
/*
* DVI-I connectors can drive both digital and analog
* encoders. If a digital connection has been forced then
* skip analog encoders.
*/
if (connector->connector_type == DRM_MODE_CONNECTOR_DVII &&
connector->force == DRM_FORCE_ON_DIGITAL &&
encoder->encoder_type == DRM_MODE_ENCODER_DAC) {
return false;
}
nv_encoder = to_nv_encoder(encoder);
memset(pDetectParams, 0, sizeof(*pDetectParams));
pDetectParams->handle = nv_encoder->hDisplay;
switch (connector->force) {
case DRM_FORCE_ON:
case DRM_FORCE_ON_DIGITAL:
pDetectParams->forceConnected = NV_TRUE;
break;
case DRM_FORCE_OFF:
pDetectParams->forceDisconnected = NV_TRUE;
break;
case DRM_FORCE_UNSPECIFIED:
break;
}
#if defined(NV_DRM_CONNECTOR_HAS_OVERRIDE_EDID)
if (connector->override_edid) {
#else
if (drm_edid_override_connector_update(connector) > 0) {
#endif
const struct drm_property_blob *edid = connector->edid_blob_ptr;
if (edid->length <= sizeof(pDetectParams->edid.buffer)) {
memcpy(pDetectParams->edid.buffer, edid->data, edid->length);
pDetectParams->edid.bufferSize = edid->length;
pDetectParams->overrideEdid = NV_TRUE;
} else {
WARN_ON(edid->length >
sizeof(pDetectParams->edid.buffer));
}
}
if (!nvKms->getDynamicDisplayInfo(nv_dev->pDevice, pDetectParams)) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to detect display state");
return false;
}
#if defined(NV_DRM_CONNECTOR_HAS_VRR_CAPABLE_PROPERTY)
drm_connector_attach_vrr_capable_property(&nv_connector->base);
drm_connector_set_vrr_capable_property(&nv_connector->base, pDetectParams->vrrSupported ? true : false);
#endif
if (pDetectParams->connected) {
if (!pDetectParams->overrideEdid && pDetectParams->edid.bufferSize) {
if ((nv_connector->edid = nv_drm_calloc(
1,
pDetectParams->edid.bufferSize)) != NULL) {
memcpy(nv_connector->edid,
pDetectParams->edid.buffer,
pDetectParams->edid.bufferSize);
} else {
NV_DRM_LOG_ERR("Out of Memory");
}
}
return true;
}
return false;
}
static enum drm_connector_status __nv_drm_connector_detect_internal(
struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
enum drm_connector_status status = connector_status_disconnected;
struct drm_encoder *detected_encoder = NULL;
struct nv_drm_encoder *nv_detected_encoder = NULL;
struct drm_encoder *encoder;
struct NvKmsKapiDynamicDisplayParams *pDetectParams = NULL;
BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
if (nv_connector->edid != NULL) {
nv_drm_free(nv_connector->edid);
nv_connector->edid = NULL;
}
if ((pDetectParams = nv_drm_calloc(
1,
sizeof(*pDetectParams))) == NULL) {
WARN_ON(pDetectParams == NULL);
goto done;
}
nv_drm_connector_for_each_possible_encoder(connector, encoder) {
if (__nv_drm_detect_encoder(pDetectParams, connector, encoder)) {
detected_encoder = encoder;
break;
}
} nv_drm_connector_for_each_possible_encoder_end;
if (detected_encoder == NULL) {
goto done;
}
nv_detected_encoder = to_nv_encoder(detected_encoder);
status = connector_status_connected;
nv_connector->nv_detected_encoder = nv_detected_encoder;
if (nv_connector->type == NVKMS_CONNECTOR_TYPE_DVI_I) {
drm_object_property_set_value(
&connector->base,
dev->mode_config.dvi_i_subconnector_property,
detected_encoder->encoder_type == DRM_MODE_ENCODER_DAC ?
DRM_MODE_SUBCONNECTOR_DVIA :
DRM_MODE_SUBCONNECTOR_DVID);
}
done:
nv_drm_free(pDetectParams);
if (status == connector_status_disconnected &&
nv_connector->modeset_permission_filep) {
nv_drm_connector_revoke_permissions(dev, nv_connector);
}
return status;
}
static void __nv_drm_connector_force(struct drm_connector *connector)
{
__nv_drm_connector_detect_internal(connector);
}
static enum drm_connector_status
nv_drm_connector_detect(struct drm_connector *connector, bool force)
{
return __nv_drm_connector_detect_internal(connector);
}
static struct drm_connector_funcs nv_connector_funcs = {
#if defined NV_DRM_ATOMIC_HELPER_CONNECTOR_DPMS_PRESENT
.dpms = drm_atomic_helper_connector_dpms,
#endif
.destroy = nv_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.force = __nv_drm_connector_force,
.detect = nv_drm_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int nv_drm_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
struct nv_drm_encoder *nv_detected_encoder =
nv_connector->nv_detected_encoder;
NvU32 modeIndex = 0;
int count = 0;
if (nv_connector->edid != NULL) {
nv_drm_connector_update_edid_property(connector, nv_connector->edid);
}
while (1) {
struct drm_display_mode *mode;
struct NvKmsKapiDisplayMode displayMode;
NvBool valid = 0;
NvBool preferredMode = NV_FALSE;
int ret;
ret = nvKms->getDisplayMode(nv_dev->pDevice,
nv_detected_encoder->hDisplay,
modeIndex++, &displayMode, &valid,
&preferredMode);
if (ret < 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to get mode at modeIndex %d of NvKmsKapiDisplay 0x%08x",
modeIndex, nv_detected_encoder->hDisplay);
break;
}
/* ret == 0 indicates the end of the mode list */
if (ret == 0) {
break;
}
/* Ignore invalid modes */
if (!valid) {
continue;
}
mode = drm_mode_create(connector->dev);
if (mode == NULL) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to create mode for NvKmsKapiDisplay 0x%08x",
nv_detected_encoder->hDisplay);
continue;
}
nvkms_display_mode_to_drm_mode(&displayMode, mode);
if (preferredMode) {
mode->type |= DRM_MODE_TYPE_PREFERRED;
}
/* Add a mode to a connector's probed_mode list */
drm_mode_probed_add(connector, mode);
count++;
}
return count;
}
static int nv_drm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_encoder *nv_detected_encoder =
to_nv_connector(connector)->nv_detected_encoder;
struct NvKmsKapiDisplayMode displayMode;
if (nv_detected_encoder == NULL) {
return MODE_BAD;
}
drm_mode_to_nvkms_display_mode(mode, &displayMode);
if (!nvKms->validateDisplayMode(nv_dev->pDevice,
nv_detected_encoder->hDisplay,
&displayMode)) {
return MODE_BAD;
}
return MODE_OK;
}
static struct drm_encoder*
nv_drm_connector_best_encoder(struct drm_connector *connector)
{
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
if (nv_connector->nv_detected_encoder != NULL) {
return &nv_connector->nv_detected_encoder->base;
}
return NULL;
}
static const struct drm_connector_helper_funcs nv_connector_helper_funcs = {
.get_modes = nv_drm_connector_get_modes,
.mode_valid = nv_drm_connector_mode_valid,
.best_encoder = nv_drm_connector_best_encoder,
};
static struct drm_connector*
nv_drm_connector_new(struct drm_device *dev,
NvU32 physicalIndex, NvKmsConnectorType type,
NvBool internal,
char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH])
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_connector *nv_connector = NULL;
int ret = -ENOMEM;
if ((nv_connector = nv_drm_calloc(1, sizeof(*nv_connector))) == NULL) {
goto failed;
}
if ((nv_connector->base.state =
nv_drm_calloc(1, sizeof(*nv_connector->base.state))) == NULL) {
goto failed_state_alloc;
}
nv_connector->base.state->connector = &nv_connector->base;
nv_connector->physicalIndex = physicalIndex;
nv_connector->type = type;
nv_connector->internal = internal;
nv_connector->modeset_permission_filep = NULL;
nv_connector->modeset_permission_crtc = NULL;
strcpy(nv_connector->dpAddress, dpAddress);
ret = drm_connector_init(
dev,
&nv_connector->base, &nv_connector_funcs,
nvkms_connector_type_to_drm_connector_type(type, internal));
if (ret != 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to initialize connector created from physical index %u",
nv_connector->physicalIndex);
goto failed_connector_init;
}
drm_connector_helper_add(&nv_connector->base, &nv_connector_helper_funcs);
nv_connector->base.polled = DRM_CONNECTOR_POLL_HPD;
if (nv_connector->type == NVKMS_CONNECTOR_TYPE_VGA) {
nv_connector->base.polled =
DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
}
/* Register connector with DRM subsystem */
ret = drm_connector_register(&nv_connector->base);
if (ret != 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to register connector created from physical index %u",
nv_connector->physicalIndex);
goto failed_connector_register;
}
return &nv_connector->base;
failed_connector_register:
drm_connector_cleanup(&nv_connector->base);
failed_connector_init:
nv_drm_free(nv_connector->base.state);
failed_state_alloc:
nv_drm_free(nv_connector);
failed:
return ERR_PTR(ret);
}
/*
* Get the connector with the given physical index if one exists. Otherwise,
* create and return a new connector.
*/
struct drm_connector*
nv_drm_get_connector(struct drm_device *dev,
NvU32 physicalIndex, NvKmsConnectorType type,
NvBool internal,
char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH])
{
struct drm_connector *connector = NULL;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
struct drm_connector_list_iter conn_iter;
nv_drm_connector_list_iter_begin(dev, &conn_iter);
#else
struct drm_mode_config *config = &dev->mode_config;
mutex_lock(&config->mutex);
#endif
/* Look for an existing connector with the same physical index */
nv_drm_for_each_connector(connector, &conn_iter, dev) {
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
if (nv_connector->physicalIndex == physicalIndex) {
BUG_ON(nv_connector->type != type ||
nv_connector->internal != internal);
if (strcmp(nv_connector->dpAddress, dpAddress) == 0) {
goto done;
}
}
}
connector = NULL;
done:
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_end(&conn_iter);
#else
mutex_unlock(&config->mutex);
#endif
if (!connector) {
connector = nv_drm_connector_new(dev,
physicalIndex, type, internal,
dpAddress);
}
return connector;
}
/*
* Revoke the permissions on this connector.
*/
bool nv_drm_connector_revoke_permissions(struct drm_device *dev,
struct nv_drm_connector* nv_connector)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
bool ret = true;
if (nv_connector->modeset_permission_crtc) {
if (nv_connector->nv_detected_encoder) {
ret = nvKms->revokePermissions(
nv_dev->pDevice, nv_connector->modeset_permission_crtc->head,
nv_connector->nv_detected_encoder->hDisplay);
}
nv_connector->modeset_permission_crtc->modeset_permission_filep = NULL;
nv_connector->modeset_permission_crtc = NULL;
}
nv_connector->modeset_permission_filep = NULL;
return ret;
}
#endif

View File

@@ -0,0 +1,106 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_CONNECTOR_H__
#define __NVIDIA_DRM_CONNECTOR_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT)
#include <drm/drm_connector.h>
#endif
#include "nvtypes.h"
#include "nvkms-api-types.h"
struct nv_drm_connector {
NvU32 physicalIndex;
NvBool internal;
NvKmsConnectorType type;
char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH];
struct nv_drm_encoder *nv_detected_encoder;
struct edid *edid;
atomic_t connection_status_dirty;
/**
* @modeset_permission_filep:
*
* The filep using this connector with DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS.
*/
struct drm_file *modeset_permission_filep;
/**
* @modeset_permission_crtc:
*
* The crtc using this connector with DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS.
*/
struct nv_drm_crtc *modeset_permission_crtc;
struct drm_connector base;
};
static inline struct nv_drm_connector *to_nv_connector(
struct drm_connector *connector)
{
if (connector == NULL) {
return NULL;
}
return container_of(connector, struct nv_drm_connector, base);
}
static inline void nv_drm_connector_mark_connection_status_dirty(
struct nv_drm_connector *nv_connector)
{
atomic_cmpxchg(&nv_connector->connection_status_dirty, false, true);
}
static inline bool nv_drm_connector_check_connection_status_dirty_and_clear(
struct nv_drm_connector *nv_connector)
{
return atomic_cmpxchg(
&nv_connector->connection_status_dirty,
true,
false) == true;
}
struct drm_connector*
nv_drm_get_connector(struct drm_device *dev,
NvU32 physicalIndex, NvKmsConnectorType type,
NvBool internal,
char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]);
bool nv_drm_connector_revoke_permissions(struct drm_device *dev,
struct nv_drm_connector *nv_connector);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_CONNECTOR_H__ */

View File

File diff suppressed because it is too large

View File

@@ -0,0 +1,283 @@
/*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_CRTC_H__
#define __NVIDIA_DRM_CRTC_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-helper.h"
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#include <drm/drm_crtc.h>
#include "nvtypes.h"
#include "nvkms-kapi.h"
struct nv_drm_crtc {
NvU32 head;
/**
* @flip_list:
*
* List of flips pending to get processed by __nv_drm_handle_flip_event().
* Protected by @flip_list_lock.
*/
struct list_head flip_list;
/**
* @flip_list_lock:
*
* Spinlock to protect @flip_list.
*/
spinlock_t flip_list_lock;
/**
* @modeset_permission_filep:
*
* The filep using this crtc with DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS.
*/
struct drm_file *modeset_permission_filep;
struct drm_crtc base;
};
/**
* struct nv_drm_flip - flip state
*
* This state is used to consume the DRM completion event associated
* with each crtc state from an atomic commit.
*
* nv_drm_atomic_apply_modeset_config() consumes the DRM completion
* event, saves it into the flip state associated with the crtc, queues
* the flip state onto the crtc's flip list, and commits the atomic
* update to hardware.
*/
struct nv_drm_flip {
/**
* @event:
*
* Optional pointer to a DRM event to signal upon completion of
* the state update.
*/
struct drm_pending_vblank_event *event;
/**
* @pending_events:
*
* Number of HW events pending to signal completion of the state
* update.
*/
uint32_t pending_events;
/**
* @list_entry:
*
* Entry on the per-CRTC &nv_drm_crtc.flip_list. Protected by
* &nv_drm_crtc.flip_list_lock.
*/
struct list_head list_entry;
/**
* @deferred_flip_list:
*
* List of flip objects whose processing is deferred until this flip
* object is processed. Protected by &nv_drm_crtc.flip_list_lock.
* nv_drm_atomic_commit() gets the last flip object from
* &nv_drm_crtc.flip_list and adds deferred flip objects to
* @deferred_flip_list; __nv_drm_handle_flip_event() then processes
* @deferred_flip_list.
*/
struct list_head deferred_flip_list;
};
struct nv_drm_crtc_state {
/**
* @base:
*
* Base DRM crtc state object for this.
*/
struct drm_crtc_state base;
/**
* @head_req_config:
*
* Requested head's modeset configuration corresponding to this crtc state.
*/
struct NvKmsKapiHeadRequestedConfig req_config;
/**
* @nv_flip:
*
* Flip state associated with this crtc state. It is allocated
* by nv_drm_atomic_crtc_duplicate_state(); on a successful commit it is
* consumed and queued onto the flip list by
* nv_drm_atomic_apply_modeset_config(), and finally destroyed
* by __nv_drm_handle_flip_event() after being processed.
*
* If the atomic commit fails, the flip state is destroyed by
* nv_drm_atomic_crtc_destroy_state().
*/
struct nv_drm_flip *nv_flip;
};
static inline struct nv_drm_crtc_state *to_nv_crtc_state(struct drm_crtc_state *state)
{
return container_of(state, struct nv_drm_crtc_state, base);
}
struct nv_drm_plane {
/**
* @base:
*
* Base DRM plane object for this plane.
*/
struct drm_plane base;
/**
* @defaultCompositionMode:
*
* Default composition blending mode of this plane.
*/
enum NvKmsCompositionBlendingMode defaultCompositionMode;
/**
* @layer_idx:
*
* Index of this plane in the per head array of layers.
*/
uint32_t layer_idx;
};
static inline struct nv_drm_plane *to_nv_plane(struct drm_plane *plane)
{
if (plane == NULL) {
return NULL;
}
return container_of(plane, struct nv_drm_plane, base);
}
struct nv_drm_plane_state {
struct drm_plane_state base;
s32 __user *fd_user_ptr;
enum NvKmsInputColorSpace input_colorspace;
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
struct drm_property_blob *hdr_output_metadata;
#endif
};
static inline struct nv_drm_plane_state *to_nv_drm_plane_state(struct drm_plane_state *state)
{
return container_of(state, struct nv_drm_plane_state, base);
}
static inline const struct nv_drm_plane_state *to_nv_drm_plane_state_const(const struct drm_plane_state *state)
{
return container_of(state, const struct nv_drm_plane_state, base);
}
static inline struct nv_drm_crtc *to_nv_crtc(struct drm_crtc *crtc)
{
if (crtc == NULL) {
return NULL;
}
return container_of(crtc, struct nv_drm_crtc, base);
}
/*
* CRTCs are static objects: the list does not change between device
* initialization and teardown. The initialization and teardown paths are
* single-threaded, so no locking is required.
*/
static inline
struct nv_drm_crtc *nv_drm_crtc_lookup(struct nv_drm_device *nv_dev, NvU32 head)
{
struct drm_crtc *crtc;
nv_drm_for_each_crtc(crtc, nv_dev->dev) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
if (nv_crtc->head == head) {
return nv_crtc;
}
}
return NULL;
}
/**
* nv_drm_crtc_enqueue_flip - Enqueue nv_drm_flip object to flip_list of crtc.
*/
static inline void nv_drm_crtc_enqueue_flip(struct nv_drm_crtc *nv_crtc,
struct nv_drm_flip *nv_flip)
{
spin_lock(&nv_crtc->flip_list_lock);
list_add(&nv_flip->list_entry, &nv_crtc->flip_list);
spin_unlock(&nv_crtc->flip_list_lock);
}
/**
* nv_drm_crtc_dequeue_flip - Dequeue nv_drm_flip object from flip_list of crtc.
*/
static inline
struct nv_drm_flip *nv_drm_crtc_dequeue_flip(struct nv_drm_crtc *nv_crtc)
{
struct nv_drm_flip *nv_flip = NULL;
uint32_t pending_events = 0;
spin_lock(&nv_crtc->flip_list_lock);
nv_flip = list_first_entry_or_null(&nv_crtc->flip_list,
struct nv_drm_flip, list_entry);
if (likely(nv_flip != NULL)) {
/*
* Decrement pending_event count and dequeue flip object if
* pending_event count becomes 0.
*/
pending_events = --nv_flip->pending_events;
if (!pending_events) {
list_del(&nv_flip->list_entry);
}
}
spin_unlock(&nv_crtc->flip_list_lock);
if (WARN_ON(nv_flip == NULL) || pending_events) {
return NULL;
}
return nv_flip;
}
void nv_drm_enumerate_crtcs_and_planes(
struct nv_drm_device *nv_dev,
const struct NvKmsKapiDeviceResourcesInfo *pResInfo);
int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_CRTC_H__ */
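A sketch of how a flip-completion handler might consume the helpers above; the real handler is __nv_drm_handle_flip_event() (defined elsewhere), so this is illustrative only:

/* Sketch: one hardware completion event accounts for one dequeue attempt;
 * the flip object is returned only once all of its pending events have
 * arrived. */
static void example_handle_flip_event(struct nv_drm_crtc *nv_crtc)
{
    struct nv_drm_flip *nv_flip = nv_drm_crtc_dequeue_flip(nv_crtc);

    if (nv_flip == NULL) {
        return; /* more hardware events outstanding for this flip */
    }

    /* ... deliver nv_flip->event to userspace, process each entry on
     * nv_flip->deferred_flip_list the same way, then free nv_flip ... */
}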

View File

File diff suppressed because it is too large

View File

@@ -0,0 +1,36 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_DRV_H__
#define __NVIDIA_DRM_DRV_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
int nv_drm_probe_devices(void);
void nv_drm_remove_devices(void);
#endif /* defined(NV_DRM_AVAILABLE) */
#endif /* __NVIDIA_DRM_DRV_H__ */
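A sketch of how a module entry point might use this interface; the two function names come from this header, but the init/exit wiring shown is illustrative, not the driver's actual entry points:

/* Sketch only. */
static int __init nv_example_module_init(void)
{
    return nv_drm_probe_devices();
}

static void __exit nv_example_module_exit(void)
{
    nv_drm_remove_devices();
}

module_init(nv_example_module_init);
module_exit(nv_example_module_exit);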

View File

@@ -0,0 +1,352 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-priv.h"
#include "nvidia-drm-encoder.h"
#include "nvidia-drm-utils.h"
#include "nvidia-drm-connector.h"
#include "nvidia-drm-crtc.h"
#include "nvidia-drm-helper.h"
#include "nvmisc.h"
/*
* Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
* moves a number of helper function definitions from
* drm/drm_crtc_helper.h to a new drm_probe_helper.h.
*/
#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT)
#include <drm/drm_probe_helper.h>
#endif
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
static void nv_drm_encoder_destroy(struct drm_encoder *encoder)
{
struct nv_drm_encoder *nv_encoder = to_nv_encoder(encoder);
drm_encoder_cleanup(encoder);
nv_drm_free(nv_encoder);
}
static const struct drm_encoder_funcs nv_encoder_funcs = {
.destroy = nv_drm_encoder_destroy,
};
static bool nv_drm_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static void nv_drm_encoder_prepare(struct drm_encoder *encoder)
{
}
static void nv_drm_encoder_commit(struct drm_encoder *encoder)
{
}
static void nv_drm_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
}
static const struct drm_encoder_helper_funcs nv_encoder_helper_funcs = {
.mode_fixup = nv_drm_encoder_mode_fixup,
.prepare = nv_drm_encoder_prepare,
.commit = nv_drm_encoder_commit,
.mode_set = nv_drm_encoder_mode_set,
};
static uint32_t get_crtc_mask(struct drm_device *dev, uint32_t headMask)
{
struct drm_crtc *crtc = NULL;
uint32_t crtc_mask = 0x0;
nv_drm_for_each_crtc(crtc, dev) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
if (headMask & NVBIT(nv_crtc->head)) {
crtc_mask |= drm_crtc_mask(crtc);
}
}
return crtc_mask;
}
/*
* Helper function to create new encoder for given NvKmsKapiDisplay
* with given signal format.
*/
static struct drm_encoder*
nv_drm_encoder_new(struct drm_device *dev,
NvKmsKapiDisplay hDisplay,
NvKmsConnectorSignalFormat format,
unsigned int crtc_mask)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_encoder *nv_encoder = NULL;
int ret = 0;
/* Allocate an NVIDIA encoder object */
nv_encoder = nv_drm_calloc(1, sizeof(*nv_encoder));
if (nv_encoder == NULL) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to allocate memory for NVIDIA-DRM encoder object");
return ERR_PTR(-ENOMEM);
}
nv_encoder->hDisplay = hDisplay;
/* Initialize the base encoder object and add it to the drm subsystem */
ret = drm_encoder_init(dev,
&nv_encoder->base, &nv_encoder_funcs,
nvkms_connector_signal_to_drm_encoder_signal(format)
#if defined(NV_DRM_ENCODER_INIT_HAS_NAME_ARG)
, NULL
#endif
);
if (ret != 0) {
nv_drm_free(nv_encoder);
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to initialize encoder created from NvKmsKapiDisplay 0x%08x",
hDisplay);
return ERR_PTR(ret);
}
nv_encoder->base.possible_crtcs = crtc_mask;
drm_encoder_helper_add(&nv_encoder->base, &nv_encoder_helper_funcs);
return &nv_encoder->base;
}
/*
* Add encoder for given NvKmsKapiDisplay
*/
struct drm_encoder*
nv_drm_add_encoder(struct drm_device *dev, NvKmsKapiDisplay hDisplay)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct NvKmsKapiStaticDisplayInfo *displayInfo = NULL;
struct NvKmsKapiConnectorInfo *connectorInfo = NULL;
struct drm_encoder *encoder = NULL;
struct nv_drm_encoder *nv_encoder = NULL;
struct drm_connector *connector = NULL;
int ret = 0;
/* Query NvKmsKapiStaticDisplayInfo and NvKmsKapiConnectorInfo */
if ((displayInfo = nv_drm_calloc(1, sizeof(*displayInfo))) == NULL) {
ret = -ENOMEM;
goto done;
}
if (!nvKms->getStaticDisplayInfo(nv_dev->pDevice, hDisplay, displayInfo)) {
ret = -EINVAL;
goto done;
}
connectorInfo = nvkms_get_connector_info(nv_dev->pDevice,
displayInfo->connectorHandle);
if (IS_ERR(connectorInfo)) {
ret = PTR_ERR(connectorInfo);
goto done;
}
/* Create and add drm encoder */
encoder = nv_drm_encoder_new(dev,
displayInfo->handle,
connectorInfo->signalFormat,
get_crtc_mask(dev, displayInfo->headMask));
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
goto done;
}
/* Get connector from respective physical index */
connector =
nv_drm_get_connector(dev,
connectorInfo->physicalIndex,
connectorInfo->type,
displayInfo->internal, displayInfo->dpAddress);
if (IS_ERR(connector)) {
ret = PTR_ERR(connector);
goto failed_connector_encoder_attach;
}
/* Attach encoder and connector */
ret = nv_drm_connector_attach_encoder(connector, encoder);
if (ret != 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to attach encoder created from NvKmsKapiDisplay 0x%08x "
"to connector",
hDisplay);
goto failed_connector_encoder_attach;
}
nv_encoder = to_nv_encoder(encoder);
mutex_lock(&dev->mode_config.mutex);
nv_encoder->nv_connector = to_nv_connector(connector);
nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector);
mutex_unlock(&dev->mode_config.mutex);
goto done;
failed_connector_encoder_attach:
drm_encoder_cleanup(encoder);
nv_drm_free(encoder);
done:
nv_drm_free(displayInfo);
nv_drm_free(connectorInfo);
return ret != 0 ? ERR_PTR(ret) : encoder;
}
static inline struct nv_drm_encoder*
get_nv_encoder_from_nvkms_display(struct drm_device *dev,
NvKmsKapiDisplay hDisplay)
{
struct drm_encoder *encoder;
nv_drm_for_each_encoder(encoder, dev) {
struct nv_drm_encoder *nv_encoder = to_nv_encoder(encoder);
if (nv_encoder->hDisplay == hDisplay) {
return nv_encoder;
}
}
return NULL;
}
void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,
NvKmsKapiDisplay hDisplay)
{
struct drm_device *dev = nv_dev->dev;
struct nv_drm_encoder *nv_encoder = NULL;
mutex_lock(&dev->mode_config.mutex);
nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay);
mutex_unlock(&dev->mode_config.mutex);
if (nv_encoder == NULL) {
return;
}
nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector);
drm_kms_helper_hotplug_event(dev);
}
void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
NvKmsKapiDisplay hDisplay)
{
struct drm_device *dev = nv_dev->dev;
struct drm_encoder *encoder = NULL;
struct nv_drm_encoder *nv_encoder = NULL;
/*
* Look for an existing encoder with the same hDisplay and
* use it if available.
*/
nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay);
if (nv_encoder != NULL) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Encoder with NvKmsKapiDisplay 0x%08x already exists.",
hDisplay);
return;
}
encoder = nv_drm_add_encoder(dev, hDisplay);
if (IS_ERR(encoder)) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to add encoder for NvKmsKapiDisplay 0x%08x",
hDisplay);
return;
}
/*
* On some kernels, DRM has the notion of a "primary group" that
* tracks the global mode setting state for the device.
*
* On kernels where DRM has a primary group, we need to reinitialize
* after adding encoders and connectors.
*/
#if defined(NV_DRM_REINIT_PRIMARY_MODE_GROUP_PRESENT)
drm_reinit_primary_mode_group(dev);
#endif
drm_kms_helper_hotplug_event(dev);
}
#endif

View File

@@ -0,0 +1,68 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_ENCODER_H__
#define __NVIDIA_DRM_ENCODER_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-priv.h"
#if defined(NV_DRM_DRM_ENCODER_H_PRESENT)
#include <drm/drm_encoder.h>
#else
#include <drm/drmP.h>
#endif
#include "nvkms-kapi.h"
struct nv_drm_encoder {
NvKmsKapiDisplay hDisplay;
struct nv_drm_connector *nv_connector;
struct drm_encoder base;
};
static inline struct nv_drm_encoder *to_nv_encoder(
struct drm_encoder *encoder)
{
if (encoder == NULL) {
return NULL;
}
return container_of(encoder, struct nv_drm_encoder, base);
}
struct drm_encoder*
nv_drm_add_encoder(struct drm_device *dev, NvKmsKapiDisplay hDisplay);
void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,
NvKmsKapiDisplay hDisplay);
void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
NvKmsKapiDisplay hDisplay);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_ENCODER_H__ */

View File

@@ -0,0 +1,279 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-priv.h"
#include "nvidia-drm-ioctl.h"
#include "nvidia-drm-fb.h"
#include "nvidia-drm-utils.h"
#include "nvidia-drm-gem.h"
#include "nvidia-drm-helper.h"
#include "nvidia-drm-format.h"
#include <drm/drm_crtc_helper.h>
static void __nv_drm_framebuffer_free(struct nv_drm_framebuffer *nv_fb)
{
uint32_t i;
/* Unreference gem objects */
for (i = 0; i < ARRAY_SIZE(nv_fb->nv_gem); i++) {
if (nv_fb->nv_gem[i] != NULL) {
nv_drm_gem_object_unreference_unlocked(nv_fb->nv_gem[i]);
}
}
/* Free framebuffer */
nv_drm_free(nv_fb);
}
static void nv_drm_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct nv_drm_device *nv_dev = to_nv_device(fb->dev);
struct nv_drm_framebuffer *nv_fb = to_nv_framebuffer(fb);
/* Clean up the core framebuffer object */
drm_framebuffer_cleanup(fb);
/* Free NvKmsKapiSurface associated with this framebuffer object */
nvKms->destroySurface(nv_dev->pDevice, nv_fb->pSurface);
__nv_drm_framebuffer_free(nv_fb);
}
static int
nv_drm_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file, unsigned int *handle)
{
struct nv_drm_framebuffer *nv_fb = to_nv_framebuffer(fb);
return nv_drm_gem_handle_create(file,
nv_fb->nv_gem[0],
handle);
}
static struct drm_framebuffer_funcs nv_framebuffer_funcs = {
.destroy = nv_drm_framebuffer_destroy,
.create_handle = nv_drm_framebuffer_create_handle,
};
static struct nv_drm_framebuffer *nv_drm_framebuffer_alloc(
struct drm_device *dev,
struct drm_file *file,
struct drm_mode_fb_cmd2 *cmd)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_framebuffer *nv_fb;
const int num_planes = nv_drm_format_num_planes(cmd->pixel_format);
uint32_t i;
/* Allocate memory for the framebuffer object */
nv_fb = nv_drm_calloc(1, sizeof(*nv_fb));
if (nv_fb == NULL) {
NV_DRM_DEV_DEBUG_DRIVER(
nv_dev,
"Failed to allocate memory for framebuffer object");
return ERR_PTR(-ENOMEM);
}
if (num_planes > ARRAY_SIZE(nv_fb->nv_gem)) {
NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Unsupported number of planes");
goto failed;
}
for (i = 0; i < num_planes; i++) {
if ((nv_fb->nv_gem[i] = nv_drm_gem_object_lookup(
dev,
file,
cmd->handles[i])) == NULL) {
NV_DRM_DEV_DEBUG_DRIVER(
nv_dev,
"Failed to find gem object of type nvkms memory");
goto failed;
}
}
return nv_fb;
failed:
__nv_drm_framebuffer_free(nv_fb);
return ERR_PTR(-ENOENT);
}
static int nv_drm_framebuffer_init(struct drm_device *dev,
struct nv_drm_framebuffer *nv_fb,
enum NvKmsSurfaceMemoryFormat format,
bool have_modifier,
uint64_t modifier)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct NvKmsKapiCreateSurfaceParams params = { };
uint32_t i;
int ret;
/* Initialize the base framebuffer object and add it to drm subsystem */
ret = drm_framebuffer_init(dev, &nv_fb->base, &nv_framebuffer_funcs);
if (ret != 0) {
NV_DRM_DEV_DEBUG_DRIVER(
nv_dev,
"Failed to initialize framebuffer object");
return ret;
}
for (i = 0; i < ARRAY_SIZE(nv_fb->nv_gem); i++) {
if (nv_fb->nv_gem[i] != NULL) {
if (!nvKms->isMemoryValidForDisplay(nv_dev->pDevice,
nv_fb->nv_gem[i]->pMemory)) {
NV_DRM_DEV_LOG_INFO(
nv_dev,
"Framebuffer memory not appropriate for scanout");
goto fail;
}
params.planes[i].memory = nv_fb->nv_gem[i]->pMemory;
params.planes[i].offset = nv_fb->base.offsets[i];
params.planes[i].pitch = nv_fb->base.pitches[i];
}
}
params.height = nv_fb->base.height;
params.width = nv_fb->base.width;
params.format = format;
if (have_modifier) {
params.explicit_layout = true;
params.layout = (modifier & 0x10) ?
NvKmsSurfaceMemoryLayoutBlockLinear :
NvKmsSurfaceMemoryLayoutPitch;
// See the definition of DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D; we are
// testing 'c', the lossless compression field of the modifier.
if (params.layout == NvKmsSurfaceMemoryLayoutBlockLinear &&
(modifier >> 23) & 0x7) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Cannot create FB from compressible surface allocation");
goto fail;
}
params.log2GobsPerBlockY = modifier & 0xf;
} else {
params.explicit_layout = false;
}
/* Create NvKmsKapiSurface */
nv_fb->pSurface = nvKms->createSurface(nv_dev->pDevice, &params);
if (nv_fb->pSurface == NULL) {
NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Failed to create NvKmsKapiSurface");
goto fail;
}
return 0;
fail:
drm_framebuffer_cleanup(&nv_fb->base);
return -EINVAL;
}
struct drm_framebuffer *nv_drm_internal_framebuffer_create(
struct drm_device *dev,
struct drm_file *file,
struct drm_mode_fb_cmd2 *cmd)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_framebuffer *nv_fb;
uint64_t modifier = 0;
int ret;
enum NvKmsSurfaceMemoryFormat format;
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
int i;
#endif
bool have_modifier = false;
/* Check whether NvKms supports the given pixel format */
if (!nv_drm_format_to_nvkms_format(cmd->pixel_format, &format)) {
NV_DRM_DEV_DEBUG_DRIVER(
nv_dev,
"Unsupported drm pixel format 0x%08x", cmd->pixel_format);
return ERR_PTR(-EINVAL);
}
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
if (cmd->flags & DRM_MODE_FB_MODIFIERS) {
have_modifier = true;
modifier = cmd->modifier[0];
for (i = 0; nv_dev->modifiers[i] != DRM_FORMAT_MOD_INVALID; i++) {
if (nv_dev->modifiers[i] == modifier) {
break;
}
}
if (nv_dev->modifiers[i] == DRM_FORMAT_MOD_INVALID) {
NV_DRM_DEV_DEBUG_DRIVER(
nv_dev,
"Invalid format modifier for framebuffer object: 0x%016llx",
modifier);
return ERR_PTR(-EINVAL);
}
}
#endif
nv_fb = nv_drm_framebuffer_alloc(dev, file, cmd);
if (IS_ERR(nv_fb)) {
return (struct drm_framebuffer *)nv_fb;
}
/* Fill out framebuffer metadata from the userspace fb creation request */
drm_helper_mode_fill_fb_struct(
#if defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_DEV_ARG)
dev,
#endif
&nv_fb->base,
cmd);
/*
* Finish up FB initialization by creating the backing NVKMS surface and
* publishing the DRM fb
*/
ret = nv_drm_framebuffer_init(dev, nv_fb, format, have_modifier, modifier);
if (ret != 0) {
__nv_drm_framebuffer_free(nv_fb);
return ERR_PTR(ret);
}
return &nv_fb->base;
}
#endif
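
nv_drm_framebuffer_init() above decodes the NVIDIA block-linear format modifier by hand: bit 4 distinguishes block-linear from pitch layout, bits 0-3 carry log2 of the GOBs-per-block height, and bits 23-25 carry 'c', the lossless compression field (see DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D in drm_fourcc.h). A sketch of the same field extraction as a standalone helper, assuming only the bit positions used by the code above:

#include <stdbool.h>
#include <stdint.h>

struct nv_blocklinear_fields {
    bool     block_linear;            /* bit 4: 1 = block linear, 0 = pitch */
    uint32_t log2_gobs_per_block_y;   /* bits 0..3 */
    uint32_t compression;             /* bits 23..25, the 'c' field */
};

static struct nv_blocklinear_fields
decode_nv_modifier(uint64_t modifier)
{
    struct nv_blocklinear_fields f;

    f.block_linear          = (modifier & 0x10) != 0;
    f.log2_gobs_per_block_y = (uint32_t)(modifier & 0xf);
    f.compression           = (uint32_t)((modifier >> 23) & 0x7);
    return f;
}

nv_drm_framebuffer_init() rejects the combination block_linear && compression != 0, since compressible surfaces cannot be used for scanout here.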


@@ -0,0 +1,66 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_FB_H__
#define __NVIDIA_DRM_FB_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_FRAMEBUFFER_H_PRESENT)
#include <drm/drm_framebuffer.h>
#endif
#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvkms-kapi.h"
struct nv_drm_framebuffer {
struct NvKmsKapiSurface *pSurface;
struct nv_drm_gem_object*
nv_gem[NVKMS_MAX_PLANES_PER_SURFACE];
struct drm_framebuffer base;
};
static inline struct nv_drm_framebuffer *to_nv_framebuffer(
struct drm_framebuffer *fb)
{
if (fb == NULL) {
return NULL;
}
return container_of(fb, struct nv_drm_framebuffer, base);
}
struct drm_framebuffer *nv_drm_internal_framebuffer_create(
struct drm_device *dev,
struct drm_file *file,
struct drm_mode_fb_cmd2 *cmd);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_FB_H__ */


(File diff suppressed because it is too large.)


@@ -0,0 +1,60 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_PRIME_FENCE_H__
#define __NVIDIA_DRM_PRIME_FENCE_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
struct drm_file;
struct drm_device;
#if defined(NV_DRM_FENCE_AVAILABLE)
int nv_drm_fence_supported_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_prime_fence_context_create_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_semsurf_fence_ctx_create_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep);
int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep);
int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep);
#endif /* NV_DRM_FENCE_AVAILABLE */
#endif /* NV_DRM_AVAILABLE */
#endif /* __NVIDIA_DRM_PRIME_FENCE_H__ */
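
Handlers with this signature are normally registered in the driver's drm_ioctl_desc table via the standard DRM_IOCTL_DEF_DRV() macro. A sketch of what one entry could look like; the NVIDIA-specific ioctl name and flag choice below are assumptions for illustration, not taken from this header:

/*
 * Hypothetical excerpt of an ioctl table. DRM_IOCTL_DEF_DRV() is the
 * stock DRM macro from <drm/drm_ioctl.h>; the ioctl name is assumed.
 */
static const struct drm_ioctl_desc nv_drm_fence_ioctls_sketch[] = {
    DRM_IOCTL_DEF_DRV(NVIDIA_FENCE_SUPPORTED,
                      nv_drm_fence_supported_ioctl,
                      DRM_RENDER_ALLOW),
};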


@@ -0,0 +1,169 @@
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include "nvidia-drm-format.h"
#include "nvidia-drm-os-interface.h"
static const u32 nvkms_to_drm_format[] = {
/* RGB formats */
[NvKmsSurfaceMemoryFormatA1R5G5B5] = DRM_FORMAT_ARGB1555,
[NvKmsSurfaceMemoryFormatX1R5G5B5] = DRM_FORMAT_XRGB1555,
[NvKmsSurfaceMemoryFormatR5G6B5] = DRM_FORMAT_RGB565,
[NvKmsSurfaceMemoryFormatA8R8G8B8] = DRM_FORMAT_ARGB8888,
[NvKmsSurfaceMemoryFormatX8R8G8B8] = DRM_FORMAT_XRGB8888,
[NvKmsSurfaceMemoryFormatX8B8G8R8] = DRM_FORMAT_XBGR8888,
[NvKmsSurfaceMemoryFormatA2B10G10R10] = DRM_FORMAT_ABGR2101010,
[NvKmsSurfaceMemoryFormatX2B10G10R10] = DRM_FORMAT_XBGR2101010,
[NvKmsSurfaceMemoryFormatA8B8G8R8] = DRM_FORMAT_ABGR8888,
#if defined(DRM_FORMAT_ABGR16161616F)
[NvKmsSurfaceMemoryFormatRF16GF16BF16AF16] = DRM_FORMAT_ABGR16161616F,
#endif
#if defined(DRM_FORMAT_XBGR16161616F)
[NvKmsSurfaceMemoryFormatRF16GF16BF16XF16] = DRM_FORMAT_XBGR16161616F,
#endif
[NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422] = DRM_FORMAT_YUYV,
[NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422] = DRM_FORMAT_UYVY,
/* YUV semi-planar formats
*
* NVKMS YUV semi-planar formats are MSB aligned. Yx__UxVx means
* that the UV components are packed like UUUUUVVVVV (MSB to LSB)
* and Yx__VxUx means VVVVVUUUUU (MSB to LSB).
*/
/*
* 2 plane YCbCr
* index 0 = Y plane, [7:0] Y
* index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
* or
* index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
*/
[NvKmsSurfaceMemoryFormatY8___V8U8_N444] = DRM_FORMAT_NV24, /* non-subsampled Cr:Cb plane */
[NvKmsSurfaceMemoryFormatY8___U8V8_N444] = DRM_FORMAT_NV42, /* non-subsampled Cb:Cr plane */
[NvKmsSurfaceMemoryFormatY8___V8U8_N422] = DRM_FORMAT_NV16, /* 2x1 subsampled Cr:Cb plane */
[NvKmsSurfaceMemoryFormatY8___U8V8_N422] = DRM_FORMAT_NV61, /* 2x1 subsampled Cb:Cr plane */
[NvKmsSurfaceMemoryFormatY8___V8U8_N420] = DRM_FORMAT_NV12, /* 2x2 subsampled Cr:Cb plane */
[NvKmsSurfaceMemoryFormatY8___U8V8_N420] = DRM_FORMAT_NV21, /* 2x2 subsampled Cb:Cr plane */
#if defined(DRM_FORMAT_P210)
/*
* 2 plane YCbCr MSB aligned
* index 0 = Y plane, [15:0] Y:x [10:6] little endian
* index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
*
* 2x1 subsampled Cr:Cb plane, 10 bit per channel
*/
[NvKmsSurfaceMemoryFormatY10___V10U10_N422] = DRM_FORMAT_P210,
#endif
#if defined(DRM_FORMAT_P010)
/*
* 2 plane YCbCr MSB aligned
* index 0 = Y plane, [15:0] Y:x [10:6] little endian
* index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
*
* 2x2 subsampled Cr:Cb plane 10 bits per channel
*/
[NvKmsSurfaceMemoryFormatY10___V10U10_N420] = DRM_FORMAT_P010,
#endif
#if defined(DRM_FORMAT_P012)
/*
* 2 plane YCbCr MSB aligned
* index 0 = Y plane, [15:0] Y:x [12:4] little endian
* index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [12:4:12:4] little endian
*
* 2x2 subsampled Cr:Cb plane 12 bits per channel
*/
[NvKmsSurfaceMemoryFormatY12___V12U12_N420] = DRM_FORMAT_P012,
#endif
};
bool nv_drm_format_to_nvkms_format(u32 format,
enum NvKmsSurfaceMemoryFormat *nvkms_format)
{
enum NvKmsSurfaceMemoryFormat i;
for (i = 0; i < ARRAY_SIZE(nvkms_to_drm_format); i++) {
/*
* Note nvkms_to_drm_format[] is sparsely populated: it doesn't
* handle all NvKmsSurfaceMemoryFormat values, so be sure to skip 0
* entries when iterating through it.
*/
if (nvkms_to_drm_format[i] != 0 && nvkms_to_drm_format[i] == format) {
*nvkms_format = i;
return true;
}
}
return false;
}
uint32_t *nv_drm_format_array_alloc(
unsigned int *count,
const long unsigned int nvkms_format_mask)
{
enum NvKmsSurfaceMemoryFormat i;
unsigned int max_count = hweight64(nvkms_format_mask);
uint32_t *array = nv_drm_calloc(1, sizeof(uint32_t) * max_count);
if (array == NULL) {
return NULL;
}
*count = 0;
for_each_set_bit(i, &nvkms_format_mask,
sizeof(nvkms_format_mask) * BITS_PER_BYTE) {
if (i >= ARRAY_SIZE(nvkms_to_drm_format)) {
break;
}
/*
* Note nvkms_to_drm_format[] is sparsely populated: it doesn't
* handle all NvKmsSurfaceMemoryFormat values, so be sure to skip 0
* entries when iterating through it.
*/
if (nvkms_to_drm_format[i] == 0) {
continue;
}
array[(*count)++] = nvkms_to_drm_format[i];
}
if (*count == 0) {
nv_drm_free(array);
return NULL;
}
return array;
}
#endif
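
nv_drm_format_array_alloc() turns a bitmask of NvKmsSurfaceMemoryFormat values into a compact array of DRM fourcc codes, skipping mask bits that the sparse nvkms_to_drm_format[] table cannot translate. A short usage sketch; the mask below is an assumed example value, not one queried from hardware:

/* Sketch: build a DRM fourcc list for a plane from an NVKMS format mask. */
static void example_build_format_list(void)
{
    unsigned int count = 0;
    unsigned long mask =
        (1UL << NvKmsSurfaceMemoryFormatA8R8G8B8) |
        (1UL << NvKmsSurfaceMemoryFormatX8R8G8B8);
    uint32_t *formats = nv_drm_format_array_alloc(&count, mask);

    if (formats != NULL) {
        /* formats[0..count-1] holds DRM_FORMAT_ARGB8888 and
         * DRM_FORMAT_XRGB8888, in a form suitable for passing to
         * drm_universal_plane_init(); release it with nv_drm_free(). */
        nv_drm_free(formats);
    }
}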


@@ -0,0 +1,43 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_FORMAT_H__
#define __NVIDIA_DRM_FORMAT_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include <drm/drm_fourcc.h>
#include "nvkms-format.h"
bool nv_drm_format_to_nvkms_format(u32 format,
enum NvKmsSurfaceMemoryFormat *nvkms_format);
uint32_t *nv_drm_format_array_alloc(
unsigned int *count,
const long unsigned int nvkms_format_mask);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_FORMAT_H__ */


@@ -0,0 +1,228 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
#include "nvidia-drm-gem-dma-buf.h"
#include "nvidia-drm-ioctl.h"
#include "linux/dma-buf.h"
static inline
void __nv_drm_gem_dma_buf_free(struct nv_drm_gem_object *nv_gem)
{
struct nv_drm_device *nv_dev = nv_gem->nv_dev;
struct nv_drm_gem_dma_buf *nv_dma_buf = to_nv_dma_buf(nv_gem);
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
if (nv_dma_buf->base.pMemory) {
/* Free NvKmsKapiMemory handle associated with this gem object */
nvKms->freeMemory(nv_dev->pDevice, nv_dma_buf->base.pMemory);
}
#endif
drm_prime_gem_destroy(&nv_gem->base, nv_dma_buf->sgt);
nv_drm_free(nv_dma_buf);
}
static int __nv_drm_gem_dma_buf_create_mmap_offset(
struct nv_drm_device *nv_dev,
struct nv_drm_gem_object *nv_gem,
uint64_t *offset)
{
(void)nv_dev;
return nv_drm_gem_create_mmap_offset(nv_gem, offset);
}
static int __nv_drm_gem_dma_buf_mmap(struct nv_drm_gem_object *nv_gem,
struct vm_area_struct *vma)
{
struct dma_buf_attachment *attach = nv_gem->base.import_attach;
struct dma_buf *dma_buf = attach->dmabuf;
struct file *old_file;
int ret;
/* check if buffer supports mmap */
if (!dma_buf->file->f_op->mmap)
return -EINVAL;
/* readjust the vma */
get_file(dma_buf->file);
old_file = vma->vm_file;
vma->vm_file = dma_buf->file;
vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);
ret = dma_buf->file->f_op->mmap(dma_buf->file, vma);
if (ret) {
/* restore old parameters on failure */
vma->vm_file = old_file;
fput(dma_buf->file);
} else {
if (old_file)
fput(old_file);
}
return ret;
}
const struct nv_drm_gem_object_funcs __nv_gem_dma_buf_ops = {
.free = __nv_drm_gem_dma_buf_free,
.create_mmap_offset = __nv_drm_gem_dma_buf_create_mmap_offset,
.mmap = __nv_drm_gem_dma_buf_mmap,
};
struct drm_gem_object*
nv_drm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct dma_buf *dma_buf = attach->dmabuf;
struct nv_drm_gem_dma_buf *nv_dma_buf;
struct NvKmsKapiMemory *pMemory;
if ((nv_dma_buf =
nv_drm_calloc(1, sizeof(*nv_dma_buf))) == NULL) {
return NULL;
}
// dma_buf->size must be a multiple of PAGE_SIZE
BUG_ON(dma_buf->size % PAGE_SIZE);
pMemory = NULL;
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
pMemory = nvKms->getSystemMemoryHandleFromDmaBuf(nv_dev->pDevice,
(NvP64)(NvUPtr)dma_buf,
dma_buf->size - 1);
}
#endif
nv_drm_gem_object_init(nv_dev, &nv_dma_buf->base,
&__nv_gem_dma_buf_ops, dma_buf->size, pMemory);
nv_dma_buf->sgt = sgt;
return &nv_dma_buf->base.base;
}
int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct drm_nvidia_gem_export_dmabuf_memory_params *p = data;
struct nv_drm_gem_dma_buf *nv_dma_buf = NULL;
int ret = 0;
struct NvKmsKapiMemory *pTmpMemory = NULL;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = -EINVAL;
goto done;
}
if (p->__pad != 0) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
goto done;
}
if ((nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(
dev, filep, p->handle)) == NULL) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to lookup DMA-BUF GEM object for export: 0x%08x",
p->handle);
goto done;
}
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if (!nv_dma_buf->base.pMemory) {
/*
* Get RM system memory handle from SGT - RM will take a reference
* on this GEM object to prevent the DMA-BUF from being unpinned
* prematurely.
*/
pTmpMemory = nvKms->getSystemMemoryHandleFromSgt(
nv_dev->pDevice,
(NvP64)(NvUPtr)nv_dma_buf->sgt,
(NvP64)(NvUPtr)&nv_dma_buf->base.base,
nv_dma_buf->base.base.size - 1);
}
}
#endif
if (!nv_dma_buf->base.pMemory && !pTmpMemory) {
ret = -ENOMEM;
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to get memory to export from DMA-BUF GEM object: 0x%08x",
p->handle);
goto done;
}
if (!nvKms->exportMemory(nv_dev->pDevice,
nv_dma_buf->base.pMemory ?
nv_dma_buf->base.pMemory : pTmpMemory,
p->nvkms_params_ptr,
p->nvkms_params_size)) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to export memory from DMA-BUF GEM object: 0x%08x",
p->handle);
goto done;
}
done:
if (pTmpMemory) {
/*
* Release reference on RM system memory to prevent circular
* refcounting. Another refcount will still be held by RM FD.
*/
nvKms->freeMemory(nv_dev->pDevice, pTmpMemory);
}
if (nv_dma_buf != NULL) {
nv_drm_gem_object_unreference_unlocked(&nv_dma_buf->base);
}
return ret;
}
#endif


@@ -0,0 +1,76 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_GEM_DMA_BUF_H__
#define __NVIDIA_DRM_GEM_DMA_BUF_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-gem.h"
struct nv_drm_gem_dma_buf {
struct nv_drm_gem_object base;
struct sg_table *sgt;
};
extern const struct nv_drm_gem_object_funcs __nv_gem_dma_buf_ops;
static inline struct nv_drm_gem_dma_buf *to_nv_dma_buf(
struct nv_drm_gem_object *nv_gem)
{
if (nv_gem != NULL) {
return container_of(nv_gem, struct nv_drm_gem_dma_buf, base);
}
return NULL;
}
static inline
struct nv_drm_gem_dma_buf *nv_drm_gem_object_dma_buf_lookup(
struct drm_device *dev,
struct drm_file *filp,
u32 handle)
{
struct nv_drm_gem_object *nv_gem =
nv_drm_gem_object_lookup(dev, filp, handle);
if (nv_gem != NULL && nv_gem->ops != &__nv_gem_dma_buf_ops) {
nv_drm_gem_object_unreference_unlocked(nv_gem);
return NULL;
}
return to_nv_dma_buf(nv_gem);
}
struct drm_gem_object*
nv_drm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
#endif
#endif /* __NVIDIA_DRM_GEM_DMA_BUF_H__ */


@@ -0,0 +1,609 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvidia-drm-helper.h"
#include "nvidia-drm-ioctl.h"
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
#include <linux/io.h>
#include "nv-mm.h"
static void __nv_drm_gem_nvkms_memory_free(struct nv_drm_gem_object *nv_gem)
{
struct nv_drm_device *nv_dev = nv_gem->nv_dev;
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
to_nv_nvkms_memory(nv_gem);
if (nv_nvkms_memory->physically_mapped) {
if (nv_nvkms_memory->pWriteCombinedIORemapAddress != NULL) {
iounmap(nv_nvkms_memory->pWriteCombinedIORemapAddress);
}
nvKms->unmapMemory(nv_dev->pDevice,
nv_nvkms_memory->base.pMemory,
NVKMS_KAPI_MAPPING_TYPE_USER,
nv_nvkms_memory->pPhysicalAddress);
}
if (nv_nvkms_memory->pages_count != 0) {
nvKms->freeMemoryPages((NvU64 *)nv_nvkms_memory->pages);
}
/* Free NvKmsKapiMemory handle associated with this gem object */
nvKms->freeMemory(nv_dev->pDevice, nv_nvkms_memory->base.pMemory);
nv_drm_free(nv_nvkms_memory);
}
static int __nv_drm_gem_nvkms_mmap(struct nv_drm_gem_object *nv_gem,
struct vm_area_struct *vma)
{
return drm_gem_mmap_obj(&nv_gem->base,
drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma);
}
static vm_fault_t __nv_drm_gem_nvkms_handle_vma_fault(
struct nv_drm_gem_object *nv_gem,
struct vm_area_struct *vma,
struct vm_fault *vmf)
{
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
to_nv_nvkms_memory(nv_gem);
unsigned long address = nv_page_fault_va(vmf);
struct drm_gem_object *gem = vma->vm_private_data;
unsigned long page_offset, pfn;
vm_fault_t ret;
page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);
if (nv_nvkms_memory->pages_count == 0) {
pfn = (unsigned long)(uintptr_t)nv_nvkms_memory->pPhysicalAddress;
pfn >>= PAGE_SHIFT;
pfn += page_offset;
} else {
BUG_ON(page_offset >= nv_nvkms_memory->pages_count);
pfn = page_to_pfn(nv_nvkms_memory->pages[page_offset]);
}
#if defined(NV_VMF_INSERT_PFN_PRESENT)
ret = vmf_insert_pfn(vma, address, pfn);
#else
ret = vm_insert_pfn(vma, address, pfn);
switch (ret) {
case 0:
case -EBUSY:
/*
* EBUSY indicates that another thread already handled
* the faulted range.
*/
ret = VM_FAULT_NOPAGE;
break;
case -ENOMEM:
ret = VM_FAULT_OOM;
break;
default:
WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret);
ret = VM_FAULT_SIGBUS;
break;
}
#endif /* defined(NV_VMF_INSERT_PFN_PRESENT) */
return ret;
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
return VM_FAULT_SIGBUS;
}
static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
struct drm_device *dev,
const struct nv_drm_gem_object *nv_gem_src);
static int __nv_drm_gem_nvkms_map(
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory)
{
struct nv_drm_device *nv_dev = nv_nvkms_memory->base.nv_dev;
struct NvKmsKapiMemory *pMemory = nv_nvkms_memory->base.pMemory;
if (!nv_dev->hasVideoMemory) {
return 0;
}
if (!nvKms->mapMemory(nv_dev->pDevice,
pMemory,
NVKMS_KAPI_MAPPING_TYPE_USER,
&nv_nvkms_memory->pPhysicalAddress)) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to map NvKmsKapiMemory 0x%p",
pMemory);
return -ENOMEM;
}
nv_nvkms_memory->pWriteCombinedIORemapAddress = ioremap_wc(
(uintptr_t)nv_nvkms_memory->pPhysicalAddress,
nv_nvkms_memory->base.base.size);
if (!nv_nvkms_memory->pWriteCombinedIORemapAddress) {
NV_DRM_DEV_LOG_INFO(
nv_dev,
"Failed to ioremap_wc NvKmsKapiMemory 0x%p",
pMemory);
}
nv_nvkms_memory->physically_mapped = true;
return 0;
}
static void *__nv_drm_gem_nvkms_prime_vmap(
struct nv_drm_gem_object *nv_gem)
{
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
to_nv_nvkms_memory(nv_gem);
if (!nv_nvkms_memory->physically_mapped) {
int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
if (ret) {
return ERR_PTR(ret);
}
}
return nv_nvkms_memory->pWriteCombinedIORemapAddress;
}
static int __nv_drm_gem_map_nvkms_memory_offset(
struct nv_drm_device *nv_dev,
struct nv_drm_gem_object *nv_gem,
uint64_t *offset)
{
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
to_nv_nvkms_memory(nv_gem);
if (!nv_nvkms_memory->physically_mapped) {
int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
if (ret) {
return ret;
}
}
return nv_drm_gem_create_mmap_offset(&nv_nvkms_memory->base, offset);
}
static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table(
struct nv_drm_gem_object *nv_gem)
{
struct nv_drm_device *nv_dev = nv_gem->nv_dev;
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
to_nv_nvkms_memory(nv_gem);
struct sg_table *sg_table;
if (nv_nvkms_memory->pages_count == 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Cannot create sg_table for NvKmsKapiMemory 0x%p",
nv_gem->pMemory);
return ERR_PTR(-ENOMEM);
}
sg_table = nv_drm_prime_pages_to_sg(nv_dev->dev,
nv_nvkms_memory->pages,
nv_nvkms_memory->pages_count);
return sg_table;
}
const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops = {
.free = __nv_drm_gem_nvkms_memory_free,
.prime_dup = __nv_drm_gem_nvkms_prime_dup,
.prime_vmap = __nv_drm_gem_nvkms_prime_vmap,
.mmap = __nv_drm_gem_nvkms_mmap,
.handle_vma_fault = __nv_drm_gem_nvkms_handle_vma_fault,
.create_mmap_offset = __nv_drm_gem_map_nvkms_memory_offset,
.prime_get_sg_table = __nv_drm_gem_nvkms_memory_prime_get_sg_table,
};
static int __nv_drm_nvkms_gem_obj_init(
struct nv_drm_device *nv_dev,
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory,
struct NvKmsKapiMemory *pMemory,
uint64_t size)
{
NvU64 *pages = NULL;
NvU32 numPages = 0;
nv_nvkms_memory->pPhysicalAddress = NULL;
nv_nvkms_memory->pWriteCombinedIORemapAddress = NULL;
nv_nvkms_memory->physically_mapped = false;
if (!nvKms->getMemoryPages(nv_dev->pDevice,
pMemory,
&pages,
&numPages) &&
!nv_dev->hasVideoMemory) {
/* GetMemoryPages may fail for vidmem allocations,
* but it should not fail for sysmem allocations. */
NV_DRM_DEV_LOG_ERR(nv_dev,
"Failed to get memory pages for NvKmsKapiMemory 0x%p",
pMemory);
return -ENOMEM;
}
nv_nvkms_memory->pages_count = numPages;
nv_nvkms_memory->pages = (struct page **)pages;
nv_drm_gem_object_init(nv_dev,
&nv_nvkms_memory->base,
&nv_gem_nvkms_memory_ops,
size,
pMemory);
return 0;
}
int nv_drm_dumb_create(
struct drm_file *file_priv,
struct drm_device *dev, struct drm_mode_create_dumb *args)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
uint8_t compressible = 0;
struct NvKmsKapiMemory *pMemory;
int ret = 0;
args->pitch = roundup(args->width * ((args->bpp + 7) >> 3),
nv_dev->pitchAlignment);
args->size = args->height * args->pitch;
/* Core DRM requires gem object size to be aligned with PAGE_SIZE */
args->size = roundup(args->size, PAGE_SIZE);
if ((nv_nvkms_memory =
nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
ret = -ENOMEM;
goto fail;
}
if (nv_dev->hasVideoMemory) {
pMemory = nvKms->allocateVideoMemory(nv_dev->pDevice,
NvKmsSurfaceMemoryLayoutPitch,
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
args->size,
&compressible);
} else {
pMemory = nvKms->allocateSystemMemory(nv_dev->pDevice,
NvKmsSurfaceMemoryLayoutPitch,
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
args->size,
&compressible);
}
if (pMemory == NULL) {
ret = -ENOMEM;
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to allocate NvKmsKapiMemory for dumb object of size %llu",
args->size);
goto nvkms_alloc_memory_failed;
}
ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, args->size);
if (ret) {
goto nvkms_gem_obj_init_failed;
}
/* Always map dumb buffer memory up front. Clients are only expected
* to use dumb buffers for software rendering, so they're not much use
* without a CPU mapping.
*/
ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
if (ret) {
nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);
goto fail;
}
return nv_drm_gem_handle_create_drop_reference(file_priv,
&nv_nvkms_memory->base,
&args->handle);
nvkms_gem_obj_init_failed:
nvKms->freeMemory(nv_dev->pDevice, pMemory);
nvkms_alloc_memory_failed:
nv_drm_free(nv_nvkms_memory);
fail:
return ret;
}
int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct drm_nvidia_gem_import_nvkms_memory_params *p = data;
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
struct NvKmsKapiMemory *pMemory;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = -EINVAL;
goto failed;
}
if ((nv_nvkms_memory =
nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
ret = -ENOMEM;
goto failed;
}
pMemory = nvKms->importMemory(nv_dev->pDevice,
p->mem_size,
p->nvkms_params_ptr,
p->nvkms_params_size);
if (pMemory == NULL) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to import NVKMS memory to GEM object");
goto nvkms_import_memory_failed;
}
ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, p->mem_size);
if (ret) {
goto nvkms_gem_obj_init_failed;
}
return nv_drm_gem_handle_create_drop_reference(filep,
&nv_nvkms_memory->base,
&p->handle);
nvkms_gem_obj_init_failed:
nvKms->freeMemory(nv_dev->pDevice, pMemory);
nvkms_import_memory_failed:
nv_drm_free(nv_nvkms_memory);
failed:
return ret;
}
int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct drm_nvidia_gem_export_nvkms_memory_params *p = data;
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = -EINVAL;
goto done;
}
if (p->__pad != 0) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
goto done;
}
if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(
dev,
filep,
p->handle)) == NULL) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to lookup NVKMS gem object for export: 0x%08x",
p->handle);
goto done;
}
if (!nvKms->exportMemory(nv_dev->pDevice,
nv_nvkms_memory->base.pMemory,
p->nvkms_params_ptr,
p->nvkms_params_size)) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to export memory from NVKMS GEM object: 0x%08x", p->handle);
goto done;
}
done:
if (nv_nvkms_memory != NULL) {
nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);
}
return ret;
}
int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct drm_nvidia_gem_alloc_nvkms_memory_params *p = data;
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL;
struct NvKmsKapiMemory *pMemory;
enum NvKmsSurfaceMemoryLayout layout;
enum NvKmsKapiAllocationType type;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = -EINVAL;
goto failed;
}
if ((p->__pad0 != 0) || (p->__pad1 != 0)) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(nv_dev, "non-zero value in padding field");
goto failed;
}
if ((nv_nvkms_memory =
nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
ret = -ENOMEM;
goto failed;
}
layout = p->block_linear ?
NvKmsSurfaceMemoryLayoutBlockLinear : NvKmsSurfaceMemoryLayoutPitch;
type = (p->flags & NV_GEM_ALLOC_NO_SCANOUT) ?
NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN : NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT;
if (nv_dev->hasVideoMemory) {
pMemory = nvKms->allocateVideoMemory(nv_dev->pDevice,
layout,
type,
p->memory_size,
&p->compressible);
} else {
pMemory = nvKms->allocateSystemMemory(nv_dev->pDevice,
layout,
type,
p->memory_size,
&p->compressible);
}
if (pMemory == NULL) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(nv_dev,
"Failed to allocate NVKMS memory for GEM object");
goto nvkms_alloc_memory_failed;
}
ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory,
p->memory_size);
if (ret) {
goto nvkms_gem_obj_init_failed;
}
return nv_drm_gem_handle_create_drop_reference(filep,
&nv_nvkms_memory->base,
&p->handle);
nvkms_gem_obj_init_failed:
nvKms->freeMemory(nv_dev->pDevice, pMemory);
nvkms_alloc_memory_failed:
nv_drm_free(nv_nvkms_memory);
failed:
return ret;
}
static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
struct drm_device *dev,
const struct nv_drm_gem_object *nv_gem_src)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
const struct nv_drm_device *nv_dev_src;
const struct nv_drm_gem_nvkms_memory *nv_nvkms_memory_src;
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
struct NvKmsKapiMemory *pMemory;
BUG_ON(nv_gem_src == NULL || nv_gem_src->ops != &nv_gem_nvkms_memory_ops);
nv_dev_src = to_nv_device(nv_gem_src->base.dev);
nv_nvkms_memory_src = to_nv_nvkms_memory_const(nv_gem_src);
if ((nv_nvkms_memory =
nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
return NULL;
}
pMemory = nvKms->dupMemory(nv_dev->pDevice,
nv_dev_src->pDevice, nv_gem_src->pMemory);
if (pMemory == NULL) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to import NVKMS memory to GEM object");
goto nvkms_dup_memory_failed;
}
if (__nv_drm_nvkms_gem_obj_init(nv_dev,
nv_nvkms_memory,
pMemory,
nv_gem_src->base.size)) {
goto nvkms_gem_obj_init_failed;
}
return &nv_nvkms_memory->base.base;
nvkms_gem_obj_init_failed:
nvKms->freeMemory(nv_dev->pDevice, pMemory);
nvkms_dup_memory_failed:
nv_drm_free(nv_nvkms_memory);
return NULL;
}
int nv_drm_dumb_map_offset(struct drm_file *file,
struct drm_device *dev, uint32_t handle,
uint64_t *offset)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
int ret = -EINVAL;
if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(
dev,
file,
handle)) == NULL) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to lookup gem object for mapping: 0x%08x",
handle);
return ret;
}
ret = __nv_drm_gem_map_nvkms_memory_offset(nv_dev,
&nv_nvkms_memory->base, offset);
nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);
return ret;
}
#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
int nv_drm_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle)
{
return drm_gem_handle_delete(file, handle);
}
#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */
#endif
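
nv_drm_dumb_create() derives the pitch by rounding the byte width up to the device's pitch alignment, then rounds the total allocation up to a whole number of pages, since core DRM requires page-aligned GEM sizes. A worked example of that arithmetic with assumed values (1920x1080 at 32 bpp, a 256-byte pitchAlignment, 4 KiB pages); roundup() is the stock kernel helper:

uint32_t width = 1920, height = 1080, bpp = 32;
uint32_t pitch_align = 256;                       /* assumed alignment */

uint32_t cpp   = (bpp + 7) >> 3;                  /* 4 bytes per pixel */
uint32_t pitch = roundup(width * cpp, pitch_align);        /* = 7680 */
uint64_t size  = roundup((uint64_t)height * pitch, 4096);  /* page size */
/* 1080 * 7680 = 8294400 bytes, already a 4096 multiple, so no growth */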


@@ -0,0 +1,112 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__
#define __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-gem.h"
struct nv_drm_gem_nvkms_memory {
struct nv_drm_gem_object base;
bool physically_mapped;
void *pPhysicalAddress;
void *pWriteCombinedIORemapAddress;
struct page **pages;
unsigned long pages_count;
};
extern const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops;
static inline struct nv_drm_gem_nvkms_memory *to_nv_nvkms_memory(
struct nv_drm_gem_object *nv_gem)
{
if (nv_gem != NULL) {
return container_of(nv_gem, struct nv_drm_gem_nvkms_memory, base);
}
return NULL;
}
static inline struct nv_drm_gem_nvkms_memory *to_nv_nvkms_memory_const(
const struct nv_drm_gem_object *nv_gem)
{
if (nv_gem != NULL) {
return container_of(nv_gem, struct nv_drm_gem_nvkms_memory, base);
}
return NULL;
}
static inline
struct nv_drm_gem_nvkms_memory *nv_drm_gem_object_nvkms_memory_lookup(
struct drm_device *dev,
struct drm_file *filp,
u32 handle)
{
struct nv_drm_gem_object *nv_gem =
nv_drm_gem_object_lookup(dev, filp, handle);
if (nv_gem != NULL && nv_gem->ops != &nv_gem_nvkms_memory_ops) {
nv_drm_gem_object_unreference_unlocked(nv_gem);
return NULL;
}
return to_nv_nvkms_memory(nv_gem);
}
int nv_drm_dumb_create(
struct drm_file *file_priv,
struct drm_device *dev, struct drm_mode_create_dumb *args);
int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_dumb_map_offset(struct drm_file *file,
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
int nv_drm_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle);
#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */
struct drm_gem_object *nv_drm_gem_nvkms_prime_import(
struct drm_device *dev,
struct drm_gem_object *gem);
#endif
#endif /* __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__ */


@@ -0,0 +1,216 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
#include "nvidia-drm-gem-user-memory.h"
#include "nvidia-drm-helper.h"
#include "nvidia-drm-ioctl.h"
#include "linux/dma-buf.h"
#include "linux/mm.h"
#include "nv-mm.h"
static inline
void __nv_drm_gem_user_memory_free(struct nv_drm_gem_object *nv_gem)
{
struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
nv_drm_unlock_user_pages(nv_user_memory->pages_count,
nv_user_memory->pages);
nv_drm_free(nv_user_memory);
}
static struct sg_table *__nv_drm_gem_user_memory_prime_get_sg_table(
struct nv_drm_gem_object *nv_gem)
{
struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
struct drm_gem_object *gem = &nv_gem->base;
return nv_drm_prime_pages_to_sg(gem->dev,
nv_user_memory->pages,
nv_user_memory->pages_count);
}
static void *__nv_drm_gem_user_memory_prime_vmap(
struct nv_drm_gem_object *nv_gem)
{
struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
return nv_drm_vmap(nv_user_memory->pages,
nv_user_memory->pages_count);
}
static void __nv_drm_gem_user_memory_prime_vunmap(
struct nv_drm_gem_object *gem,
void *address)
{
nv_drm_vunmap(address);
}
static int __nv_drm_gem_user_memory_mmap(struct nv_drm_gem_object *nv_gem,
struct vm_area_struct *vma)
{
int ret = drm_gem_mmap_obj(&nv_gem->base,
drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma);
if (ret < 0) {
return ret;
}
/*
* Enforce that user-memory GEM mappings are MAP_SHARED, to prevent COW
* with MAP_PRIVATE and VM_MIXEDMAP
*/
if (!(vma->vm_flags & VM_SHARED)) {
return -EINVAL;
}
nv_vm_flags_clear(vma, VM_PFNMAP);
nv_vm_flags_clear(vma, VM_IO);
nv_vm_flags_set(vma, VM_MIXEDMAP);
return 0;
}
static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
struct nv_drm_gem_object *nv_gem,
struct vm_area_struct *vma,
struct vm_fault *vmf)
{
struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
unsigned long address = nv_page_fault_va(vmf);
struct drm_gem_object *gem = vma->vm_private_data;
unsigned long page_offset;
vm_fault_t ret;
page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);
BUG_ON(page_offset >= nv_user_memory->pages_count);
ret = vm_insert_page(vma, address, nv_user_memory->pages[page_offset]);
switch (ret) {
case 0:
case -EBUSY:
/*
* EBUSY indicates that another thread already handled
* the faulted range.
*/
ret = VM_FAULT_NOPAGE;
break;
case -ENOMEM:
ret = VM_FAULT_OOM;
break;
default:
WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret);
ret = VM_FAULT_SIGBUS;
break;
}
return ret;
}
static int __nv_drm_gem_user_create_mmap_offset(
struct nv_drm_device *nv_dev,
struct nv_drm_gem_object *nv_gem,
uint64_t *offset)
{
(void)nv_dev;
return nv_drm_gem_create_mmap_offset(nv_gem, offset);
}
const struct nv_drm_gem_object_funcs __nv_gem_user_memory_ops = {
.free = __nv_drm_gem_user_memory_free,
.prime_get_sg_table = __nv_drm_gem_user_memory_prime_get_sg_table,
.prime_vmap = __nv_drm_gem_user_memory_prime_vmap,
.prime_vunmap = __nv_drm_gem_user_memory_prime_vunmap,
.mmap = __nv_drm_gem_user_memory_mmap,
.handle_vma_fault = __nv_drm_gem_user_memory_handle_vma_fault,
.create_mmap_offset = __nv_drm_gem_user_create_mmap_offset,
};
int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct drm_nvidia_gem_import_userspace_memory_params *params = data;
struct nv_drm_gem_user_memory *nv_user_memory;
struct page **pages = NULL;
unsigned long pages_count = 0;
int ret = 0;
if ((params->size % PAGE_SIZE) != 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Userspace memory 0x%llx size should be in a multiple of page "
"size to create a gem object",
params->address);
return -EINVAL;
}
pages_count = params->size / PAGE_SIZE;
ret = nv_drm_lock_user_pages(params->address, pages_count, &pages);
if (ret != 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to lock user pages for address 0x%llx: %d",
params->address, ret);
return ret;
}
if ((nv_user_memory =
nv_drm_calloc(1, sizeof(*nv_user_memory))) == NULL) {
ret = -ENOMEM;
goto failed;
}
nv_user_memory->pages = pages;
nv_user_memory->pages_count = pages_count;
nv_drm_gem_object_init(nv_dev,
&nv_user_memory->base,
&__nv_gem_user_memory_ops,
params->size,
NULL /* pMemory */);
return nv_drm_gem_handle_create_drop_reference(filep,
&nv_user_memory->base,
&params->handle);
failed:
nv_drm_unlock_user_pages(pages_count, pages);
return ret;
}
#endif
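
The import path pins whole pages, so nv_drm_gem_import_userspace_memory_ioctl() insists that the region size be an exact multiple of PAGE_SIZE. A userspace-side sketch of a valid request; the struct below is an illustrative stand-in for the fields the handler reads (the real layout lives in nvidia-drm-ioctl.h) and the ioctl request macro is deliberately left symbolic:

#include <stdint.h>
#include <stdlib.h>

struct import_params_sketch {   /* stand-in: mirrors address/size/handle */
    uint64_t address;
    uint64_t size;
    uint32_t handle;
};

int main(void)
{
    const size_t page = 4096;   /* assumed page size */
    struct import_params_sketch p = { 0 };
    void *buf = aligned_alloc(page, 16 * page);

    if (buf != NULL) {
        p.address = (uint64_t)(uintptr_t)buf;
        p.size    = 16 * page;  /* must be a PAGE_SIZE multiple */
        /*
         * ioctl(drm_fd, <import-userspace-memory request>, &p) would pin
         * these pages and return a GEM handle in p.handle; a size that is
         * not page aligned gets -EINVAL from the handler above.
         */
        free(buf);
    }
    return 0;
}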


@@ -0,0 +1,72 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_GEM_USER_MEMORY_H__
#define __NVIDIA_DRM_GEM_USER_MEMORY_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-gem.h"
struct nv_drm_gem_user_memory {
struct nv_drm_gem_object base;
struct page **pages;
unsigned long pages_count;
};
extern const struct nv_drm_gem_object_funcs __nv_gem_user_memory_ops;
static inline struct nv_drm_gem_user_memory *to_nv_user_memory(
struct nv_drm_gem_object *nv_gem)
{
if (nv_gem != NULL) {
return container_of(nv_gem, struct nv_drm_gem_user_memory, base);
}
return NULL;
}
int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
static inline
struct nv_drm_gem_user_memory *nv_drm_gem_object_user_memory_lookup(
struct drm_device *dev,
struct drm_file *filp,
u32 handle)
{
struct nv_drm_gem_object *nv_gem =
nv_drm_gem_object_lookup(dev, filp, handle);
if (nv_gem != NULL && nv_gem->ops != &__nv_gem_user_memory_ops) {
nv_drm_gem_object_unreference_unlocked(nv_gem);
return NULL;
}
return to_nv_user_memory(nv_gem);
}
#endif
#endif /* __NVIDIA_DRM_GEM_USER_MEMORY_H__ */


@@ -0,0 +1,396 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-priv.h"
#include "nvidia-drm-ioctl.h"
#include "nvidia-drm-fence.h"
#include "nvidia-drm-gem.h"
#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvidia-drm-gem-user-memory.h"
#include "nvidia-dma-resv-helper.h"
#include "nvidia-drm-helper.h"
#include "nvidia-drm-gem-dma-buf.h"
#include "nvidia-drm-gem-nvkms-memory.h"
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
#if defined(NV_DRM_DRM_FILE_H_PRESENT)
#include <drm/drm_file.h>
#endif
#include "linux/dma-buf.h"
#include "nv-mm.h"
void nv_drm_gem_free(struct drm_gem_object *gem)
{
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
/* Cleanup core gem object */
drm_gem_object_release(&nv_gem->base);
#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
nv_dma_resv_fini(&nv_gem->resv);
#endif
nv_gem->ops->free(nv_gem);
}
#if !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS) && \
defined(NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG)
/*
* The 'dma_buf_map' structure is renamed to 'iosys_map' by the commit
* 7938f4218168 ("dma-buf-map: Rename to iosys-map").
*/
#if defined(NV_LINUX_IOSYS_MAP_H_PRESENT)
typedef struct iosys_map nv_sysio_map_t;
#else
typedef struct dma_buf_map nv_sysio_map_t;
#endif
static int nv_drm_gem_vmap(struct drm_gem_object *gem,
nv_sysio_map_t *map)
{
void *vaddr = nv_drm_gem_prime_vmap(gem);
if (vaddr == NULL) {
return -ENOMEM;
} else if (IS_ERR(vaddr)) {
return PTR_ERR(vaddr);
}
map->vaddr = vaddr;
map->is_iomem = true;
return 0;
}
static void nv_drm_gem_vunmap(struct drm_gem_object *gem,
nv_sysio_map_t *map)
{
nv_drm_gem_prime_vunmap(gem, map->vaddr);
map->vaddr = NULL;
}
#endif
#if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT) || \
!defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
static struct drm_gem_object_funcs nv_drm_gem_funcs = {
.free = nv_drm_gem_free,
.get_sg_table = nv_drm_gem_prime_get_sg_table,
#if !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
.export = drm_gem_prime_export,
#if defined(NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG)
.vmap = nv_drm_gem_vmap,
.vunmap = nv_drm_gem_vunmap,
#else
.vmap = nv_drm_gem_prime_vmap,
.vunmap = nv_drm_gem_prime_vunmap,
#endif
.vm_ops = &nv_drm_gem_vma_ops,
#endif
};
#endif
void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
struct nv_drm_gem_object *nv_gem,
const struct nv_drm_gem_object_funcs * const ops,
size_t size,
struct NvKmsKapiMemory *pMemory)
{
struct drm_device *dev = nv_dev->dev;
nv_gem->nv_dev = nv_dev;
nv_gem->ops = ops;
nv_gem->pMemory = pMemory;
/* Initialize the gem object */
#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
nv_dma_resv_init(&nv_gem->resv);
#endif
#if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT)
nv_gem->base.funcs = &nv_drm_gem_funcs;
#endif
drm_gem_private_object_init(dev, &nv_gem->base, size);
}
struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
#if defined(NV_DMA_BUF_OWNER_PRESENT)
struct drm_gem_object *gem_dst;
struct nv_drm_gem_object *nv_gem_src;
if (dma_buf->owner == dev->driver->fops->owner) {
nv_gem_src = to_nv_gem_object(dma_buf->priv);
if (nv_gem_src->base.dev != dev &&
nv_gem_src->ops->prime_dup != NULL) {
/*
* If we're importing from another NV device, try to handle the
* import internally rather than attaching through the dma-buf
* mechanisms. Importing from the same device is even easier,
* and drm_gem_prime_import() handles that just fine.
*/
gem_dst = nv_gem_src->ops->prime_dup(dev, nv_gem_src);
if (gem_dst)
return gem_dst;
}
}
#endif /* NV_DMA_BUF_OWNER_PRESENT */
return drm_gem_prime_import(dev, dma_buf);
}
struct sg_table *nv_drm_gem_prime_get_sg_table(struct drm_gem_object *gem)
{
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
if (nv_gem->ops->prime_get_sg_table != NULL) {
return nv_gem->ops->prime_get_sg_table(nv_gem);
}
return ERR_PTR(-ENOTSUPP);
}
void *nv_drm_gem_prime_vmap(struct drm_gem_object *gem)
{
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
if (nv_gem->ops->prime_vmap != NULL) {
return nv_gem->ops->prime_vmap(nv_gem);
}
return ERR_PTR(-ENOTSUPP);
}
void nv_drm_gem_prime_vunmap(struct drm_gem_object *gem, void *address)
{
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
if (nv_gem->ops->prime_vunmap != NULL) {
nv_gem->ops->prime_vunmap(nv_gem, address);
}
}
#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
nv_dma_resv_t* nv_drm_gem_prime_res_obj(struct drm_gem_object *obj)
{
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(obj);
return nv_drm_gem_res_obj(nv_gem);
}
#endif
int nv_drm_gem_map_offset_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct drm_nvidia_gem_map_offset_params *params = data;
struct nv_drm_gem_object *nv_gem;
int ret;
if ((nv_gem = nv_drm_gem_object_lookup(dev,
filep,
params->handle)) == NULL) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to lookup gem object for map: 0x%08x",
params->handle);
return -EINVAL;
}
if (nv_gem->ops->create_mmap_offset) {
ret = nv_gem->ops->create_mmap_offset(nv_dev, nv_gem, &params->offset);
} else {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Gem object type does not support mapping: 0x%08x",
params->handle);
ret = -EINVAL;
}
nv_drm_gem_object_unreference_unlocked(nv_gem);
return ret;
}
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
int nv_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
struct drm_file *priv = file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_gem_object *obj = NULL;
struct drm_vma_offset_node *node;
int ret = 0;
struct nv_drm_gem_object *nv_gem;
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
node = nv_drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
vma->vm_pgoff, vma_pages(vma));
if (likely(node)) {
obj = container_of(node, struct drm_gem_object, vma_node);
/*
* When the object is being freed, after it hits 0-refcnt it proceeds
* to tear down the object. In the process it will attempt to remove
* the VMA offset and so acquire this mgr->vm_lock. Therefore if we
* find an object with a 0-refcnt that matches our range, we know it is
* in the process of being destroyed and will be freed as soon as we
* release the lock - so we have to check for the 0-refcnted object and
* treat it as invalid.
*/
if (!kref_get_unless_zero(&obj->refcount))
obj = NULL;
}
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
if (!obj)
return -EINVAL;
nv_gem = to_nv_gem_object(obj);
if (nv_gem->ops->mmap == NULL) {
ret = -EINVAL;
goto done;
}
if (!nv_drm_vma_node_is_allowed(node, file)) {
ret = -EACCES;
goto done;
}
#if defined(NV_DRM_VMA_OFFSET_NODE_HAS_READONLY)
if (node->readonly) {
if (vma->vm_flags & VM_WRITE) {
ret = -EINVAL;
goto done;
}
nv_vm_flags_clear(vma, VM_MAYWRITE);
}
#endif
ret = nv_gem->ops->mmap(nv_gem, vma);
done:
nv_drm_gem_object_unreference_unlocked(nv_gem);
return ret;
}
#endif
int nv_drm_gem_identify_object_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep)
{
struct drm_nvidia_gem_identify_object_params *p = data;
struct nv_drm_gem_dma_buf *nv_dma_buf;
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
struct nv_drm_gem_user_memory *nv_user_memory;
struct nv_drm_gem_object *nv_gem = NULL;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
return -EINVAL;
}
nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(dev, filep, p->handle);
if (nv_dma_buf) {
p->object_type = NV_GEM_OBJECT_DMABUF;
nv_gem = &nv_dma_buf->base;
goto done;
}
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(dev, filep, p->handle);
if (nv_nvkms_memory) {
p->object_type = NV_GEM_OBJECT_NVKMS;
nv_gem = &nv_nvkms_memory->base;
goto done;
}
#endif
nv_user_memory = nv_drm_gem_object_user_memory_lookup(dev, filep, p->handle);
if (nv_user_memory) {
p->object_type = NV_GEM_OBJECT_USERMEMORY;
nv_gem = &nv_user_memory->base;
goto done;
}
p->object_type = NV_GEM_OBJECT_UNKNOWN;
done:
if (nv_gem) {
nv_drm_gem_object_unreference_unlocked(nv_gem);
}
return 0;
}
/* XXX Move these vma operations to os layer */
static vm_fault_t __nv_drm_vma_fault(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
struct drm_gem_object *gem = vma->vm_private_data;
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
if (!nv_gem) {
return VM_FAULT_SIGBUS;
}
return nv_gem->ops->handle_vma_fault(nv_gem, vma, vmf);
}
/*
* Note that nv_drm_vma_fault() can be called for different or same
* ranges of the same drm_gem_object simultaneously.
*/
#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
static vm_fault_t nv_drm_vma_fault(struct vm_fault *vmf)
{
return __nv_drm_vma_fault(vmf->vma, vmf);
}
#else
static vm_fault_t nv_drm_vma_fault(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
return __nv_drm_vma_fault(vma, vmf);
}
#endif
const struct vm_operations_struct nv_drm_gem_vma_ops = {
.open = drm_gem_vm_open,
.fault = nv_drm_vma_fault,
.close = drm_gem_vm_close,
};
#endif /* NV_DRM_AVAILABLE */


@@ -0,0 +1,234 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_GEM_H__
#define __NVIDIA_DRM_GEM_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-priv.h"
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_GEM_H_PRESENT)
#include <drm/drm_gem.h>
#endif
#include "nvkms-kapi.h"
#include "nv-mm.h"
#if defined(NV_DRM_FENCE_AVAILABLE)
#include "nvidia-dma-fence-helper.h"
#include "nvidia-dma-resv-helper.h"
#endif
#include "linux/dma-buf.h"
struct nv_drm_gem_object;
struct nv_drm_gem_object_funcs {
void (*free)(struct nv_drm_gem_object *nv_gem);
struct sg_table *(*prime_get_sg_table)(struct nv_drm_gem_object *nv_gem);
void *(*prime_vmap)(struct nv_drm_gem_object *nv_gem);
void (*prime_vunmap)(struct nv_drm_gem_object *nv_gem, void *address);
struct drm_gem_object *(*prime_dup)(struct drm_device *dev,
const struct nv_drm_gem_object *nv_gem_src);
int (*mmap)(struct nv_drm_gem_object *nv_gem, struct vm_area_struct *vma);
vm_fault_t (*handle_vma_fault)(struct nv_drm_gem_object *nv_gem,
struct vm_area_struct *vma,
struct vm_fault *vmf);
int (*create_mmap_offset)(struct nv_drm_device *nv_dev,
struct nv_drm_gem_object *nv_gem,
uint64_t *offset);
};
struct nv_drm_gem_object {
struct drm_gem_object base;
struct nv_drm_device *nv_dev;
const struct nv_drm_gem_object_funcs *ops;
struct NvKmsKapiMemory *pMemory;
#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
nv_dma_resv_t resv;
#endif
};
static inline struct nv_drm_gem_object *to_nv_gem_object(
struct drm_gem_object *gem)
{
if (gem != NULL) {
return container_of(gem, struct nv_drm_gem_object, base);
}
return NULL;
}
/*
* drm_gem_object_{get/put}() added by commit
* e6b62714e87c8811d5564b6a0738dcde63a51774 (2017-02-28) and
* drm_gem_object_{reference/unreference}() removed by commit
* 3e70fd160cf0b1945225eaa08dd2cb8544f21cb8 (2018-11-15).
*/
static inline void
nv_drm_gem_object_reference(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
drm_gem_object_get(&nv_gem->base);
#else
drm_gem_object_reference(&nv_gem->base);
#endif
}
static inline void
nv_drm_gem_object_unreference_unlocked(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
#if defined(NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT)
drm_gem_object_put_unlocked(&nv_gem->base);
#else
drm_gem_object_put(&nv_gem->base);
#endif
#else
drm_gem_object_unreference_unlocked(&nv_gem->base);
#endif
}
static inline void
nv_drm_gem_object_unreference(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
drm_gem_object_put(&nv_gem->base);
#else
drm_gem_object_unreference(&nv_gem->base);
#endif
}
static inline int nv_drm_gem_handle_create_drop_reference(
struct drm_file *file_priv,
struct nv_drm_gem_object *nv_gem,
uint32_t *handle)
{
int ret = drm_gem_handle_create(file_priv, &nv_gem->base, handle);
/* drop reference from allocate - handle holds it now */
nv_drm_gem_object_unreference_unlocked(nv_gem);
return ret;
}
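/*
 * Illustrative sketch (not part of the original file): the usual tail of an
 * allocation ioctl. On success the new handle owns the reference that the
 * allocator held; on failure the reference is dropped and the object is
 * freed. Either way, the caller must not touch nv_gem afterwards.
 */
static inline int example_publish_gem_object(struct drm_file *file_priv,
                                             struct nv_drm_gem_object *nv_gem,
                                             uint32_t *out_handle)
{
    return nv_drm_gem_handle_create_drop_reference(file_priv, nv_gem,
                                                   out_handle);
}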
static inline int nv_drm_gem_create_mmap_offset(
struct nv_drm_gem_object *nv_gem,
uint64_t *offset)
{
int ret;
if ((ret = drm_gem_create_mmap_offset(&nv_gem->base)) < 0) {
NV_DRM_DEV_LOG_ERR(
nv_gem->nv_dev,
"drm_gem_create_mmap_offset failed with error code %d",
ret);
goto done;
}
*offset = drm_vma_node_offset_addr(&nv_gem->base.vma_node);
done:
return ret;
}
void nv_drm_gem_free(struct drm_gem_object *gem);
static inline struct nv_drm_gem_object *nv_drm_gem_object_lookup(
struct drm_device *dev,
struct drm_file *filp,
u32 handle)
{
#if (NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT == 3)
return to_nv_gem_object(drm_gem_object_lookup(dev, filp, handle));
#elif (NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT == 2)
return to_nv_gem_object(drm_gem_object_lookup(filp, handle));
#else
#error "Unknown argument count of drm_gem_object_lookup()"
#endif
}
static inline int nv_drm_gem_handle_create(struct drm_file *filp,
struct nv_drm_gem_object *nv_gem,
uint32_t *handle)
{
return drm_gem_handle_create(filp, &nv_gem->base, handle);
}
#if defined(NV_DRM_FENCE_AVAILABLE)
static inline nv_dma_resv_t *nv_drm_gem_res_obj(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_HAS_RESV)
return nv_gem->base.resv;
#else
return nv_gem->base.dma_buf ? nv_gem->base.dma_buf->resv : &nv_gem->resv;
#endif
}
#endif
void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
struct nv_drm_gem_object *nv_gem,
const struct nv_drm_gem_object_funcs * const ops,
size_t size,
struct NvKmsKapiMemory *pMemory);
struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
struct sg_table *nv_drm_gem_prime_get_sg_table(struct drm_gem_object *gem);
void *nv_drm_gem_prime_vmap(struct drm_gem_object *gem);
void nv_drm_gem_prime_vunmap(struct drm_gem_object *gem, void *address);
#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
nv_dma_resv_t* nv_drm_gem_prime_res_obj(struct drm_gem_object *obj);
#endif
extern const struct vm_operations_struct nv_drm_gem_vma_ops;
int nv_drm_gem_map_offset_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_mmap(struct file *file, struct vm_area_struct *vma);
int nv_drm_gem_identify_object_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
#endif /* NV_DRM_AVAILABLE */
#endif /* __NVIDIA_DRM_GEM_H__ */


@@ -0,0 +1,218 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* This file contains snapshots of DRM helper functions from the
* Linux kernel which are used by nvidia-drm.ko if the target kernel
* predates the helper function. Having these functions consistently
* present simplifies nvidia-drm.ko source.
*/
#include "nvidia-drm-helper.h"
#include "nvidia-drm-priv.h"
#include "nvidia-drm-crtc.h"
#include "nvmisc.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_ATOMIC_UAPI_H_PRESENT)
#include <drm/drm_atomic_uapi.h>
#endif
/*
* The inclusion of drm_framebuffer.h was removed from drm_crtc.h by commit
* 720cf96d8fecde29b72e1101f8a567a0ce99594f ("drm: Drop drm_framebuffer.h from
* drm_crtc.h") in linux-next, expected in v5.19-rc7.
*
* We only need drm_framebuffer.h for drm_framebuffer_put(), and it is always
* present (v4.9+) when drm_framebuffer_{put,get}() is present (v4.12+), so it
* is safe to unconditionally include it when drm_framebuffer_get() is present.
*/
#if defined(NV_DRM_FRAMEBUFFER_GET_PRESENT)
#include <drm/drm_framebuffer.h>
#endif
static void __nv_drm_framebuffer_put(struct drm_framebuffer *fb)
{
#if defined(NV_DRM_FRAMEBUFFER_GET_PRESENT)
drm_framebuffer_put(fb);
#else
drm_framebuffer_unreference(fb);
#endif
}
/*
* drm_atomic_helper_disable_all() has been added by commit
* 1494276000db789c6d2acd85747be4707051c801, which is Signed-off-by:
* Thierry Reding <treding@nvidia.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* drm_atomic_helper_disable_all() is copied from
* linux/drivers/gpu/drm/drm_atomic_helper.c and modified to use
* nv_drm_for_each_crtc instead of drm_for_each_crtc to loop over all crtcs,
* use nv_drm_for_each_*_in_state instead of for_each_connector_in_state to loop
* over all modeset object states, and use drm_atomic_state_free() if
* drm_atomic_state_put() is not available.
*
* drm_atomic_helper_disable_all() is copied from
* linux/drivers/gpu/drm/drm_atomic_helper.c @
* 49d70aeaeca8f62b72b7712ecd1e29619a445866, which has the following
* copyright and license information:
*
* Copyright (C) 2014 Red Hat
* Copyright (C) 2014 Intel Corp.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rob Clark <robdclark@gmail.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_atomic_state *state;
struct drm_connector_state *conn_state;
struct drm_connector *conn;
struct drm_plane_state *plane_state;
struct drm_plane *plane;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
unsigned plane_mask = 0;
int ret, i;
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
state->acquire_ctx = ctx;
nv_drm_for_each_crtc(crtc, dev) {
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto free;
}
crtc_state->active = false;
ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
if (ret < 0)
goto free;
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret < 0)
goto free;
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret < 0)
goto free;
}
#if defined(NV_DRM_ROTATION_AVAILABLE)
nv_drm_for_each_plane(plane, dev) {
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto free;
}
plane_state->rotation = DRM_MODE_ROTATE_0;
}
#endif
nv_drm_for_each_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
if (ret < 0)
goto free;
}
nv_drm_for_each_plane_in_state(state, plane, plane_state, i) {
ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
if (ret < 0)
goto free;
drm_atomic_set_fb_for_plane(plane_state, NULL);
plane_mask |= NVBIT(drm_plane_index(plane));
plane->old_fb = plane->fb;
}
ret = drm_atomic_commit(state);
free:
if (plane_mask) {
drm_for_each_plane_mask(plane, dev, plane_mask) {
if (ret == 0) {
plane->fb = NULL;
plane->crtc = NULL;
WARN_ON(plane->state->fb);
WARN_ON(plane->state->crtc);
if (plane->old_fb)
__nv_drm_framebuffer_put(plane->old_fb);
}
plane->old_fb = NULL;
}
}
#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
drm_atomic_state_put(state);
#else
if (ret != 0) {
drm_atomic_state_free(state);
} else {
/*
* In case of success, drm_atomic_commit() takes care to cleanup and
* free @state.
*
* Comment placed above drm_atomic_commit() says: The caller must not
* free or in any other way access @state. If the function fails then
* the caller must clean up @state itself.
*/
}
#endif
return ret;
}
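/*
 * Illustrative sketch (not part of the original file): a typical caller of
 * nv_drm_atomic_helper_disable_all(), using the standard modeset-lock
 * acquire/backoff retry loop from <drm/drm_modeset_lock.h>.
 */
static void example_disable_all_outputs(struct drm_device *dev)
{
    struct drm_modeset_acquire_ctx ctx;
    int ret;

    drm_modeset_acquire_init(&ctx, 0);

retry:
    ret = drm_modeset_lock_all_ctx(dev, &ctx);
    if (ret == 0) {
        ret = nv_drm_atomic_helper_disable_all(dev, &ctx);
    }
    if (ret == -EDEADLK) {
        drm_modeset_backoff(&ctx);
        goto retry;
    }

    drm_modeset_drop_locks(&ctx);
    drm_modeset_acquire_fini(&ctx);
}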
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */


@@ -0,0 +1,659 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_HELPER_H__
#define __NVIDIA_DRM_HELPER_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) || defined(NV_DRM_ROTATION_AVAILABLE)
/* For DRM_ROTATE_* , DRM_REFLECT_* */
#include <drm/drm_blend.h>
#endif
#if defined(NV_DRM_ROTATION_AVAILABLE)
/* For DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* */
#include <uapi/drm/drm_mode.h>
#endif
#if defined(NV_DRM_ROTATION_AVAILABLE)
/*
* Commit c2c446ad29437bb92b157423c632286608ebd3ec (2017-05-19) added
* DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* to the UAPI and removed
* DRM_ROTATE_* and DRM_REFLECT_*.
*/
#if !defined(DRM_MODE_ROTATE_0)
#define DRM_MODE_ROTATE_0 DRM_ROTATE_0
#define DRM_MODE_ROTATE_90 DRM_ROTATE_90
#define DRM_MODE_ROTATE_180 DRM_ROTATE_180
#define DRM_MODE_ROTATE_270 DRM_ROTATE_270
#define DRM_MODE_REFLECT_X DRM_REFLECT_X
#define DRM_MODE_REFLECT_Y DRM_REFLECT_Y
#define DRM_MODE_ROTATE_MASK DRM_ROTATE_MASK
#define DRM_MODE_REFLECT_MASK DRM_REFLECT_MASK
#endif
#endif //NV_DRM_ROTATION_AVAILABLE
/*
* drm_dev_put() was added by commit 9a96f55034e41b4e002b767e9218d55f03bdff7d
* (2017-09-26) and drm_dev_unref() was removed by commit
* ba1d345401476a5f7fbad622607c5a1f95e59b31 (2018-11-15).
*
* drm_dev_unref() was added and drm_dev_free() removed by commit
* 099d1c290e2ebc3b798961a6c177c3aef5f0b789 (2014-01-29).
*/
static inline void nv_drm_dev_free(struct drm_device *dev)
{
#if defined(NV_DRM_DEV_PUT_PRESENT)
drm_dev_put(dev);
#elif defined(NV_DRM_DEV_UNREF_PRESENT)
drm_dev_unref(dev);
#else
drm_dev_free(dev);
#endif
}
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
static inline struct sg_table*
nv_drm_prime_pages_to_sg(struct drm_device *dev,
struct page **pages, unsigned int nr_pages)
{
#if defined(NV_DRM_PRIME_PAGES_TO_SG_HAS_DRM_DEVICE_ARG)
return drm_prime_pages_to_sg(dev, pages, nr_pages);
#else
return drm_prime_pages_to_sg(pages, nr_pages);
#endif
}
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
/*
* drm_for_each_connector(), drm_for_each_crtc(), drm_for_each_fb(),
* drm_for_each_encoder and drm_for_each_plane() were added by kernel
* commit 6295d607ad34ee4e43aab3f20714c2ef7a6adea1 which was
* Signed-off-by:
* Daniel Vetter <daniel.vetter@intel.com>
*
* drm_for_each_connector(), drm_for_each_crtc(), drm_for_each_fb(),
* drm_for_each_encoder and drm_for_each_plane() are copied from
* include/drm/drm_crtc @
* 6295d607ad34ee4e43aab3f20714c2ef7a6adea1
* which has the following copyright and license information:
*
* Copyright © 2006 Keith Packard
* Copyright © 2007-2008 Dave Airlie
* Copyright © 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_crtc.h>
#if defined(drm_for_each_plane)
#define nv_drm_for_each_plane(plane, dev) \
drm_for_each_plane(plane, dev)
#else
#define nv_drm_for_each_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
#endif
#if defined(drm_for_each_crtc)
#define nv_drm_for_each_crtc(crtc, dev) \
drm_for_each_crtc(crtc, dev)
#else
#define nv_drm_for_each_crtc(crtc, dev) \
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
#endif
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
#define nv_drm_for_each_connector(connector, conn_iter, dev) \
drm_for_each_connector_iter(connector, conn_iter)
#elif defined(drm_for_each_connector)
#define nv_drm_for_each_connector(connector, conn_iter, dev) \
drm_for_each_connector(connector, dev)
#else
#define nv_drm_for_each_connector(connector, conn_iter, dev) \
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); \
list_for_each_entry(connector, &(dev)->mode_config.connector_list, head)
#endif
#if defined(drm_for_each_encoder)
#define nv_drm_for_each_encoder(encoder, dev) \
drm_for_each_encoder(encoder, dev)
#else
#define nv_drm_for_each_encoder(encoder, dev) \
list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
#endif
#if defined(drm_for_each_fb)
#define nv_drm_for_each_fb(fb, dev) \
drm_for_each_fb(fb, dev)
#else
#define nv_drm_for_each_fb(fb, dev) \
list_for_each_entry(fb, &(dev)->mode_config.fb_list, head)
#endif
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
/*
* for_each_connector_in_state(), for_each_crtc_in_state() and
* for_each_plane_in_state() were added by kernel commit
* df63b9994eaf942afcdb946d27a28661d7dfbf2a which was Signed-off-by:
* Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* for_each_connector_in_state(), for_each_crtc_in_state() and
* for_each_plane_in_state() were copied from
* include/drm/drm_atomic.h @
* 21a01abbe32a3cbeb903378a24e504bfd9fe0648
* which has the following copyright and license information:
*
* Copyright (C) 2014 Red Hat
* Copyright (C) 2014 Intel Corp.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rob Clark <robdclark@gmail.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
/**
* nv_drm_for_each_connector_in_state - iterate over all connectors in an
* atomic update
* @__state: &struct drm_atomic_state pointer
* @connector: &struct drm_connector iteration cursor
* @connector_state: &struct drm_connector_state iteration cursor
* @__i: int iteration cursor, for macro-internal use
*
* This iterates over all connectors in an atomic update. Note that before the
* software state is committed (by calling drm_atomic_helper_swap_state()), this
* points to the new state, while afterwards it points to the old state. Due to
* this tricky confusion this macro is deprecated.
*/
#if !defined(for_each_connector_in_state)
#define nv_drm_for_each_connector_in_state(__state, \
connector, connector_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->num_connector && \
((connector) = (__state)->connectors[__i].ptr, \
(connector_state) = (__state)->connectors[__i].state, 1); \
(__i)++) \
for_each_if (connector)
#else
#define nv_drm_for_each_connector_in_state(__state, \
connector, connector_state, __i) \
for_each_connector_in_state(__state, connector, connector_state, __i)
#endif
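/*
 * Illustrative sketch (not part of the original file): iterating connector
 * states in an atomic update with the compatibility macro above.
 */
static inline int example_count_bound_connectors(struct drm_atomic_state *state)
{
    struct drm_connector *connector;
    struct drm_connector_state *connector_state;
    int i, bound = 0;

    nv_drm_for_each_connector_in_state(state, connector, connector_state, i) {
        if (connector_state->crtc != NULL) {
            bound++;
        }
    }
    return bound;
}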
/**
* nv_drm_for_each_crtc_in_state - iterate over all CRTCs in an atomic update
* @__state: &struct drm_atomic_state pointer
* @crtc: &struct drm_crtc iteration cursor
* @crtc_state: &struct drm_crtc_state iteration cursor
* @__i: int iteration cursor, for macro-internal use
*
* This iterates over all CRTCs in an atomic update. Note that before the
* software state is committed (by calling drm_atomic_helper_swap_state()), this
* points to the new state, while afterwards it points to the old state. Due to
* this tricky confusion this macro is deprecated.
*/
#if !defined(for_each_crtc_in_state)
#define nv_drm_for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->dev->mode_config.num_crtc && \
((crtc) = (__state)->crtcs[__i].ptr, \
(crtc_state) = (__state)->crtcs[__i].state, 1); \
(__i)++) \
for_each_if (crtc_state)
#else
#define nv_drm_for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
for_each_crtc_in_state(__state, crtc, crtc_state, __i)
#endif
/**
* nv_drm_for_each_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
* @plane_state: &struct drm_plane_state iteration cursor
* @__i: int iteration cursor, for macro-internal use
*
* This iterates over all planes in an atomic update. Note that before the
* software state is committed (by calling drm_atomic_helper_swap_state()), this
* points to the new state, while afterwards it points to the old state. Due to
* this tricky confusion this macro is deprecated.
*/
#if !defined(for_each_plane_in_state)
#define nv_drm_for_each_plane_in_state(__state, plane, plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->dev->mode_config.num_total_plane && \
((plane) = (__state)->planes[__i].ptr, \
(plane_state) = (__state)->planes[__i].state, 1); \
(__i)++) \
for_each_if (plane_state)
#else
#define nv_drm_for_each_plane_in_state(__state, plane, plane_state, __i) \
for_each_plane_in_state(__state, plane, plane_state, __i)
#endif
static inline struct drm_connector *
nv_drm_connector_lookup(struct drm_device *dev, struct drm_file *filep,
uint32_t id)
{
#if !defined(NV_DRM_CONNECTOR_LOOKUP_PRESENT)
return drm_connector_find(dev, id);
#elif defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG)
return drm_connector_lookup(dev, filep, id);
#else
return drm_connector_lookup(dev, id);
#endif
}
static inline void nv_drm_connector_put(struct drm_connector *connector)
{
#if defined(NV_DRM_CONNECTOR_PUT_PRESENT)
drm_connector_put(connector);
#elif defined(NV_DRM_CONNECTOR_LOOKUP_PRESENT)
drm_connector_unreference(connector);
#endif
}
static inline struct drm_crtc *
nv_drm_crtc_find(struct drm_device *dev, struct drm_file *filep, uint32_t id)
{
#if defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG)
return drm_crtc_find(dev, filep, id);
#else
return drm_crtc_find(dev, id);
#endif
}
static inline struct drm_encoder *nv_drm_encoder_find(struct drm_device *dev,
uint32_t id)
{
#if defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG)
return drm_encoder_find(dev, NULL /* file_priv */, id);
#else
return drm_encoder_find(dev, id);
#endif
}
#if defined(NV_DRM_DRM_AUTH_H_PRESENT)
#include <drm/drm_auth.h>
#endif
#if defined(NV_DRM_DRM_FILE_H_PRESENT)
#include <drm/drm_file.h>
#endif
/*
* drm_file_get_master() added by commit 56f0729a510f ("drm: protect drm_master
* pointers in drm_lease.c") in v5.15 (2021-07-20)
*/
static inline struct drm_master *nv_drm_file_get_master(struct drm_file *filep)
{
#if defined(NV_DRM_FILE_GET_MASTER_PRESENT)
return drm_file_get_master(filep);
#else
if (filep->master) {
return drm_master_get(filep->master);
} else {
return NULL;
}
#endif
}
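/*
 * Illustrative sketch (not part of the original file): callers must balance
 * nv_drm_file_get_master() with drm_master_put(), e.g. when checking whether
 * a client currently holds master.
 */
static inline bool example_file_has_master(struct drm_file *filep)
{
    struct drm_master *master = nv_drm_file_get_master(filep);

    if (master == NULL) {
        return false;
    }

    drm_master_put(&master);
    return true;
}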
/*
* drm_connector_for_each_possible_encoder() is added by commit
* 83aefbb887b59df0b3520965c3701e01deacfc52 which was Signed-off-by:
* Ville Syrjälä <ville.syrjala@linux.intel.com>
*
* drm_connector_for_each_possible_encoder() is copied from
* include/drm/drm_connector.h and modified to use nv_drm_encoder_find()
* instead of drm_encoder_find().
*
* drm_connector_for_each_possible_encoder() is copied from
* include/drm/drm_connector.h @
* 83aefbb887b59df0b3520965c3701e01deacfc52
* which has the following copyright and license information:
*
* Copyright (c) 2016 Intel Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT)
#include <drm/drm_connector.h>
#endif
/**
* nv_drm_connector_for_each_possible_encoder - iterate connector's possible
* encoders
* @connector: &struct drm_connector pointer
* @encoder: &struct drm_encoder pointer used as cursor
* @__i: int iteration cursor, for macro-internal use
*/
#if !defined(drm_connector_for_each_possible_encoder)
#if !defined(for_each_if)
#define for_each_if(condition) if (!(condition)) {} else
#endif
#define __nv_drm_connector_for_each_possible_encoder(connector, encoder, __i) \
for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \
(connector)->encoder_ids[(__i)] != 0; (__i)++) \
for_each_if((encoder) = \
nv_drm_encoder_find((connector)->dev, \
(connector)->encoder_ids[(__i)]))
#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \
{ \
unsigned int __i; \
__nv_drm_connector_for_each_possible_encoder(connector, encoder, __i)
#define nv_drm_connector_for_each_possible_encoder_end \
}
#else
#if NV_DRM_CONNECTOR_FOR_EACH_POSSIBLE_ENCODER_ARGUMENT_COUNT == 3
#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \
{ \
unsigned int __i; \
drm_connector_for_each_possible_encoder(connector, encoder, __i)
#define nv_drm_connector_for_each_possible_encoder_end \
}
#else
#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \
drm_connector_for_each_possible_encoder(connector, encoder)
#define nv_drm_connector_for_each_possible_encoder_end
#endif
#endif
static inline int
nv_drm_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
#if defined(NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME)
return drm_mode_connector_attach_encoder(connector, encoder);
#else
return drm_connector_attach_encoder(connector, encoder);
#endif
}
static inline int
nv_drm_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid)
{
#if defined(NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME)
return drm_mode_connector_update_edid_property(connector, edid);
#else
return drm_connector_update_edid_property(connector, edid);
#endif
}
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
#include <drm/drm_connector.h>
static inline
void nv_drm_connector_list_iter_begin(struct drm_device *dev,
struct drm_connector_list_iter *iter)
{
#if defined(NV_DRM_CONNECTOR_LIST_ITER_BEGIN_PRESENT)
drm_connector_list_iter_begin(dev, iter);
#else
drm_connector_list_iter_get(dev, iter);
#endif
}
static inline
void nv_drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
{
#if defined(NV_DRM_CONNECTOR_LIST_ITER_BEGIN_PRESENT)
drm_connector_list_iter_end(iter);
#else
drm_connector_list_iter_put(iter);
#endif
}
#endif
/*
* The drm_format_num_planes() function was added by commit d0d110e09629
* ("drm: Add drm_format_num_planes() utility function") in v3.3 (2011-12-20).
* The prototype was moved from drm_crtc.h to drm_fourcc.h by commit
* ae4df11a0f53 ("drm: Move format-related helpers to drm_fourcc.c") in v4.8
* (2016-06-09). drm_format_num_planes() was removed by commit 05c452c115bf
* ("drm: Remove users of drm_format_num_planes") in v5.3 (2019-05-16).
*
* drm_format_info() is available only from v4.10 (2016-10-18), added by commit
* 84770cc24f3a (drm: Centralize format information).
*/
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
static inline int nv_drm_format_num_planes(uint32_t format)
{
#if defined(NV_DRM_FORMAT_NUM_PLANES_PRESENT)
return drm_format_num_planes(format);
#else
const struct drm_format_info *info = drm_format_info(format);
return info != NULL ? info->num_planes : 1;
#endif
}
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
/*
* DRM_FORMAT_MOD_LINEAR was also defined after the original modifier support
* was added to the kernel, as a more explicit alias of DRM_FORMAT_MOD_NONE
*/
#if !defined(DRM_FORMAT_MOD_VENDOR_NONE)
#define DRM_FORMAT_MOD_VENDOR_NONE 0
#endif
#if !defined(DRM_FORMAT_MOD_LINEAR)
#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
#endif
/*
* DRM_FORMAT_MOD_INVALID was defined after the original modifier support was
* added to the kernel, for use as a sentinel value.
*/
#if !defined(DRM_FORMAT_RESERVED)
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
#endif
#if !defined(DRM_FORMAT_MOD_INVALID)
#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED)
#endif
/*
* DRM_FORMAT_MOD_VENDOR_NVIDIA was previously called
* DRM_FORMAT_MOD_VENDOR_NV.
*/
#if !defined(DRM_FORMAT_MOD_VENDOR_NVIDIA)
#define DRM_FORMAT_MOD_VENDOR_NVIDIA DRM_FORMAT_MOD_VENDOR_NV
#endif
/*
* DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D is a relatively new addition to the
* upstream kernel headers compared to the other format modifiers.
*/
#if !defined(DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D)
#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \
fourcc_mod_code(NVIDIA, (0x10 | \
((h) & 0xf) | \
(((k) & 0xff) << 12) | \
(((g) & 0x3) << 20) | \
(((s) & 0x1) << 22) | \
(((c) & 0x7) << 23)))
#endif
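/*
 * Illustrative sketch (not part of the original file): composing a 2D
 * block-linear modifier from the per-GPU parameters that nvidia-drm reports
 * through DRM_IOCTL_NVIDIA_GET_DEV_INFO. Per the upstream drm_fourcc.h
 * documentation, c = compression, s = sector layout, g = page-kind
 * generation, k = page kind, and h = log2(block height in GOBs).
 */
static inline uint64_t example_block_linear_modifier(
    uint32_t generic_page_kind,
    uint32_t page_kind_generation,
    uint32_t sector_layout,
    uint32_t log2_gobs_per_block_y)
{
    return DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0 /* no compression */,
                                                 sector_layout,
                                                 page_kind_generation,
                                                 generic_page_kind,
                                                 log2_gobs_per_block_y);
}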
#endif /* defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) */
/*
* drm_vma_offset_exact_lookup_locked() was added
* by kernel commit 2225cfe46bcc which was Signed-off-by:
* Daniel Vetter <daniel.vetter@intel.com>
*
* drm_vma_offset_exact_lookup_locked() was copied from
* include/drm/drm_vma_manager.h @ 2225cfe46bcc
* which has the following copyright and license information:
*
* Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_vma_manager.h>
/**
* nv_drm_vma_offset_exact_lookup_locked() - Look up node by exact address
* @mgr: Manager object
* @start: Start address (page-based, not byte-based)
* @pages: Size of object (page-based)
*
* Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node.
* It only returns the exact object with the given start address.
*
* RETURNS:
* Node at exact start address @start.
*/
static inline struct drm_vma_offset_node *
nv_drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)
{
#if defined(NV_DRM_VMA_OFFSET_EXACT_LOOKUP_LOCKED_PRESENT)
return drm_vma_offset_exact_lookup_locked(mgr, start, pages);
#else
struct drm_vma_offset_node *node;
node = drm_vma_offset_lookup_locked(mgr, start, pages);
return (node && node->vm_node.start == start) ? node : NULL;
#endif
}
static inline bool
nv_drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
struct file *filp)
{
#if defined(NV_DRM_VMA_NODE_IS_ALLOWED_HAS_TAG_ARG)
return drm_vma_node_is_allowed(node, filp->private_data);
#else
return drm_vma_node_is_allowed(node, filp);
#endif
}
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
#endif /* defined(NV_DRM_AVAILABLE) */
#endif /* __NVIDIA_DRM_HELPER_H__ */


@@ -0,0 +1,354 @@
/*
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _UAPI_NVIDIA_DRM_IOCTL_H_
#define _UAPI_NVIDIA_DRM_IOCTL_H_
#include <drm/drm.h>
/*
* We should do our best to keep these values constant. Any change to these will
* be backwards incompatible with client applications that might be using them.
*/
#define DRM_NVIDIA_GET_CRTC_CRC32 0x00
#define DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY 0x01
#define DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY 0x02
#define DRM_NVIDIA_GET_DEV_INFO 0x03
#define DRM_NVIDIA_FENCE_SUPPORTED 0x04
#define DRM_NVIDIA_PRIME_FENCE_CONTEXT_CREATE 0x05
#define DRM_NVIDIA_GEM_PRIME_FENCE_ATTACH 0x06
#define DRM_NVIDIA_GET_CLIENT_CAPABILITY 0x08
#define DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY 0x09
#define DRM_NVIDIA_GEM_MAP_OFFSET 0x0a
#define DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY 0x0b
#define DRM_NVIDIA_GET_CRTC_CRC32_V2 0x0c
#define DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY 0x0d
#define DRM_NVIDIA_GEM_IDENTIFY_OBJECT 0x0e
#define DRM_NVIDIA_DMABUF_SUPPORTED 0x0f
#define DRM_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID 0x10
#define DRM_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID 0x11
#define DRM_NVIDIA_GRANT_PERMISSIONS 0x12
#define DRM_NVIDIA_REVOKE_PERMISSIONS 0x13
#define DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE 0x14
#define DRM_NVIDIA_SEMSURF_FENCE_CREATE 0x15
#define DRM_NVIDIA_SEMSURF_FENCE_WAIT 0x16
#define DRM_IOCTL_NVIDIA_GEM_IMPORT_NVKMS_MEMORY \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY), \
struct drm_nvidia_gem_import_nvkms_memory_params)
#define DRM_IOCTL_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY), \
struct drm_nvidia_gem_import_userspace_memory_params)
#define DRM_IOCTL_NVIDIA_GET_DEV_INFO \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_DEV_INFO), \
struct drm_nvidia_get_dev_info_params)
/*
* XXX Solaris compiler has issues with DRM_IO. None of this is supported on
* Solaris anyway, so just skip it.
*
* 'warning: suggest parentheses around arithmetic in operand of |'
*/
#if defined(NV_LINUX)
#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED \
DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_FENCE_SUPPORTED)
#define DRM_IOCTL_NVIDIA_DMABUF_SUPPORTED \
DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_DMABUF_SUPPORTED)
#else
#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED 0
#define DRM_IOCTL_NVIDIA_DMABUF_SUPPORTED 0
#endif
#define DRM_IOCTL_NVIDIA_PRIME_FENCE_CONTEXT_CREATE \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_PRIME_FENCE_CONTEXT_CREATE),\
struct drm_nvidia_prime_fence_context_create_params)
#define DRM_IOCTL_NVIDIA_GEM_PRIME_FENCE_ATTACH \
DRM_IOW((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_PRIME_FENCE_ATTACH), \
struct drm_nvidia_gem_prime_fence_attach_params)
#define DRM_IOCTL_NVIDIA_GET_CLIENT_CAPABILITY \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CLIENT_CAPABILITY), \
struct drm_nvidia_get_client_capability_params)
#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32 \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32), \
struct drm_nvidia_get_crtc_crc32_params)
#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32_V2 \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32_V2), \
struct drm_nvidia_get_crtc_crc32_v2_params)
#define DRM_IOCTL_NVIDIA_GEM_EXPORT_NVKMS_MEMORY \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY), \
struct drm_nvidia_gem_export_nvkms_memory_params)
#define DRM_IOCTL_NVIDIA_GEM_MAP_OFFSET \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_MAP_OFFSET), \
struct drm_nvidia_gem_map_offset_params)
#define DRM_IOCTL_NVIDIA_GEM_ALLOC_NVKMS_MEMORY \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY), \
struct drm_nvidia_gem_alloc_nvkms_memory_params)
#define DRM_IOCTL_NVIDIA_GEM_EXPORT_DMABUF_MEMORY \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY), \
struct drm_nvidia_gem_export_dmabuf_memory_params)
#define DRM_IOCTL_NVIDIA_GEM_IDENTIFY_OBJECT \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IDENTIFY_OBJECT), \
struct drm_nvidia_gem_identify_object_params)
#define DRM_IOCTL_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID),\
struct drm_nvidia_get_dpy_id_for_connector_id_params)
#define DRM_IOCTL_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID),\
struct drm_nvidia_get_connector_id_for_dpy_id_params)
#define DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GRANT_PERMISSIONS), \
struct drm_nvidia_grant_permissions_params)
#define DRM_IOCTL_NVIDIA_REVOKE_PERMISSIONS \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_REVOKE_PERMISSIONS), \
struct drm_nvidia_revoke_permissions_params)
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CTX_CREATE \
DRM_IOWR((DRM_COMMAND_BASE + \
DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE), \
struct drm_nvidia_semsurf_fence_ctx_create_params)
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CREATE \
DRM_IOWR((DRM_COMMAND_BASE + \
DRM_NVIDIA_SEMSURF_FENCE_CREATE), \
struct drm_nvidia_semsurf_fence_create_params)
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_WAIT \
DRM_IOW((DRM_COMMAND_BASE + \
DRM_NVIDIA_SEMSURF_FENCE_WAIT), \
struct drm_nvidia_semsurf_fence_wait_params)
struct drm_nvidia_gem_import_nvkms_memory_params {
uint64_t mem_size; /* IN */
uint64_t nvkms_params_ptr; /* IN */
uint64_t nvkms_params_size; /* IN */
uint32_t handle; /* OUT */
uint32_t __pad;
};
struct drm_nvidia_gem_import_userspace_memory_params {
uint64_t size; /* IN Size of memory in bytes */
uint64_t address; /* IN Virtual address of userspace memory */
uint32_t handle; /* OUT Handle to gem object */
};
struct drm_nvidia_get_dev_info_params {
uint32_t gpu_id; /* OUT */
uint32_t primary_index; /* OUT; the "card%d" value */
/* See DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D definitions of these */
uint32_t generic_page_kind; /* OUT */
uint32_t page_kind_generation; /* OUT */
uint32_t sector_layout; /* OUT */
uint32_t supports_sync_fd; /* OUT */
};
struct drm_nvidia_prime_fence_context_create_params {
uint32_t handle; /* OUT GEM handle to fence context */
uint32_t index; /* IN Index of semaphore to use for fencing */
uint64_t size; /* IN Size of semaphore surface in bytes */
/* Params for importing userspace semaphore surface */
uint64_t import_mem_nvkms_params_ptr; /* IN */
uint64_t import_mem_nvkms_params_size; /* IN */
/* Params for creating software signaling event */
uint64_t event_nvkms_params_ptr; /* IN */
uint64_t event_nvkms_params_size; /* IN */
};
struct drm_nvidia_gem_prime_fence_attach_params {
uint32_t handle; /* IN GEM handle to attach fence to */
uint32_t fence_context_handle; /* IN GEM handle to fence context on which the fence runs */
uint32_t sem_thresh; /* IN Semaphore value to reach before signal */
uint32_t __pad;
};
struct drm_nvidia_get_client_capability_params {
uint64_t capability; /* IN Client capability enum */
uint64_t value; /* OUT Client capability value */
};
/* Struct that stores a CRC value and whether it is supported by hardware */
struct drm_nvidia_crtc_crc32 {
uint32_t value; /* Read value, undefined if supported is false */
uint8_t supported; /* Supported boolean, true if readable by hardware */
uint8_t __pad0;
uint16_t __pad1;
};
struct drm_nvidia_crtc_crc32_v2_out {
struct drm_nvidia_crtc_crc32 compositorCrc32; /* OUT compositor hardware CRC32 value */
struct drm_nvidia_crtc_crc32 rasterGeneratorCrc32; /* OUT raster generator CRC32 value */
struct drm_nvidia_crtc_crc32 outputCrc32; /* OUT SF/SOR CRC32 value */
};
struct drm_nvidia_get_crtc_crc32_v2_params {
uint32_t crtc_id; /* IN CRTC identifier */
struct drm_nvidia_crtc_crc32_v2_out crc32; /* OUT Crc32 output structure */
};
struct drm_nvidia_get_crtc_crc32_params {
uint32_t crtc_id; /* IN CRTC identifier */
uint32_t crc32; /* OUT CRC32 value */
};
struct drm_nvidia_gem_export_nvkms_memory_params {
uint32_t handle; /* IN */
uint32_t __pad;
uint64_t nvkms_params_ptr; /* IN */
uint64_t nvkms_params_size; /* IN */
};
struct drm_nvidia_gem_map_offset_params {
uint32_t handle; /* IN Handle to gem object */
uint32_t __pad;
uint64_t offset; /* OUT Fake offset */
};
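/*
 * Illustrative sketch (not part of the original file): a userspace client
 * mapping a GEM object. It queries the fake mmap offset for a handle via
 * DRM_IOCTL_NVIDIA_GEM_MAP_OFFSET and passes it to mmap(2) on the DRM fd.
 * Assumes <sys/ioctl.h> and <sys/mman.h>; mmap returns MAP_FAILED on error.
 */
static inline void *example_map_gem_object(int drm_fd, uint32_t handle,
                                           size_t size)
{
    struct drm_nvidia_gem_map_offset_params p = { .handle = handle };

    if (ioctl(drm_fd, DRM_IOCTL_NVIDIA_GEM_MAP_OFFSET, &p) != 0) {
        return NULL;
    }

    return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                drm_fd, p.offset);
}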
#define NV_GEM_ALLOC_NO_SCANOUT (1 << 0)
struct drm_nvidia_gem_alloc_nvkms_memory_params {
uint32_t handle; /* OUT */
uint8_t block_linear; /* IN */
uint8_t compressible; /* IN/OUT */
uint16_t __pad0;
uint64_t memory_size; /* IN */
uint32_t flags; /* IN */
uint32_t __pad1;
};
struct drm_nvidia_gem_export_dmabuf_memory_params {
uint32_t handle; /* IN GEM handle */
uint32_t __pad;
uint64_t nvkms_params_ptr; /* IN */
uint64_t nvkms_params_size; /* IN */
};
typedef enum {
NV_GEM_OBJECT_NVKMS,
NV_GEM_OBJECT_DMABUF,
NV_GEM_OBJECT_USERMEMORY,
NV_GEM_OBJECT_UNKNOWN = 0x7fffffff /* Force size of 32-bits. */
} drm_nvidia_gem_object_type;
struct drm_nvidia_gem_identify_object_params {
uint32_t handle; /* IN GEM handle */
drm_nvidia_gem_object_type object_type; /* OUT GEM object type */
};
struct drm_nvidia_get_dpy_id_for_connector_id_params {
uint32_t connectorId; /* IN */
uint32_t dpyId; /* OUT */
};
struct drm_nvidia_get_connector_id_for_dpy_id_params {
uint32_t dpyId; /* IN */
uint32_t connectorId; /* OUT */
};
struct drm_nvidia_grant_permissions_params {
int32_t fd; /* IN */
uint32_t dpyId; /* IN */
};
struct drm_nvidia_revoke_permissions_params {
uint32_t dpyId; /* IN */
};
struct drm_nvidia_semsurf_fence_ctx_create_params {
uint64_t index; /* IN Index of the desired semaphore in the
* fence context's semaphore surface */
/* Params for importing userspace semaphore surface */
uint64_t nvkms_params_ptr; /* IN */
uint64_t nvkms_params_size; /* IN */
uint32_t handle; /* OUT GEM handle to fence context */
uint32_t __pad;
};
struct drm_nvidia_semsurf_fence_create_params {
uint32_t fence_context_handle; /* IN GEM handle to fence context on which
* the fence runs */
uint32_t timeout_value_ms; /* IN Timeout value in ms for the fence
* after which the fence will be signaled
* with its error status set to -ETIMEDOUT.
* Default timeout value is 5000ms */
uint64_t wait_value; /* IN Semaphore value to reach before signal */
int32_t fd; /* OUT sync FD object representing the
* semaphore at the specified index reaching
* a value >= wait_value */
uint32_t __pad;
};
/*
* Note there is no provision for timeouts in this ioctl. The kernel
* documentation asserts timeouts should be handled by fence producers, and
* that waiters should not second-guess their logic, as it is producers rather
* than consumers that have better information when it comes to determining a
* reasonable timeout for a given workload.
*/
struct drm_nvidia_semsurf_fence_wait_params {
uint32_t fence_context_handle; /* IN GEM handle to fence context which will
* be used to wait on the sync FD. Need not
* be the fence context used to create the
* sync FD. */
int32_t fd; /* IN sync FD object to wait on */
uint64_t pre_wait_value; /* IN Wait for the semaphore represented by
* fence_context to reach this value before
* waiting for the sync file. */
uint64_t post_wait_value; /* IN Signal the semaphore represented by
* fence_context to this value after waiting
* for the sync file */
};
#endif /* _UAPI_NVIDIA_DRM_IOCTL_H_ */

View File

@@ -0,0 +1,307 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include "nvidia-drm-os-interface.h"
#include "nvidia-drm.h"
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_LINUX_SYNC_FILE_H_PRESENT)
#include <linux/file.h>
#include <linux/sync_file.h>
#endif
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include "nv-mm.h"
MODULE_PARM_DESC(
modeset,
"Enable atomic kernel modesetting (1 = enable, 0 = disable (default))");
bool nv_drm_modeset_module_param = false;
module_param_named(modeset, nv_drm_modeset_module_param, bool, 0400);
void *nv_drm_calloc(size_t nmemb, size_t size)
{
size_t total_size = nmemb * size;
//
// Check for multiplication overflow: if dividing the product back by
// nmemb does not recover size, the multiply wrapped around.
//
if ((nmemb != 0) && ((total_size / nmemb) != size))
{
return NULL;
}
return kzalloc(total_size, GFP_KERNEL);
}
void nv_drm_free(void *ptr)
{
if (IS_ERR(ptr)) {
return;
}
kfree(ptr);
}
char *nv_drm_asprintf(const char *fmt, ...)
{
va_list ap;
char *p;
va_start(ap, fmt);
p = kvasprintf(GFP_KERNEL, fmt, ap);
va_end(ap);
return p;
}
#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
#define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory")
#elif defined(NVCPU_FAMILY_ARM)
#if defined(NVCPU_ARM)
#define WRITE_COMBINE_FLUSH() { dsb(); outer_sync(); }
#elif defined(NVCPU_AARCH64)
#define WRITE_COMBINE_FLUSH() mb()
#endif
#elif defined(NVCPU_PPC64LE)
#define WRITE_COMBINE_FLUSH() asm volatile("sync":::"memory")
#endif
void nv_drm_write_combine_flush(void)
{
WRITE_COMBINE_FLUSH();
}
int nv_drm_lock_user_pages(unsigned long address,
unsigned long pages_count, struct page ***pages)
{
struct mm_struct *mm = current->mm;
struct page **user_pages;
int pages_pinned;
user_pages = nv_drm_calloc(pages_count, sizeof(*user_pages));
if (user_pages == NULL) {
return -ENOMEM;
}
nv_mmap_read_lock(mm);
pages_pinned = NV_PIN_USER_PAGES(address, pages_count, FOLL_WRITE,
user_pages, NULL);
nv_mmap_read_unlock(mm);
if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) {
goto failed;
}
*pages = user_pages;
return 0;
failed:
if (pages_pinned > 0) {
int i;
for (i = 0; i < pages_pinned; i++) {
NV_UNPIN_USER_PAGE(user_pages[i]);
}
}
nv_drm_free(user_pages);
return (pages_pinned < 0) ? pages_pinned : -EINVAL;
}
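/*
 * Illustrative sketch (not part of the original file): typical pairing of
 * nv_drm_lock_user_pages() and nv_drm_unlock_user_pages() when wrapping
 * userspace memory in a GEM object.
 */
static int example_with_user_pages(unsigned long address,
                                   unsigned long pages_count)
{
    struct page **pages;
    int ret = nv_drm_lock_user_pages(address, pages_count, &pages);

    if (ret != 0) {
        return ret;
    }

    /* ... use "pages", e.g. build an sg_table or vmap them ... */

    nv_drm_unlock_user_pages(pages_count, pages);
    return 0;
}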
void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages)
{
unsigned long i;
for (i = 0; i < pages_count; i++) {
set_page_dirty_lock(pages[i]);
NV_UNPIN_USER_PAGE(pages[i]);
}
nv_drm_free(pages);
}
void *nv_drm_vmap(struct page **pages, unsigned long pages_count)
{
return vmap(pages, pages_count, VM_USERMAP, PAGE_KERNEL);
}
void nv_drm_vunmap(void *address)
{
vunmap(address);
}
bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name)
{
worker->shutting_down = false;
if (nv_kthread_q_init(&worker->q, name)) {
return false;
}
spin_lock_init(&worker->lock);
return true;
}
void nv_drm_workthread_shutdown(nv_drm_workthread *worker)
{
unsigned long flags;
spin_lock_irqsave(&worker->lock, flags);
worker->shutting_down = true;
spin_unlock_irqrestore(&worker->lock, flags);
nv_kthread_q_stop(&worker->q);
}
void nv_drm_workthread_work_init(nv_drm_work *work,
void (*callback)(void *),
void *arg)
{
nv_kthread_q_item_init(work, callback, arg);
}
int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&worker->lock, flags);
if (!worker->shutting_down) {
ret = nv_kthread_q_schedule_q_item(&worker->q, work);
}
spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
void nv_drm_timer_setup(nv_drm_timer *timer, void (*callback)(nv_drm_timer *nv_drm_timer))
{
nv_timer_setup(timer, callback);
}
void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long timeout_native)
{
mod_timer(&timer->kernel_timer, timeout_native);
}
unsigned long nv_drm_timer_now(void)
{
return jiffies;
}
unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms)
{
return jiffies + msecs_to_jiffies(relative_timeout_ms);
}
bool nv_drm_del_timer_sync(nv_drm_timer *timer)
{
if (del_timer_sync(&timer->kernel_timer)) {
return true;
} else {
return false;
}
}
#if defined(NV_DRM_FENCE_AVAILABLE)
int nv_drm_create_sync_file(nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_SYNC_FILE_H_PRESENT)
struct sync_file *sync;
int fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
return fd;
}
/* sync_file_create() generates its own reference to the fence */
sync = sync_file_create(fence);
if (IS_ERR(sync)) {
put_unused_fd(fd);
return PTR_ERR(sync);
}
fd_install(fd, sync->file);
return fd;
#else /* defined(NV_LINUX_SYNC_FILE_H_PRESENT) */
return -EINVAL;
#endif /* defined(NV_LINUX_SYNC_FILE_H_PRESENT) */
}
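/*
 * Illustrative sketch (not part of the original file): exporting a fence as
 * a sync FD. sync_file_create() takes its own fence reference, so the
 * caller's reference is unaffected and must still be dropped as usual.
 */
static int example_export_fence_as_fd(nv_dma_fence_t *fence)
{
    int fd = nv_drm_create_sync_file(fence);

    if (fd >= 0) {
        /* Hand "fd" back to userspace, e.g. through ioctl params. */
    }
    return fd;
}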
nv_dma_fence_t *nv_drm_sync_file_get_fence(int fd)
{
#if defined(NV_SYNC_FILE_GET_FENCE_PRESENT)
return sync_file_get_fence(fd);
#else /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
return NULL;
#endif /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
}
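/*
 * Illustrative sketch, not part of the original file: round-tripping a fence
 * through a sync_file fd with the two helpers above. The names are
 * hypothetical; note that nv_drm_sync_file_get_fence() returns NULL when the
 * kernel lacks sync_file_get_fence().
 */
#if 0
static void example_sync_file_round_trip(nv_dma_fence_t *fence)
{
    int fd = nv_drm_create_sync_file(fence);

    if (fd >= 0) {
        /* Another consumer can later recover a fence reference: */
        nv_dma_fence_t *f = nv_drm_sync_file_get_fence(fd);

        if (f != NULL) {
            /* ... wait on or attach 'f', then drop the reference ... */
        }
    }
}
#endif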
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
void nv_drm_yield(void)
{
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
#endif /* NV_DRM_AVAILABLE */
/*************************************************************************
* Linux loading support code.
*************************************************************************/
static int __init nv_linux_drm_init(void)
{
return nv_drm_init();
}
static void __exit nv_linux_drm_exit(void)
{
nv_drm_exit();
}
module_init(nv_linux_drm_init);
module_exit(nv_linux_drm_exit);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_INFO(supported, "external");
MODULE_VERSION(NV_VERSION_STRING);


@@ -0,0 +1,580 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-priv.h"
#include "nvidia-drm-modeset.h"
#include "nvidia-drm-crtc.h"
#include "nvidia-drm-os-interface.h"
#include "nvidia-drm-helper.h"
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_VBLANK_H_PRESENT)
#include <drm/drm_vblank.h>
#endif
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
struct nv_drm_atomic_state {
struct NvKmsKapiRequestedModeSetConfig config;
struct drm_atomic_state base;
};
static inline struct nv_drm_atomic_state *to_nv_atomic_state(
struct drm_atomic_state *state)
{
return container_of(state, struct nv_drm_atomic_state, base);
}
struct drm_atomic_state *nv_drm_atomic_state_alloc(struct drm_device *dev)
{
struct nv_drm_atomic_state *nv_state =
nv_drm_calloc(1, sizeof(*nv_state));
if (nv_state == NULL || drm_atomic_state_init(dev, &nv_state->base) < 0) {
nv_drm_free(nv_state);
return NULL;
}
return &nv_state->base;
}
void nv_drm_atomic_state_clear(struct drm_atomic_state *state)
{
drm_atomic_state_default_clear(state);
}
void nv_drm_atomic_state_free(struct drm_atomic_state *state)
{
struct nv_drm_atomic_state *nv_state =
to_nv_atomic_state(state);
drm_atomic_state_default_release(state);
nv_drm_free(nv_state);
}
/**
 * __will_generate_flip_event - Check whether the hardware will generate a
 * flip event when it flips from the old crtc/plane state to the current
 * one. This function is called after drm_atomic_helper_swap_state(), so
 * the new state has already been swapped into the current state.
 */
static bool __will_generate_flip_event(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct drm_crtc_state *new_crtc_state = crtc->state;
struct nv_drm_crtc_state *nv_new_crtc_state =
to_nv_crtc_state(new_crtc_state);
struct drm_plane_state *old_plane_state = NULL;
struct drm_plane *plane = NULL;
int i;
if (!old_crtc_state->active && !new_crtc_state->active) {
/*
* The crtc is inactive in both the old and new states, so all planes
* are disabled and the hardware cannot generate flip events.
*/
return false;
}
/* Find out whether primary & overlay flip done events will be generated. */
nv_drm_for_each_plane_in_state(old_crtc_state->state,
plane, old_plane_state, i) {
if (old_plane_state->crtc != crtc) {
continue;
}
if (plane->type == DRM_PLANE_TYPE_CURSOR) {
continue;
}
/*
* The hardware generates flip events only for those
* planes which were previously active.
*/
if (old_crtc_state->active && old_plane_state->fb != NULL) {
nv_new_crtc_state->nv_flip->pending_events++;
}
}
return nv_new_crtc_state->nv_flip->pending_events != 0;
}
static int __nv_drm_put_back_post_fence_fd(
struct nv_drm_plane_state *plane_state,
const struct NvKmsKapiLayerReplyConfig *layer_reply_config)
{
int fd = layer_reply_config->postSyncptFd;
int ret = 0;
if ((fd >= 0) && (plane_state->fd_user_ptr != NULL)) {
ret = copy_to_user(plane_state->fd_user_ptr, &fd, sizeof(fd));
if (ret != 0) {
return ret;
}
/*! Set back to NULL and let set_property specify it again. */
plane_state->fd_user_ptr = NULL;
}
return ret;
}
static int __nv_drm_get_syncpt_data(
struct nv_drm_device *nv_dev,
struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state,
struct NvKmsKapiRequestedModeSetConfig *requested_config,
struct NvKmsKapiModeSetReplyConfig *reply_config)
{
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
struct NvKmsKapiHeadReplyConfig *head_reply_config;
struct nv_drm_plane_state *plane_state;
struct drm_crtc_state *new_crtc_state = crtc->state;
struct drm_plane_state *old_plane_state = NULL;
struct drm_plane_state *new_plane_state = NULL;
struct drm_plane *plane = NULL;
int i, ret;
if (!old_crtc_state->active && !new_crtc_state->active) {
/*
* The crtc is inactive in both the old and new states, so all planes
* are disabled; exit early.
*/
return 0;
}
head_reply_config = &reply_config->headReplyConfig[nv_crtc->head];
nv_drm_for_each_plane_in_state(old_crtc_state->state, plane, old_plane_state, i) {
struct nv_drm_plane *nv_plane = to_nv_plane(plane);
if (plane->type == DRM_PLANE_TYPE_CURSOR || old_plane_state->crtc != crtc) {
continue;
}
new_plane_state = plane->state;
if (new_plane_state->crtc != crtc) {
continue;
}
plane_state = to_nv_drm_plane_state(new_plane_state);
ret = __nv_drm_put_back_post_fence_fd(
plane_state,
&head_reply_config->layerReplyConfig[nv_plane->layer_idx]);
if (ret != 0) {
return ret;
}
}
return 0;
}
/**
 * nv_drm_atomic_apply_modeset_config - validate/commit a modeset config
 * @dev: DRM device
 * @state: atomic state tracking the atomic update
 * @commit: commit (true) or check (false) the modeset config associated with
 * the atomic update
 *
 * @state tracks the atomic update and the modeset objects affected by it,
 * but the state of the modeset objects it contains depends on the current
 * stage of the update. At the commit stage, the proposed state has already
 * been swapped into the current state, and @state holds the old state of
 * all affected modeset objects. At the check/validation stage, @state holds
 * the proposed state of all affected objects.
 *
 * Sequence of an atomic update -
 * 1. Check/validate the proposed atomic state,
 * 2. Do any other steps that might fail,
 * 3. Swap the proposed state into the current state pointers,
 * 4. Actually commit the hardware state,
 * 5. Clean up the old state.
 *
 * This function is called at stage (1) for validation, and at stage (4)
 * after drm_atomic_helper_swap_state().
 */
static int
nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
struct drm_atomic_state *state,
bool commit)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct NvKmsKapiRequestedModeSetConfig *requested_config =
&(to_nv_atomic_state(state)->config);
struct NvKmsKapiModeSetReplyConfig reply_config = { };
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int i;
int ret;
memset(requested_config, 0, sizeof(*requested_config));
/* Loop over affected crtcs and construct NvKmsKapiRequestedModeSetConfig */
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
/*
* When committing a state, the new state is already stored in
* crtc->state. When checking a proposed state, the proposed state is
* stored in crtc_state.
*/
struct drm_crtc_state *new_crtc_state =
commit ? crtc->state : crtc_state;
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
requested_config->headRequestedConfig[nv_crtc->head] =
to_nv_crtc_state(new_crtc_state)->req_config;
requested_config->headsMask |= 1 << nv_crtc->head;
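/* e.g. with heads 0 and 2 active, the statements above accumulate
 * headsMask == 0x5 (illustrative). */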
if (commit) {
struct drm_crtc_state *old_crtc_state = crtc_state;
struct nv_drm_crtc_state *nv_new_crtc_state =
to_nv_crtc_state(new_crtc_state);
nv_new_crtc_state->nv_flip->event = new_crtc_state->event;
nv_new_crtc_state->nv_flip->pending_events = 0;
new_crtc_state->event = NULL;
/*
* If the hardware will generate a flip event, defer processing of
* the flip object until that event arrives.
*/
if (__will_generate_flip_event(crtc, old_crtc_state)) {
nv_drm_crtc_enqueue_flip(nv_crtc,
nv_new_crtc_state->nv_flip);
nv_new_crtc_state->nv_flip = NULL;
}
#if defined(NV_DRM_CRTC_STATE_HAS_VRR_ENABLED)
requested_config->headRequestedConfig[nv_crtc->head].modeSetConfig.vrrEnabled = new_crtc_state->vrr_enabled;
#endif
}
}
if (commit && nvKms->systemInfo.bAllowWriteCombining) {
/*
* XXX This call is required only if a dumb buffer is going
* to be presented.
*/
nv_drm_write_combine_flush();
}
if (!nvKms->applyModeSetConfig(nv_dev->pDevice,
requested_config,
&reply_config,
commit)) {
return -EINVAL;
}
if (commit && nv_dev->supportsSyncpts) {
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
/*! loop over affected crtcs and get NvKmsKapiModeSetReplyConfig */
ret = __nv_drm_get_syncpt_data(
nv_dev, crtc, crtc_state, requested_config, &reply_config);
if (ret != 0) {
return ret;
}
}
}
return 0;
}
int nv_drm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
int ret = 0;
if ((ret = drm_atomic_helper_check(dev, state)) != 0) {
goto done;
}
ret = nv_drm_atomic_apply_modeset_config(dev,
state, false /* commit */);
done:
return ret;
}
/**
 * __nv_drm_handle_flip_event - handle a flip-occurred event
 * @nv_crtc: crtc on which the flip has occurred
 *
 * This handler dequeues the first nv_drm_flip from the crtc's flip_list,
 * generates an event if one was requested at flip time, and frees the
 * nv_drm_flip.
 */
static void __nv_drm_handle_flip_event(struct nv_drm_crtc *nv_crtc)
{
struct drm_device *dev = nv_crtc->base.dev;
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_flip *nv_flip;
/*
* Acquire event_lock before dequeuing the nv_flip object; otherwise
* immediate flip event delivery from nv_drm_atomic_commit() races
* ahead and corrupts the event delivery order.
*/
spin_lock(&dev->event_lock);
nv_flip = nv_drm_crtc_dequeue_flip(nv_crtc);
if (likely(nv_flip != NULL)) {
struct nv_drm_flip *nv_deferred_flip, *nv_next_deferred_flip;
if (nv_flip->event != NULL) {
drm_crtc_send_vblank_event(&nv_crtc->base, nv_flip->event);
}
/*
* Process flips that were deferred until processing of this nv_flip
* object.
*/
list_for_each_entry_safe(nv_deferred_flip,
nv_next_deferred_flip,
&nv_flip->deferred_flip_list, list_entry) {
if (nv_deferred_flip->event != NULL) {
drm_crtc_send_vblank_event(&nv_crtc->base,
nv_deferred_flip->event);
}
list_del(&nv_deferred_flip->list_entry);
nv_drm_free(nv_deferred_flip);
}
}
spin_unlock(&dev->event_lock);
wake_up_all(&nv_dev->flip_event_wq);
nv_drm_free(nv_flip);
}
int nv_drm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock)
{
int ret = -EBUSY;
int i;
struct drm_crtc *crtc = NULL;
struct drm_crtc_state *crtc_state = NULL;
struct nv_drm_device *nv_dev = to_nv_device(dev);
/*
* drm_mode_config_funcs::atomic_commit() mandates returning -EBUSY for a
* nonblocking commit if previous updates (commit tasks/flip events) are
* still pending. For blocking commits, it mandates waiting for previous
* updates to complete.
*/
if (nonblock) {
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
/*
* Holding nv_drm_crtc::flip_list_lock is not required here because:
*
* The core DRM driver acquires locks for all affected crtcs before
* calling into the ->commit() hook, so no other thread can call into
* the ->commit() hook for the same crtcs and enqueue flip objects
* onto flip_list -
*
* nv_drm_atomic_commit()
* |-> nv_drm_atomic_apply_modeset_config(commit=true)
* |-> nv_drm_crtc_enqueue_flip()
*
* The only remaining possibility is that the list_empty() check races
* with the code path that dequeues flip objects -
*
* __nv_drm_handle_flip_event()
* |-> nv_drm_crtc_dequeue_flip()
*
* But that race cannot make list_empty() return an incorrect result:
* nv_drm_crtc_dequeue_flip() in the middle of updating the list cannot
* trick us into thinking the list is empty when it isn't.
*/
if (!list_empty(&nv_crtc->flip_list)) {
return -EBUSY;
}
}
}
#if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_HAS_STALL_ARG)
/*
* nv_drm_atomic_commit() implements blocking/non-blocking atomic commits
* using nv_drm_crtc::flip_list; it does not require any help from core DRM
* helper functions to stall commit processing, so false is passed for the
* 'stall' parameter.
* In this context, failure from drm_atomic_helper_swap_state() is not
* expected.
*/
#if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_RETURN_INT)
ret = drm_atomic_helper_swap_state(state, false /* stall */);
if (WARN_ON(ret != 0)) {
return ret;
}
#else
drm_atomic_helper_swap_state(state, false /* stall */);
#endif
#else
drm_atomic_helper_swap_state(dev, state);
#endif
/*
* nv_drm_atomic_commit() must not return failure after calling
* drm_atomic_helper_swap_state().
*/
if ((ret = nv_drm_atomic_apply_modeset_config(
dev,
state, true /* commit */)) != 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to apply atomic modeset. Error code: %d",
ret);
goto done;
}
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
struct nv_drm_crtc_state *nv_new_crtc_state =
to_nv_crtc_state(crtc->state);
/*
* If nv_drm_atomic_apply_modeset_config() hasn't consumed the flip
* object, no hardware event will be generated for this flip, and we need
* to process it here:
*/
if (nv_new_crtc_state->nv_flip != NULL) {
/*
* First, if flips are still pending on this crtc, defer processing of
* this flip until the last flip in the queue has been processed, to
* keep event delivery in order.
*/
spin_lock(&nv_crtc->flip_list_lock);
if (!list_empty(&nv_crtc->flip_list)) {
struct nv_drm_flip *nv_last_flip =
list_last_entry(&nv_crtc->flip_list,
struct nv_drm_flip, list_entry);
list_add(&nv_new_crtc_state->nv_flip->list_entry,
&nv_last_flip->deferred_flip_list);
nv_new_crtc_state->nv_flip = NULL;
}
spin_unlock(&nv_crtc->flip_list_lock);
}
if (nv_new_crtc_state->nv_flip != NULL) {
/*
* Otherwise, with no more pending flips for this crtc, deliver the
* event for the current flip immediately.
*/
if (nv_new_crtc_state->nv_flip->event != NULL) {
spin_lock(&dev->event_lock);
drm_crtc_send_vblank_event(crtc,
nv_new_crtc_state->nv_flip->event);
spin_unlock(&dev->event_lock);
}
nv_drm_free(nv_new_crtc_state->nv_flip);
nv_new_crtc_state->nv_flip = NULL;
}
if (!nonblock) {
/*
* Holding nv_drm_crtc::flip_list_lock is not required here because:
*
* The core DRM driver acquires locks for all affected crtcs before
* calling into the ->commit() hook, so no other thread can call into
* the ->commit() hook for the same crtcs and enqueue flip objects
* onto flip_list -
*
* nv_drm_atomic_commit()
* |-> nv_drm_atomic_apply_modeset_config(commit=true)
* |-> nv_drm_crtc_enqueue_flip()
*
* The only remaining possibility is that the list_empty() check races
* with the code path that dequeues flip objects -
*
* __nv_drm_handle_flip_event()
* |-> nv_drm_crtc_dequeue_flip()
*
* But that race cannot make list_empty() return an incorrect result:
* nv_drm_crtc_dequeue_flip() in the middle of updating the list cannot
* trick us into thinking the list is empty when it isn't.
*/
if (wait_event_timeout(
nv_dev->flip_event_wq,
list_empty(&nv_crtc->flip_list),
3 * HZ /* 3 seconds */) == 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Flip event timeout on head %u", nv_crtc->head);
}
}
}
done:
#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
/*
* If ref counting is present, state will be freed when the caller
* drops its reference after we return.
*/
#else
drm_atomic_state_free(state);
#endif
return 0;
}
void nv_drm_handle_flip_occurred(struct nv_drm_device *nv_dev,
NvU32 head, NvU32 plane)
{
struct nv_drm_crtc *nv_crtc = nv_drm_crtc_lookup(nv_dev, head);
if (NV_DRM_WARN(nv_crtc == NULL)) {
return;
}
__nv_drm_handle_flip_event(nv_crtc);
}
#endif


@@ -0,0 +1,53 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_MODESET_H__
#define __NVIDIA_DRM_MODESET_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvkms-kapi.h"
struct drm_device;
struct drm_atomic_state;
struct drm_atomic_state *nv_drm_atomic_state_alloc(struct drm_device *dev);
void nv_drm_atomic_state_clear(struct drm_atomic_state *state);
void nv_drm_atomic_state_free(struct drm_atomic_state *state);
int nv_drm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
int nv_drm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock);
void nv_drm_handle_flip_occurred(struct nv_drm_device *nv_dev,
NvU32 head, NvU32 plane);
int nv_drm_shut_down_all_crtcs(struct drm_device *dev);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_MODESET_H__ */


@@ -0,0 +1,116 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_OS_INTERFACE_H__
#define __NVIDIA_DRM_OS_INTERFACE_H__
#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
#include "nvtypes.h"
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_FENCE_AVAILABLE)
#include "nvidia-dma-fence-helper.h"
#endif
#if defined(NV_LINUX)
#include "nv-kthread-q.h"
#include "linux/spinlock.h"
typedef struct nv_drm_workthread {
spinlock_t lock;
struct nv_kthread_q q;
bool shutting_down;
} nv_drm_workthread;
typedef nv_kthread_q_item_t nv_drm_work;
#else /* defined(NV_LINUX) */
#error "Need to define deferred work primitives for this OS"
#endif /* else defined(NV_LINUX) */
#if defined(NV_LINUX)
#include "nv-timer.h"
typedef struct nv_timer nv_drm_timer;
#else /* defined(NV_LINUX) */
#error "Need to define kernel timer callback primitives for this OS"
#endif /* else defined(NV_LINUX) */
struct page;
/* Set to true when the atomic modeset feature is enabled. */
extern bool nv_drm_modeset_module_param;
void *nv_drm_calloc(size_t nmemb, size_t size);
void nv_drm_free(void *ptr);
char *nv_drm_asprintf(const char *fmt, ...);
void nv_drm_write_combine_flush(void);
int nv_drm_lock_user_pages(unsigned long address,
unsigned long pages_count, struct page ***pages);
void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages);
void *nv_drm_vmap(struct page **pages, unsigned long pages_count);
void nv_drm_vunmap(void *address);
bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name);
/* Can be called concurrently with nv_drm_workthread_add_work() */
void nv_drm_workthread_shutdown(nv_drm_workthread *worker);
void nv_drm_workthread_work_init(nv_drm_work *work,
void (*callback)(void *),
void *arg);
/* Can be called concurrently with nv_drm_workthread_shutdown() */
int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work);
void nv_drm_timer_setup(nv_drm_timer *timer,
void (*callback)(nv_drm_timer *nv_drm_timer));
void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long timeout_native);
bool nv_drm_del_timer_sync(nv_drm_timer *timer);
unsigned long nv_drm_timer_now(void);
unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms);
#if defined(NV_DRM_FENCE_AVAILABLE)
int nv_drm_create_sync_file(nv_dma_fence_t *fence);
nv_dma_fence_t *nv_drm_sync_file_get_fence(int fd);
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
void nv_drm_yield(void);
#endif /* defined(NV_DRM_AVAILABLE) */
#endif /* __NVIDIA_DRM_OS_INTERFACE_H__ */


@@ -0,0 +1,170 @@
/*
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_PRIV_H__
#define __NVIDIA_DRM_PRIV_H__
#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_DEVICE_H_PRESENT)
#include <drm/drm_device.h>
#endif
#if defined(NV_DRM_DRM_GEM_H_PRESENT)
#include <drm/drm_gem.h>
#endif
#include "nvidia-drm-os-interface.h"
#include "nvkms-kapi.h"
#define NV_DRM_LOG_ERR(__fmt, ...) \
DRM_ERROR("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
/*
* DRM_WARN() was added in v4.9 by kernel commit
* 30b0da8d556e65ff935a56cd82c05ba0516d3e4a
*
* Before this commit, only DRM_INFO and DRM_ERROR were defined and
* DRM_INFO(fmt, ...) was defined as
* printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__). So, if
* DRM_WARN is undefined, NV_DRM_LOG_WARN is defined below following the
* same pattern as DRM_INFO.
*/
#ifdef DRM_WARN
#define NV_DRM_LOG_WARN(__fmt, ...) \
DRM_WARN("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#else
#define NV_DRM_LOG_WARN(__fmt, ...) \
printk(KERN_WARNING "[" DRM_NAME "] [nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#endif
#define NV_DRM_LOG_INFO(__fmt, ...) \
DRM_INFO("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#define NV_DRM_DEV_LOG_INFO(__dev, __fmt, ...) \
NV_DRM_LOG_INFO("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
#define NV_DRM_DEV_LOG_WARN(__dev, __fmt, ...) \
NV_DRM_LOG_WARN("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
#define NV_DRM_DEV_LOG_ERR(__dev, __fmt, ...) \
NV_DRM_LOG_ERR("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
#define NV_DRM_WARN(__condition) WARN_ON((__condition))
#define NV_DRM_DEBUG_DRIVER(__fmt, ...) \
DRM_DEBUG_DRIVER("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#define NV_DRM_DEV_DEBUG_DRIVER(__dev, __fmt, ...) \
DRM_DEBUG_DRIVER("[GPU ID 0x%08x] " __fmt, \
__dev->gpu_info.gpu_id, ##__VA_ARGS__)
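/*
 * Example usage (illustrative, not part of the original header):
 *
 *   NV_DRM_DEV_LOG_INFO(nv_dev, "bound head %u", head);
 *
 * logs a line tagged with both "[nvidia-drm]" and the device's
 * "[GPU ID 0x%08x]" prefix; 'nv_dev' and 'head' are hypothetical names.
 */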
struct nv_drm_device {
nv_gpu_info_t gpu_info;
struct drm_device *dev;
struct NvKmsKapiDevice *pDevice;
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
/*
* Lock to protect the drm subsystem and the fields of this structure
* from concurrent access.
*
* Do not acquire this lock while a lock from the core drm subsystem
* is already held; the locking order should be -
*
* mutex_lock(nv_drm_device::lock);
* ....
* mutex_lock(drm_device::mode_config::lock);
* ....
* .......
* mutex_unlock(drm_device::mode_config::lock);
* ........
* ..
* mutex_lock(drm_device::struct_mutex);
* ....
* ........
* mutex_unlock(drm_device::struct_mutex);
* ..
* mutex_unlock(nv_drm_device::lock);
*/
struct mutex lock;
NvU32 pitchAlignment;
NvU8 genericPageKind;
NvU8 pageKindGeneration;
NvU8 sectorLayout;
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
NvU64 modifiers[6 /* block linear */ + 1 /* linear */ + 1 /* terminator */];
#endif
atomic_t enable_event_handling;
/**
* @flip_event_wq:
*
* The wait queue on which nv_drm_atomic_commit() sleeps until the
* next flip event occurs.
*/
wait_queue_head_t flip_event_wq;
#endif
#if defined(NV_DRM_FENCE_AVAILABLE)
NvU64 semsurf_stride;
NvU64 semsurf_max_submitted_offset;
#endif
NvBool hasVideoMemory;
NvBool supportsSyncpts;
struct drm_property *nv_out_fence_property;
struct drm_property *nv_input_colorspace_property;
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
struct drm_property *nv_hdr_output_metadata_property;
#endif
struct nv_drm_device *next;
};
static inline struct nv_drm_device *to_nv_device(
struct drm_device *dev)
{
return dev->dev_private;
}
extern const struct NvKmsKapiFunctionsTable* const nvKms;
#endif /* defined(NV_DRM_AVAILABLE) */
#endif /* __NVIDIA_DRM_PRIV_H__ */


@@ -0,0 +1,231 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_PLANE_H_PRESENT)
#include <drm/drm_plane.h>
#endif
#include <drm/drm_modes.h>
#include <uapi/drm/drm_fourcc.h>
#include "nvidia-drm-priv.h"
#include "nvidia-drm-utils.h"
struct NvKmsKapiConnectorInfo*
nvkms_get_connector_info(struct NvKmsKapiDevice *pDevice,
NvKmsKapiConnector hConnector)
{
struct NvKmsKapiConnectorInfo *connectorInfo =
nv_drm_calloc(1, sizeof(*connectorInfo));
if (connectorInfo == NULL) {
return ERR_PTR(-ENOMEM);
}
if (!nvKms->getConnectorInfo(pDevice, hConnector, connectorInfo)) {
nv_drm_free(connectorInfo);
return ERR_PTR(-EINVAL);
}
return connectorInfo;
}
int
nvkms_connector_signal_to_drm_encoder_signal(NvKmsConnectorSignalFormat format)
{
switch (format) {
default:
case NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN:
return DRM_MODE_ENCODER_NONE;
case NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS:
case NVKMS_CONNECTOR_SIGNAL_FORMAT_DP:
return DRM_MODE_ENCODER_TMDS;
case NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS:
return DRM_MODE_ENCODER_LVDS;
case NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA:
return DRM_MODE_ENCODER_DAC;
case NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI:
return DRM_MODE_ENCODER_DSI;
}
}
int nvkms_connector_type_to_drm_connector_type(NvKmsConnectorType type,
NvBool internal)
{
switch (type) {
default:
case NVKMS_CONNECTOR_TYPE_UNKNOWN:
return DRM_MODE_CONNECTOR_Unknown;
case NVKMS_CONNECTOR_TYPE_DP:
return
internal ?
DRM_MODE_CONNECTOR_eDP : DRM_MODE_CONNECTOR_DisplayPort;
case NVKMS_CONNECTOR_TYPE_HDMI:
return DRM_MODE_CONNECTOR_HDMIA;
case NVKMS_CONNECTOR_TYPE_DVI_D:
return DRM_MODE_CONNECTOR_DVID;
case NVKMS_CONNECTOR_TYPE_DVI_I:
return DRM_MODE_CONNECTOR_DVII;
case NVKMS_CONNECTOR_TYPE_LVDS:
return DRM_MODE_CONNECTOR_LVDS;
case NVKMS_CONNECTOR_TYPE_VGA:
return DRM_MODE_CONNECTOR_VGA;
case NVKMS_CONNECTOR_TYPE_DSI:
return DRM_MODE_CONNECTOR_DSI;
case NVKMS_CONNECTOR_TYPE_DP_SERIALIZER:
return DRM_MODE_CONNECTOR_DisplayPort;
}
}
void
nvkms_display_mode_to_drm_mode(const struct NvKmsKapiDisplayMode *displayMode,
struct drm_display_mode *mode)
{
#if defined(NV_DRM_DISPLAY_MODE_HAS_VREFRESH)
mode->vrefresh = (displayMode->timings.refreshRate + 500) / 1000; /* In Hz */
#endif
mode->clock = (displayMode->timings.pixelClockHz + 500) / 1000; /* In kHz */
mode->hdisplay = displayMode->timings.hVisible;
mode->hsync_start = displayMode->timings.hSyncStart;
mode->hsync_end = displayMode->timings.hSyncEnd;
mode->htotal = displayMode->timings.hTotal;
mode->hskew = displayMode->timings.hSkew;
mode->vdisplay = displayMode->timings.vVisible;
mode->vsync_start = displayMode->timings.vSyncStart;
mode->vsync_end = displayMode->timings.vSyncEnd;
mode->vtotal = displayMode->timings.vTotal;
if (displayMode->timings.flags.interlaced) {
mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
if (displayMode->timings.flags.doubleScan) {
mode->flags |= DRM_MODE_FLAG_DBLSCAN;
}
if (displayMode->timings.flags.hSyncPos) {
mode->flags |= DRM_MODE_FLAG_PHSYNC;
}
if (displayMode->timings.flags.hSyncNeg) {
mode->flags |= DRM_MODE_FLAG_NHSYNC;
}
if (displayMode->timings.flags.vSyncPos) {
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
if (displayMode->timings.flags.vSyncNeg) {
mode->flags |= DRM_MODE_FLAG_NVSYNC;
}
mode->width_mm = displayMode->timings.widthMM;
mode->height_mm = displayMode->timings.heightMM;
if (strlen(displayMode->name) != 0) {
memcpy(
mode->name, displayMode->name,
min(sizeof(mode->name), sizeof(displayMode->name)));
mode->name[sizeof(mode->name) - 1] = '\0';
} else {
drm_mode_set_name(mode);
}
}
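/*
 * Worked example (illustrative, not part of the original file): NvKms
 * reports refreshRate in millihertz and pixelClockHz in hertz, while DRM
 * stores vrefresh in Hz and clock in kHz. The "+ 500" terms round to the
 * nearest unit, e.g. refreshRate == 59940 (59.94 Hz) yields
 * (59940 + 500) / 1000 == 60.
 */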
void drm_mode_to_nvkms_display_mode(const struct drm_display_mode *src,
struct NvKmsKapiDisplayMode *dst)
{
#if defined(NV_DRM_DISPLAY_MODE_HAS_VREFRESH)
dst->timings.refreshRate = src->vrefresh * 1000;
#else
dst->timings.refreshRate = drm_mode_vrefresh(src) * 1000;
#endif
dst->timings.pixelClockHz = src->clock * 1000; /* In Hz */
dst->timings.hVisible = src->hdisplay;
dst->timings.hSyncStart = src->hsync_start;
dst->timings.hSyncEnd = src->hsync_end;
dst->timings.hTotal = src->htotal;
dst->timings.hSkew = src->hskew;
dst->timings.vVisible = src->vdisplay;
dst->timings.vSyncStart = src->vsync_start;
dst->timings.vSyncEnd = src->vsync_end;
dst->timings.vTotal = src->vtotal;
if (src->flags & DRM_MODE_FLAG_INTERLACE) {
dst->timings.flags.interlaced = NV_TRUE;
} else {
dst->timings.flags.interlaced = NV_FALSE;
}
if (src->flags & DRM_MODE_FLAG_DBLSCAN) {
dst->timings.flags.doubleScan = NV_TRUE;
} else {
dst->timings.flags.doubleScan = NV_FALSE;
}
if (src->flags & DRM_MODE_FLAG_PHSYNC) {
dst->timings.flags.hSyncPos = NV_TRUE;
} else {
dst->timings.flags.hSyncPos = NV_FALSE;
}
if (src->flags & DRM_MODE_FLAG_NHSYNC) {
dst->timings.flags.hSyncNeg = NV_TRUE;
} else {
dst->timings.flags.hSyncNeg = NV_FALSE;
}
if (src->flags & DRM_MODE_FLAG_PVSYNC) {
dst->timings.flags.vSyncPos = NV_TRUE;
} else {
dst->timings.flags.vSyncPos = NV_FALSE;
}
if (src->flags & DRM_MODE_FLAG_NVSYNC) {
dst->timings.flags.vSyncNeg = NV_TRUE;
} else {
dst->timings.flags.vSyncNeg = NV_FALSE;
}
dst->timings.widthMM = src->width_mm;
dst->timings.heightMM = src->height_mm;
memcpy(dst->name, src->name, min(sizeof(dst->name), sizeof(src->name)));
}
#endif


@@ -0,0 +1,54 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_UTILS_H__
#define __NVIDIA_DRM_UTILS_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvkms-kapi.h"
enum drm_plane_type;
struct drm_display_mode;
struct NvKmsKapiConnectorInfo*
nvkms_get_connector_info(struct NvKmsKapiDevice *pDevice,
NvKmsKapiConnector hConnector);
int nvkms_connector_signal_to_drm_encoder_signal(
NvKmsConnectorSignalFormat format);
int nvkms_connector_type_to_drm_connector_type(NvKmsConnectorType type,
NvBool internal);
void nvkms_display_mode_to_drm_mode(
const struct NvKmsKapiDisplayMode *displayMode,
struct drm_display_mode *mode);
void drm_mode_to_nvkms_display_mode(const struct drm_display_mode *src,
struct NvKmsKapiDisplayMode *dst);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_UTILS_H__ */


@@ -0,0 +1,139 @@
###########################################################################
# Kbuild fragment for nvidia-drm.ko
###########################################################################
#
# Define NVIDIA_DRM_{SOURCES,OBJECTS}
#
NVIDIA_DRM_SOURCES =
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-drv.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-utils.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-crtc.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-encoder.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-connector.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fb.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-modeset.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fence.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-linux.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-helper.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-kthread-q.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-pci-table.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-nvkms-memory.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-user-memory.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-dma-buf.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-format.c
NVIDIA_DRM_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_DRM_SOURCES))
obj-m += nvidia-drm.o
nvidia-drm-y := $(NVIDIA_DRM_OBJECTS)
NVIDIA_DRM_KO = nvidia-drm/nvidia-drm.ko
NV_KERNEL_MODULE_TARGETS += $(NVIDIA_DRM_KO)
#
# Define nvidia-drm.ko-specific CFLAGS.
#
NVIDIA_DRM_CFLAGS += -I$(src)/nvidia-drm
NVIDIA_DRM_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0
$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_DRM_OBJECTS), $(NVIDIA_DRM_CFLAGS))
#
# Register the conftests needed by nvidia-drm.ko
#
NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_DRM_OBJECTS)
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_atomic_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_inc
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_unref
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_reinit_primary_mode_group
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages_remote
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages_remote
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_lookup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_state_ref_counting
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_driver_has_gem_prime_res_obj
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_connector_dpms
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_funcs_have_mode_in_name
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_has_vrr_capable_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_framebuffer_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_put
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_format_num_planes
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_for_each_possible_encoder
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_rotation_available
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_vma_offset_exact_lookup_locked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_fence_set_error
NV_CONFTEST_FUNCTION_COMPILE_TESTS += sync_file_get_fence
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_bus_type
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_irq
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_name
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_device_list
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_legacy_dev_list
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_set_busid
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_connectors_changed
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_init_function_args
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_helper_mode_fill_fb_struct
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_drop_has_from_release_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_unload_has_int_return_type
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_crtc_destroy_state_has_crtc_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_plane_destroy_state_has_plane_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_object_find_has_file_priv_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_buf_owner
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_list_iter
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_swap_state_has_stall_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_prime_flag_present
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_has_resv
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_async_flip
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_pageflip_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_vrr_enabled
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_modifiers_present
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_node_is_allowed_has_tag_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_offset_node_has_readonly
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_display_mode_has_vrefresh
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_master_set_has_int_return_type
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_free_object
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_prime_pages_to_sg_has_drm_device_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_callbacks
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_atomic_check_has_atomic_state_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_vmap_has_map_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_plane_atomic_check_has_atomic_state_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_device_has_pdev
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_no_vblank
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_config_has_allow_fb_modifiers
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_has_hdr_output_metadata
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_add_fence
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_reserve_fences
NV_CONFTEST_TYPE_COMPILE_TESTS += reservation_object_reserve_shared_has_num_fences_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_has_override_edid
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_has_leases
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_file_get_master
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_modeset_lock_all_end
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_lookup
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_put
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy
NV_CONFTEST_TYPE_COMPILE_TESTS += fence_ops_use_64bit_seqno


@@ -0,0 +1,59 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm.h"
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-priv.h"
#include "nvidia-drm-drv.h"
static struct NvKmsKapiFunctionsTable nvKmsFuncsTable = {
.versionString = NV_VERSION_STRING,
};
const struct NvKmsKapiFunctionsTable* const nvKms = &nvKmsFuncsTable;
#endif
int nv_drm_init(void)
{
#if defined(NV_DRM_AVAILABLE)
if (!nvKmsKapiGetFunctionsTable(&nvKmsFuncsTable)) {
NV_DRM_LOG_ERR(
"Version mismatch: nvidia-modeset.ko(%s) nvidia-drm.ko(%s)",
nvKmsFuncsTable.versionString, NV_VERSION_STRING);
return -EINVAL;
}
return nv_drm_probe_devices();
#else
return 0;
#endif
}
void nv_drm_exit(void)
{
#if defined(NV_DRM_AVAILABLE)
nv_drm_remove_devices();
#endif
}


@@ -0,0 +1,31 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_H__
#define __NVIDIA_DRM_H__
#include "nvidia-drm-conftest.h"
int nv_drm_init(void);
void nv_drm_exit(void);
#endif /* __NVIDIA_DRM_H__ */