From ee670a0afd767068a9147ed91209c189b466bd8c Mon Sep 17 00:00:00 2001
From: srajum
Date: Sat, 7 Aug 2021 00:06:53 +0530
Subject: [PATCH] gpu: nvgpu: allowlist violations wrt RFD

- Rule 21.3: The memory allocation and deallocation functions of
  <stdlib.h> shall not be used.
- Rule 4.12: Dynamic memory allocation shall not be used.
- These are approved RFDs:
  https://jirasw.nvidia.com/browse/TID-1129
  https://jirasw.nvidia.com/browse/TID-1131

JIRA NVGPU-5955

Change-Id: I1bff5d63b406d91f61a333da59cf43b9fb0a3a92
Signed-off-by: srajum
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2572086
(cherry picked from commit c8840abab61a50c7afb561eac884a40a1338397d)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2674342
Reviewed-by: svcacv
Reviewed-by: svc-mobile-coverity
Reviewed-by: svc-mobile-misra
Reviewed-by: Rajesh Devaraj
Reviewed-by: Ankur Kishore
GVS: Gerrit_Virtual_Submit
---
 drivers/gpu/nvgpu/os/posix/kmem.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/drivers/gpu/nvgpu/os/posix/kmem.c b/drivers/gpu/nvgpu/os/posix/kmem.c
index 3bac4b026..d7aa70142 100644
--- a/drivers/gpu/nvgpu/os/posix/kmem.c
+++ b/drivers/gpu/nvgpu/os/posix/kmem.c
@@ -73,7 +73,11 @@ struct nvgpu_kmem_cache *nvgpu_kmem_cache_create(struct gk20a *g, size_t size)
 		return NULL;
 	}
 #endif
+	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 21_3), "TID-1131")
+	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 4_12), "TID-1129")
 	cache = malloc(sizeof(struct nvgpu_kmem_cache));
+	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 4_12))
+	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 21_3))
 
 	if (cache == NULL) {
 		return NULL;
@@ -93,6 +97,7 @@ struct nvgpu_kmem_cache *nvgpu_kmem_cache_create(struct gk20a *g, size_t size)
 
 void nvgpu_kmem_cache_destroy(struct nvgpu_kmem_cache *cache)
 {
+	NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
 	free(cache);
 }
 
@@ -106,7 +111,11 @@ void *nvgpu_kmem_cache_alloc(struct nvgpu_kmem_cache *cache)
 		return NULL;
 	}
 #endif
+	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 21_3), "TID-1131")
+	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 4_12), "TID-1129")
 	ptr = malloc(cache->size);
+	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 4_12))
+	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 21_3))
 
 	if (ptr == NULL) {
 		nvgpu_warn(NULL, "malloc returns NULL");
@@ -119,6 +128,7 @@ void *nvgpu_kmem_cache_alloc(struct nvgpu_kmem_cache *cache)
 void nvgpu_kmem_cache_free(struct nvgpu_kmem_cache *cache, void *ptr)
 {
 	(void)cache;
+	NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
 	free(ptr);
 }
 
@@ -142,7 +152,11 @@ void *nvgpu_kmalloc_impl(struct gk20a *g, size_t size, void *ip)
 	 * nvmap_page_alloc in qnx (i.e. using shm_open/shm_ctl_special/mmap
 	 * calls.
 	 */
+	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 21_3), "TID-1131")
+	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 4_12), "TID-1129")
 	ptr = malloc(size);
+	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 4_12))
+	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 21_3))
 
 	if (ptr == NULL) {
 		nvgpu_warn(NULL, "malloc returns NULL");
@@ -166,6 +180,7 @@ void *nvgpu_kzalloc_impl(struct gk20a *g, size_t size, void *ip)
 		return NULL;
 	}
 #endif
+	NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
 	ptr = calloc(num, size);
 
 	if (ptr == NULL) {
@@ -190,6 +205,7 @@ void *nvgpu_kcalloc_impl(struct gk20a *g, size_t n, size_t size, void *ip)
 		return NULL;
 	}
 #endif
+	NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
 	ptr = calloc(num, (nvgpu_safe_mult_u64(n, size)));
 
 	if (ptr == NULL) {
@@ -213,6 +229,7 @@ void *nvgpu_vzalloc_impl(struct gk20a *g, unsigned long size, void *ip)
 void nvgpu_kfree_impl(struct gk20a *g, void *addr)
 {
 	(void)g;
+	NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
 	free(addr);
 }
 
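Note on the annotation pattern (not part of the patch): the macros used above mark a deviating statement for static analysis while ordinary builds compile them away. The minimal, self-contained sketch below mirrors that usage under the assumption of empty placeholder macro bodies; the example_cache type and the example_cache_alloc/example_cache_free functions are hypothetical, and the real NVGPU_COV_WHITELIST* and NVGPU_MISRA definitions come from nvgpu's headers and are not reproduced here.

/*
 * Illustrative sketch only: hypothetical no-op stand-ins for the nvgpu
 * allowlist macros, so the usage pattern from the patch can be read and
 * compiled in isolation.  An analyzer-aware build would define these to
 * emit the tool's deviation annotations instead of nothing.
 */
#include <stdlib.h>

#define NVGPU_MISRA(type, num)
#define NVGPU_COV_WHITELIST(action, checker, tid)
#define NVGPU_COV_WHITELIST_BLOCK_BEGIN(action, count, checker, tid)
#define NVGPU_COV_WHITELIST_BLOCK_END(checker)

struct example_cache {
	size_t size;
};

static void *example_cache_alloc(struct example_cache *cache)
{
	void *ptr;

	/* Deviate both rules only for the single malloc() statement. */
	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 21_3), "TID-1131")
	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 4_12), "TID-1129")
	ptr = malloc(cache->size);
	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 4_12))
	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 21_3))

	return ptr;
}

static void example_cache_free(void *ptr)
{
	/* A lone deviating statement only needs the single-line form. */
	NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
	free(ptr);
}

int main(void)
{
	struct example_cache cache = { .size = 64U };
	void *p = example_cache_alloc(&cache);

	if (p != NULL) {
		example_cache_free(p);
	}
	return 0;
}

Keeping the allowlist in function-like macros lets safety-analysis builds and normal builds share the same source, and each deviation carries its approved RFD ticket (the TID string) next to the code it covers.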