gpu: nvgpu: allowlist MISRA violations per approved RFDs

- Rule 21.3:
  The memory allocation and deallocation functions of <stdlib.h>
  shall not be used.

- Rule 4.12:
  Dynamic memory allocation shall not be used.

- These deviations are covered by approved RFDs (see the sketch below):
  https://jirasw.nvidia.com/browse/TID-1129
  https://jirasw.nvidia.com/browse/TID-1131

JIRA NVGPU-5955
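
The deviation pattern applied by this change is illustrated in the
standalone sketch below. The example_cache_* functions and the no-op
placeholder macro definitions are hypothetical and exist only so the
sketch builds outside the nvgpu tree; in the driver the
NVGPU_COV_WHITELIST* macros come from nvgpu's coverity whitelist
header and are assumed to expand to tool-specific suppression
directives.

/*
 * Standalone sketch of the deviation pattern (illustrative only).
 * The placeholder macro definitions below are NOT the real nvgpu
 * definitions; they are no-ops so the sketch compiles on its own.
 */
#include <stdlib.h>

#ifndef NVGPU_COV_WHITELIST_BLOCK_BEGIN
#define NVGPU_COV_WHITELIST_BLOCK_BEGIN(type, count, rule, rfd)
#define NVGPU_COV_WHITELIST_BLOCK_END(rule)
#define NVGPU_COV_WHITELIST(type, rule, rfd)
#define NVGPU_MISRA(kind, num) 0
#endif

struct example_cache {
	size_t size;
};

/*
 * malloc() deviates from both Rule 21.3 and Rule 4.12, so the call
 * is bracketed by one BEGIN/END pair per rule, each citing its RFD.
 */
static struct example_cache *example_cache_create(size_t size)
{
	struct example_cache *cache;

	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 21_3), "TID-1131")
	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 4_12), "TID-1129")
	cache = malloc(sizeof(struct example_cache));
	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 4_12))
	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 21_3))
	if (cache == NULL) {
		return NULL;
	}
	cache->size = size;
	return cache;
}

/*
 * free() deviates only from Rule 21.3, so the single-line form that
 * whitelists the next statement is sufficient.
 */
static void example_cache_destroy(struct example_cache *cache)
{
	NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
	free(cache);
}

int main(void)
{
	struct example_cache *cache = example_cache_create(64U);

	example_cache_destroy(cache);
	return 0;
}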

Change-Id: I1bff5d63b406d91f61a333da59cf43b9fb0a3a92
Signed-off-by: srajum <srajum@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2572086
(cherry picked from commit c8840abab61a50c7afb561eac884a40a1338397d)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2674342
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Rajesh Devaraj <rdevaraj@nvidia.com>
Reviewed-by: Ankur Kishore <ankkishore@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    srajum
Date:      2021-08-07 00:06:53 +05:30
Committer: mobile promotions
Parent:    d1b3a9359a
Commit:    ee670a0afd

@@ -73,7 +73,11 @@ struct nvgpu_kmem_cache *nvgpu_kmem_cache_create(struct gk20a *g, size_t size)
 		return NULL;
 	}
 #endif
+	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 21_3), "TID-1131")
+	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 4_12), "TID-1129")
 	cache = malloc(sizeof(struct nvgpu_kmem_cache));
+	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 4_12))
+	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 21_3))
 	if (cache == NULL) {
 		return NULL;
@@ -93,6 +97,7 @@ struct nvgpu_kmem_cache *nvgpu_kmem_cache_create(struct gk20a *g, size_t size)
 void nvgpu_kmem_cache_destroy(struct nvgpu_kmem_cache *cache)
 {
+	NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
 	free(cache);
 }
@@ -106,7 +111,11 @@ void *nvgpu_kmem_cache_alloc(struct nvgpu_kmem_cache *cache)
 		return NULL;
 	}
 #endif
+	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 21_3), "TID-1131")
+	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 4_12), "TID-1129")
 	ptr = malloc(cache->size);
+	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 4_12))
+	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 21_3))
 	if (ptr == NULL) {
 		nvgpu_warn(NULL, "malloc returns NULL");
@@ -119,6 +128,7 @@ void *nvgpu_kmem_cache_alloc(struct nvgpu_kmem_cache *cache)
 void nvgpu_kmem_cache_free(struct nvgpu_kmem_cache *cache, void *ptr)
 {
 	(void)cache;
+	NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
 	free(ptr);
 }
@@ -142,7 +152,11 @@ void *nvgpu_kmalloc_impl(struct gk20a *g, size_t size, void *ip)
 	 * nvmap_page_alloc in qnx (i.e. using shm_open/shm_ctl_special/mmap
 	 * calls).
 	 */
+	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 21_3), "TID-1131")
+	NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 4_12), "TID-1129")
 	ptr = malloc(size);
+	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 4_12))
+	NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 21_3))
 	if (ptr == NULL) {
 		nvgpu_warn(NULL, "malloc returns NULL");
@@ -166,6 +180,7 @@ void *nvgpu_kzalloc_impl(struct gk20a *g, size_t size, void *ip)
 		return NULL;
 	}
 #endif
+	NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
 	ptr = calloc(num, size);
 	if (ptr == NULL) {
@@ -190,6 +205,7 @@ void *nvgpu_kcalloc_impl(struct gk20a *g, size_t n, size_t size, void *ip)
 		return NULL;
 	}
 #endif
+	NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
 	ptr = calloc(num, (nvgpu_safe_mult_u64(n, size)));
 	if (ptr == NULL) {
@@ -213,6 +229,7 @@ void *nvgpu_vzalloc_impl(struct gk20a *g, unsigned long size, void *ip)
 void nvgpu_kfree_impl(struct gk20a *g, void *addr)
 {
 	(void)g;
+	NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
 	free(addr);
 }
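
For context, a caller-side sketch follows, built only from the
function signatures visible in the hunks above. The header paths,
the struct foo type, and the error-handling style are assumptions
for illustration; the deviations are internal to this allocator, so
callers of the cache API need no whitelist annotations of their own.

/*
 * Caller-side sketch (illustrative only). Header paths and struct
 * foo are assumptions; the nvgpu_kmem_cache_* signatures match the
 * hunks above.
 */
#include <errno.h>

#include <nvgpu/gk20a.h>
#include <nvgpu/kmem.h>

struct foo {
	int bar;
};

static int foo_demo(struct gk20a *g)
{
	struct nvgpu_kmem_cache *cache;
	struct foo *f;

	/* Backed by malloc() in the posix/qnx implementation above. */
	cache = nvgpu_kmem_cache_create(g, sizeof(struct foo));
	if (cache == NULL) {
		return -ENOMEM;
	}

	f = nvgpu_kmem_cache_alloc(cache);
	if (f == NULL) {
		nvgpu_kmem_cache_destroy(cache);
		return -ENOMEM;
	}
	f->bar = 1;

	/* Backed by free() in the implementation above. */
	nvgpu_kmem_cache_free(cache, f);
	nvgpu_kmem_cache_destroy(cache);

	return 0;
}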