linux-nvgpu/drivers/gpu/nvgpu/os/posix/kmem.c
srajum 585c3ab1c1 gpu: nvgpu: fixing MISRA violations
- Dir 4.12
  Dynamic memory allocation shall not be used.

- Rule 8.6
  "gp10b_device_info_parse_data" is declared but never defined

- Rule 5.7
  A tag name shall be a unique identifier

JIRA NVGPU-6536

Change-Id: I2f234d4aadd217f13b51e4dcadfa13d284a3750f
Signed-off-by: srajum <srajum@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2582076
(cherry picked from commit 7394eedcdfd606a4687adba1ce82e96b5d6e23f8)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2677542
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Ankur Kishore <ankkishore@nvidia.com>
GVS: Gerrit_Virtual_Submit
2022-03-08 05:31:42 -08:00
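
The Dir 4.12 and Rule 21.3 findings are not resolved by removing dynamic allocation (the POSIX shim below is deliberately built on malloc()/calloc()); each allocation site is instead wrapped in Coverity whitelist markers that record an approved, documented deviation. A minimal sketch of the pattern, using the same macros and deviation records (TID-1131 for Rule 21.3, TID-1129 for Dir 4.12) that appear throughout the file:

        NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 21_3), "TID-1131")
        NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Directive, 4_12), "TID-1129")
        ptr = malloc(size); /* the flagged statement: <stdlib.h> allocation */
        NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Directive, 4_12))
        NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 21_3))

Single-statement deviations use the one-line NVGPU_COV_WHITELIST(deviate, ...) form instead, as in nvgpu_kmem_cache_destroy() below.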

/*
 * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <stdlib.h>
#include <errno.h> /* for the ENOMEM returned by nvgpu_kmem_init() */

#include <nvgpu/bug.h>
#include <nvgpu/log.h>
#include <nvgpu/kmem.h>
#include <nvgpu/types.h>
#include <nvgpu/atomic.h>
#include <nvgpu/posix/kmem.h>
#include <nvgpu/posix/sizes.h>
#include <nvgpu/posix/bug.h>

#ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
#include <nvgpu/posix/posix-fault-injection.h>
#endif

#ifdef __NVGPU_UNIT_TEST__
#include <stdio.h> /* snprintf() for cache names */
#define CACHE_NAME_LEN 128
#endif

struct nvgpu_kmem_cache {
        struct gk20a *g;
        size_t size;
#ifdef __NVGPU_UNIT_TEST__
        char name[CACHE_NAME_LEN];
#endif
};

#ifdef __NVGPU_UNIT_TEST__
static nvgpu_atomic_t kmem_cache_id;
#endif

#ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
struct nvgpu_posix_fault_inj *nvgpu_kmem_get_fault_injection(void)
{
        struct nvgpu_posix_fault_inj_container *c =
                        nvgpu_posix_fault_injection_get_container();

        return &c->kmem_fi;
}
#endif
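
/*
 * Illustrative sketch, not part of the original file: a unit test can
 * fetch this handle and arm it so that an upcoming kmem call fails,
 * exercising the NULL-return and -ENOMEM paths below. The enable
 * helper named here is an assumption about the fault-injection
 * framework's API, not something defined in this file:
 *
 *      struct nvgpu_posix_fault_inj *kmem_fi =
 *                      nvgpu_kmem_get_fault_injection();
 *
 *      nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
 *      ptr = nvgpu_kmalloc(g, 64);     // returns NULL while armed
 *      nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
 */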

/*
 * kmem cache emulation: basically just do a regular malloc(). This is slower
 * but should not affect a user of kmem cache in the slightest bit.
 */
struct nvgpu_kmem_cache *nvgpu_kmem_cache_create(struct gk20a *g, size_t size)
{
        struct nvgpu_kmem_cache *cache;

#ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
        if (nvgpu_posix_fault_injection_handle_call(
                        nvgpu_kmem_get_fault_injection())) {
                return NULL;
        }
#endif

        NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 21_3), "TID-1131")
        NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Directive, 4_12), "TID-1129")
        cache = malloc(sizeof(struct nvgpu_kmem_cache));
        NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Directive, 4_12))
        NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 21_3))
        if (cache == NULL) {
                return NULL;
        }

        cache->g = g;
        cache->size = size;

#ifdef __NVGPU_UNIT_TEST__
        (void)snprintf(cache->name, sizeof(cache->name),
                        "nvgpu-cache-0x%p-%zu-%d", g, size,
                        nvgpu_atomic_inc_return(&kmem_cache_id));
#endif

        return cache;
}

void nvgpu_kmem_cache_destroy(struct nvgpu_kmem_cache *cache)
{
        NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
        free(cache);
}

void *nvgpu_kmem_cache_alloc(struct nvgpu_kmem_cache *cache)
{
        void *ptr;

#ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
        if (nvgpu_posix_fault_injection_handle_call(
                        nvgpu_kmem_get_fault_injection())) {
                return NULL;
        }
#endif

        NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 21_3), "TID-1131")
        NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Directive, 4_12), "TID-1129")
        ptr = malloc(cache->size);
        NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Directive, 4_12))
        NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 21_3))
        if (ptr == NULL) {
                nvgpu_warn(NULL, "malloc returns NULL");
                return NULL;
        }

        return ptr;
}

void nvgpu_kmem_cache_free(struct nvgpu_kmem_cache *cache, void *ptr)
{
        (void)cache;

        NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
        free(ptr);
}

void *nvgpu_kmalloc_impl(struct gk20a *g, size_t size, void *ip)
{
        void *ptr;

        (void)g;
        (void)ip;

#ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
        if (nvgpu_posix_fault_injection_handle_call(
                        nvgpu_kmem_get_fault_injection())) {
                return NULL;
        }
#endif

        /*
         * Since the callers don't really need the memory region to be
         * contiguous, use malloc here. If the need arises for this
         * interface to return contiguous memory, we can explore using
         * nvmap_page_alloc in qnx (i.e. using shm_open/shm_ctl_special/mmap
         * calls).
         */
        NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 21_3), "TID-1131")
        NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Directive, 4_12), "TID-1129")
        ptr = malloc(size);
        NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Directive, 4_12))
        NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 21_3))
        if (ptr == NULL) {
                nvgpu_warn(NULL, "malloc returns NULL");
                return NULL;
        }

        return ptr;
}

void *nvgpu_kzalloc_impl(struct gk20a *g, size_t size, void *ip)
{
        void *ptr;
        const size_t num = 1;

        (void)g;
        (void)ip;

#ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
        if (nvgpu_posix_fault_injection_handle_call(
                        nvgpu_kmem_get_fault_injection())) {
                return NULL;
        }
#endif

        NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
        ptr = calloc(num, size);
        if (ptr == NULL) {
                nvgpu_warn(NULL, "calloc returns NULL");
                return NULL;
        }

        return ptr;
}

void *nvgpu_kcalloc_impl(struct gk20a *g, size_t n, size_t size, void *ip)
{
        void *ptr;
        const size_t num = 1;

        (void)g;
        (void)ip;

#ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
        if (nvgpu_posix_fault_injection_handle_call(
                        nvgpu_kmem_get_fault_injection())) {
                return NULL;
        }
#endif

        NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
        ptr = calloc(num, (nvgpu_safe_mult_u64(n, size)));
        if (ptr == NULL) {
                nvgpu_warn(NULL, "calloc returns NULL");
                return NULL;
        }

        return ptr;
}

void *nvgpu_vmalloc_impl(struct gk20a *g, unsigned long size, void *ip)
{
        return nvgpu_kmalloc_impl(g, size, ip);
}

void *nvgpu_vzalloc_impl(struct gk20a *g, unsigned long size, void *ip)
{
        return nvgpu_kzalloc_impl(g, size, ip);
}

void nvgpu_kfree_impl(struct gk20a *g, void *addr)
{
        (void)g;

        NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 21_3), "TID-1131")
        free(addr);
}

void nvgpu_vfree_impl(struct gk20a *g, void *addr)
{
        nvgpu_kfree_impl(g, addr);
}

void *nvgpu_big_alloc_impl(struct gk20a *g, size_t size, bool clear)
{
        if (clear) {
                return nvgpu_kzalloc(g, size);
        } else {
                return nvgpu_kmalloc(g, size);
        }
}

void nvgpu_big_free(struct gk20a *g, void *p)
{
        nvgpu_kfree_impl(g, p);
}

int nvgpu_kmem_init(struct gk20a *g)
{
        (void)g;

#ifdef NVGPU_UNITTEST_FAULT_INJECTION_ENABLEMENT
        if (nvgpu_posix_fault_injection_handle_call(
                        nvgpu_kmem_get_fault_injection())) {
                return -ENOMEM;
        }
#endif

        /* Nothing to init at the moment. */
        return 0;
}

void nvgpu_kmem_fini(struct gk20a *g, int flags)
{
        (void)g;
        (void)flags;
}
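
For reference, a minimal usage sketch of the API this file implements. It assumes an already-initialized struct gk20a *g from the caller's context; struct my_obj and kmem_example() are hypothetical names, and the *_impl variants are called directly (with a NULL ip tag, which this port ignores) rather than through the nvgpu_kmalloc()/nvgpu_kzalloc() wrapper macros:

struct my_obj {
        int id;
};

static int kmem_example(struct gk20a *g)
{
        struct nvgpu_kmem_cache *cache;
        struct my_obj *obj;
        void *buf;

        /* Fixed-size allocator: typically one cache per object type. */
        cache = nvgpu_kmem_cache_create(g, sizeof(struct my_obj));
        if (cache == NULL) {
                return -ENOMEM;
        }

        obj = nvgpu_kmem_cache_alloc(cache);
        if (obj == NULL) {
                nvgpu_kmem_cache_destroy(cache);
                return -ENOMEM;
        }

        /* Zeroed, variable-size allocation. */
        buf = nvgpu_kzalloc_impl(g, 4096, NULL);
        if (buf == NULL) {
                nvgpu_kmem_cache_free(cache, obj);
                nvgpu_kmem_cache_destroy(cache);
                return -ENOMEM;
        }

        nvgpu_kfree_impl(g, buf);
        nvgpu_kmem_cache_free(cache, obj);
        nvgpu_kmem_cache_destroy(cache);

        return 0;
}

Every allocator in this port can legitimately return NULL (malloc()/calloc() failure or armed fault injection), so each call site checks and unwinds, mirroring how the kernel-side kmem API is consumed.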