gpu: nvgpu: fix MISRA violations in utils unit

MISRA rule 11.6 states that a cast shall not be performed between
pointer to void and an arithmetic type.  Fix violations of rule 11.6
in utils unit.

Jira NVGPU-3300

Change-Id: I9513baf326be9618bae9bcfed597bfe27a5a2f47
Signed-off-by: ajesh <akv@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2137305
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
ajesh
2019-06-17 14:44:45 +05:30
committed by mobile promotions
parent 2aaf7f4586
commit eaf1048111
7 changed files with 42 additions and 45 deletions

View File

@@ -553,19 +553,24 @@ static int nvgpu_vidmem_clear_all(struct gk20a *g)
return 0;
}
struct nvgpu_vidmem_buf *nvgpu_vidmem_user_alloc(struct gk20a *g, size_t bytes)
int nvgpu_vidmem_user_alloc(struct gk20a *g, size_t bytes,
struct nvgpu_vidmem_buf **vidmem_buf)
{
struct nvgpu_vidmem_buf *buf;
int err;
if (vidmem_buf == NULL) {
return -EINVAL;
}
err = nvgpu_vidmem_clear_all(g);
if (err != 0) {
return ERR_PTR(-ENOMEM);
return -ENOMEM;
}
buf = nvgpu_kzalloc(g, sizeof(*buf));
if (buf == NULL) {
return ERR_PTR(-ENOMEM);
return -ENOMEM;
}
buf->g = g;
@@ -586,13 +591,15 @@ struct nvgpu_vidmem_buf *nvgpu_vidmem_user_alloc(struct gk20a *g, size_t bytes)
*/
buf->mem->mem_flags |= NVGPU_MEM_FLAG_USER_MEM;
return buf;
*vidmem_buf = buf;
return 0;
fail:
/* buf will never be NULL here. */
nvgpu_kfree(g, buf->mem);
nvgpu_kfree(g, buf);
return ERR_PTR(err);
return err;
}
void nvgpu_vidmem_buf_free(struct gk20a *g, struct nvgpu_vidmem_buf *buf)
@@ -600,7 +607,7 @@ void nvgpu_vidmem_buf_free(struct gk20a *g, struct nvgpu_vidmem_buf *buf)
/*
* In some error paths it's convenient to be able to "free" a NULL buf.
*/
if (IS_ERR_OR_NULL(buf)) {
if (buf == NULL) {
return;
}

View File

@@ -58,7 +58,8 @@ int vgpu_fecs_trace_init(struct gk20a *g)
nvgpu_set_enabled(g, NVGPU_SUPPORT_FECS_CTXSW_TRACE, true);
vcst->cookie = vgpu_ivm_mempool_reserve(mempool);
if (IS_ERR(vcst->cookie)) {
if ((vcst->cookie == NULL) ||
((unsigned long)vcst->cookie >= (unsigned long)-MAX_ERRNO)) {
nvgpu_info(g,
"mempool %u reserve failed", mempool);
vcst->cookie = NULL;

View File

@@ -34,30 +34,40 @@
static struct tegra_hv_ivm_cookie *css_cookie;
static struct tegra_hv_ivm_cookie *vgpu_css_reserve_mempool(struct gk20a *g)
static int vgpu_css_reserve_mempool(struct gk20a *g,
struct tegra_hv_ivm_cookie **cookie_p)
{
struct tegra_hv_ivm_cookie *cookie;
u32 mempool;
int err;
if (cookie_p == NULL) {
return -EINVAL;
}
err = nvgpu_dt_read_u32_index(g, "mempool-css", 1, &mempool);
if (err) {
nvgpu_err(g, "dt missing mempool-css");
return (struct tegra_hv_ivm_cookie *)ERR_PTR(err);
return err;
}
cookie = vgpu_ivm_mempool_reserve(mempool);
if (IS_ERR_OR_NULL(cookie)) {
if ((cookie == NULL) ||
((unsigned long)cookie >= (unsigned long)-MAX_ERRNO)) {
nvgpu_err(g, "mempool %u reserve failed", mempool);
return (struct tegra_hv_ivm_cookie *)ERR_PTR(-EINVAL);
return -EINVAL;
}
return cookie;
*cookie_p = cookie;
return 0;
}
u32 vgpu_css_get_buffer_size(struct gk20a *g)
{
struct tegra_hv_ivm_cookie *cookie;
u32 size;
int err;
nvgpu_log_fn(g, " ");
@@ -67,8 +77,8 @@ u32 vgpu_css_get_buffer_size(struct gk20a *g)
return size;
}
cookie = vgpu_css_reserve_mempool(g);
if (IS_ERR(cookie)) {
err = vgpu_css_reserve_mempool(g, &cookie);
if (0 != err) {
return 0;
}
@@ -92,9 +102,9 @@ static int vgpu_css_init_snapshot_buffer(struct gk20a *g)
return 0;
}
css_cookie = vgpu_css_reserve_mempool(g);
if (IS_ERR(css_cookie)) {
return PTR_ERR(css_cookie);
err = vgpu_css_reserve_mempool(g, &css_cookie);
if (0 != err) {
return err;
}
size = vgpu_ivm_get_size(css_cookie);

View File

@@ -566,7 +566,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
*num_offsets = num_registers;
cleanup:
if (!IS_ERR_OR_NULL(priv_registers)) {
if (priv_registers != NULL) {
nvgpu_kfree(g, priv_registers);
}

View File

@@ -256,28 +256,6 @@ static inline unsigned int nvgpu_posix_hweight64(uint64_t x)
#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)
/* Encode a negative errno value as an opaque pointer (Linux err.h idiom). */
static inline void *ERR_PTR(long error)
{
return (void *) error;
}
/* Decode the errno value previously packed into a pointer by ERR_PTR().
 * Round-trips through uintptr_t to keep the pointer-to-integer cast
 * well-defined. */
static inline long PTR_ERR(void *error)
{
return (long)(uintptr_t)error;
}
/* True when ptr holds an ERR_PTR()-encoded errno, i.e. its integer value
 * falls within the top MAX_ERRNO addresses (see IS_ERR_VALUE() above). */
static inline bool IS_ERR(const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
/* True when ptr is NULL or an ERR_PTR()-encoded errno value. */
static inline bool IS_ERR_OR_NULL(const void *ptr)
{
return (ptr == NULL) || IS_ERR_VALUE((unsigned long)ptr);
}
#define ERESTARTSYS ERESTART
#endif /* NVGPU_POSIX_UTILS_H */

View File

@@ -54,6 +54,7 @@ struct nvgpu_vidmem_buf {
*
* @g - The GPU.
* @bytes - Size of the buffer in bytes.
* @vidmem_buf - Pointer to return vidmem buffer.
*
* Allocate a generic (OS agnostic) vidmem buffer. This does not allocate the OS
* specific interfacing for userspace sharing. Instead it is expected that the
@@ -64,9 +65,10 @@ struct nvgpu_vidmem_buf {
* extra struct over nvgpu_mem. If a vidmem buffer is needed by the kernel
* driver only then a simple nvgpu_dma_alloc_vid() or the like is sufficient.
*
* Returns a pointer to a vidmem buffer on success, 0 otherwise.
* Returns 0 on success and error value on failure.
*/
struct nvgpu_vidmem_buf *nvgpu_vidmem_user_alloc(struct gk20a *g, size_t bytes);
int nvgpu_vidmem_user_alloc(struct gk20a *g, size_t bytes,
struct nvgpu_vidmem_buf **vidmem_buf);
void nvgpu_vidmem_buf_free(struct gk20a *g, struct nvgpu_vidmem_buf *buf);

View File

@@ -188,9 +188,8 @@ int nvgpu_vidmem_export_linux(struct gk20a *g, size_t bytes)
goto fail;
}
buf = nvgpu_vidmem_user_alloc(g, bytes);
if (IS_ERR(buf)) {
err = PTR_ERR(buf);
err = nvgpu_vidmem_user_alloc(g, bytes, &buf);
if (0 != err) {
goto fail;
}