Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 10:34:43 +03:00)
gpu: nvgpu: fix MISRA 11.2 and 16.x in nvgpu_mem
MISRA Rule 11.2 doesn't allow conversion to or from a pointer to an
incomplete type, as it may result in incorrect pointer alignment and may
further lead to undefined behavior. MISRA Rule 16.x requires all switch
statements to be well-formed, with a terminating break statement for
every switch-clause. This patch fixes the 11.2 and 16.x violations in
common.mm.nvgpu_mem.

Jira NVGPU-3339

Change-Id: I002393cc64d44826e6954d1bf6af71bd569e862f
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2113096
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: b96ac290c8
Commit: b4590adae4
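For readers unfamiliar with Rule 11.2, the sketch below condenses the pattern this patch applies. It is illustrative only: u64 is a local stand-in for the kernel typedef and sgl_length is a hypothetical callback, not the full nvgpu sources. The handle type struct nvgpu_sgl stays deliberately incomplete; taking void * and converting exactly once to the concrete type keeps the cast out of 11.2's scope. The same reasoning explains double casts such as (struct nvgpu_sgl *)(void *)sgl in the hunks below, which route the conversion through void *.

typedef unsigned long long u64;   /* local stand-in for the kernel typedef */

struct nvgpu_sgl;                 /* opaque handle: deliberately incomplete */

struct nvgpu_mem_sgl {            /* a concrete scatter-gather entry */
	struct nvgpu_mem_sgl *next;
	u64 phys;
	u64 length;
};

/*
 * Before: the callback took struct nvgpu_sgl * and cast it straight to
 * struct nvgpu_mem_sgl *, i.e. a conversion involving a pointer to an
 * incomplete type (flagged by MISRA 11.2):
 *
 *     static u64 sgl_length(struct nvgpu_sgl *sgl);
 *
 * After: the callback takes void * and converts exactly once to the
 * concrete implementation type.
 */
static u64 sgl_length(void *sgl)
{
	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;

	return sgl_impl->length;
}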
@@ -153,14 +153,14 @@ static void nvgpu_page_release_co(struct nvgpu_allocator *a,
 	nvgpu_alloc_release_carveout(&va->source_allocator, co);
 }
 
-static struct nvgpu_sgl *nvgpu_page_alloc_sgl_next(struct nvgpu_sgl *sgl)
+static struct nvgpu_sgl *nvgpu_page_alloc_sgl_next(void *sgl)
 {
 	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
 	return (struct nvgpu_sgl *)sgl_impl->next;
 }
 
-static u64 nvgpu_page_alloc_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
+static u64 nvgpu_page_alloc_sgl_phys(struct gk20a *g, void *sgl)
 {
 	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
@@ -173,22 +173,21 @@ static u64 nvgpu_page_alloc_sgl_ipa_to_pa(struct gk20a *g,
 	return ipa;
 }
 
-static u64 nvgpu_page_alloc_sgl_dma(struct nvgpu_sgl *sgl)
+static u64 nvgpu_page_alloc_sgl_dma(void *sgl)
 {
 	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
 	return sgl_impl->dma;
 }
 
-static u64 nvgpu_page_alloc_sgl_length(struct nvgpu_sgl *sgl)
+static u64 nvgpu_page_alloc_sgl_length(void *sgl)
 {
 	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
 	return sgl_impl->length;
 }
 
-static u64 nvgpu_page_alloc_sgl_gpu_addr(struct gk20a *g,
-					 struct nvgpu_sgl *sgl,
+static u64 nvgpu_page_alloc_sgl_gpu_addr(struct gk20a *g, void *sgl,
 					 struct nvgpu_gmmu_attrs *attrs)
 {
 	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
@@ -39,6 +39,7 @@ u32 nvgpu_aperture_mask_raw(struct gk20a *g, enum nvgpu_aperture aperture,
 			    u32 sysmem_mask, u32 sysmem_coh_mask,
 			    u32 vidmem_mask)
 {
+	u32 ret_mask = 0;
 	if ((aperture == APERTURE_INVALID) || (aperture >= APERTURE_MAX_ENUM)) {
 		nvgpu_do_assert_print(g, "Bad aperture");
 		return 0;
@@ -53,17 +54,22 @@ u32 nvgpu_aperture_mask_raw(struct gk20a *g, enum nvgpu_aperture aperture,
 
 	switch (aperture) {
 	case APERTURE_SYSMEM_COH:
-		return sysmem_coh_mask;
+		ret_mask = sysmem_coh_mask;
+		break;
 	case APERTURE_SYSMEM:
-		return sysmem_mask;
+		ret_mask = sysmem_mask;
+		break;
 	case APERTURE_VIDMEM:
-		return vidmem_mask;
+		ret_mask = vidmem_mask;
+		break;
 	case APERTURE_INVALID:
 	case APERTURE_MAX_ENUM:
 	default:
 		nvgpu_do_assert_print(g, "Bad aperture");
-		return 0;
+		ret_mask = 0;
+		break;
 	}
+	return ret_mask;
 }
 
 u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
@@ -221,25 +227,25 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 	}
 }
 
-static struct nvgpu_sgl *nvgpu_mem_phys_sgl_next(struct nvgpu_sgl *sgl)
+static struct nvgpu_sgl *nvgpu_mem_phys_sgl_next(void *sgl)
 {
 	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return (struct nvgpu_sgl *)sgl_impl->next;
+	return (struct nvgpu_sgl *)(void *)sgl_impl->next;
 }
 
 /*
  * Provided for compatibility - the DMA address is the same as the phys address
  * for these nvgpu_mem's.
  */
-static u64 nvgpu_mem_phys_sgl_dma(struct nvgpu_sgl *sgl)
+static u64 nvgpu_mem_phys_sgl_dma(void *sgl)
 {
 	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
 	return sgl_impl->phys;
 }
 
-static u64 nvgpu_mem_phys_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
+static u64 nvgpu_mem_phys_sgl_phys(struct gk20a *g, void *sgl)
 {
 	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
@@ -252,15 +258,14 @@ static u64 nvgpu_mem_phys_sgl_ipa_to_pa(struct gk20a *g,
 	return ipa;
 }
 
-static u64 nvgpu_mem_phys_sgl_length(struct nvgpu_sgl *sgl)
+static u64 nvgpu_mem_phys_sgl_length(void *sgl)
 {
 	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
 	return sgl_impl->length;
 }
 
-static u64 nvgpu_mem_phys_sgl_gpu_addr(struct gk20a *g,
-				       struct nvgpu_sgl *sgl,
+static u64 nvgpu_mem_phys_sgl_gpu_addr(struct gk20a *g, void *sgl,
 				       struct nvgpu_gmmu_attrs *attrs)
 {
 	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
@@ -320,7 +325,7 @@ int nvgpu_mem_create_from_phys(struct gk20a *g, struct nvgpu_mem *dest,
 	sgl->next = NULL;
 	sgl->phys = src_phys;
 	sgl->length = dest->size;
-	sgt->sgl = (struct nvgpu_sgl *)sgl;
+	sgt->sgl = (struct nvgpu_sgl *)(void *)sgl;
 	sgt->ops = &nvgpu_mem_phys_ops;
 
 	return ret;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -39,14 +39,14 @@ struct nvgpu_sgt;
 struct nvgpu_sgl;
 
 struct nvgpu_sgt_ops {
-	struct nvgpu_sgl *(*sgl_next)(struct nvgpu_sgl *sgl);
-	u64 (*sgl_phys)(struct gk20a *g, struct nvgpu_sgl *sgl);
-	u64 (*sgl_ipa)(struct gk20a *g, struct nvgpu_sgl *sgl);
+	struct nvgpu_sgl *(*sgl_next)(void *sgl);
+	u64 (*sgl_phys)(struct gk20a *g, void *sgl);
+	u64 (*sgl_ipa)(struct gk20a *g, void *sgl);
 	u64 (*sgl_ipa_to_pa)(struct gk20a *g, struct nvgpu_sgl *sgl,
 			     u64 ipa, u64 *pa_len);
-	u64 (*sgl_dma)(struct nvgpu_sgl *sgl);
-	u64 (*sgl_length)(struct nvgpu_sgl *sgl);
-	u64 (*sgl_gpu_addr)(struct gk20a *g, struct nvgpu_sgl *sgl,
+	u64 (*sgl_dma)(void *sgl);
+	u64 (*sgl_length)(void *sgl);
+	u64 (*sgl_gpu_addr)(struct gk20a *g, void *sgl,
 			    struct nvgpu_gmmu_attrs *attrs);
 	/*
 	 * If left NULL then iommuable is assumed to be false.
@@ -137,13 +137,13 @@ void nvgpu_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt);
 bool nvgpu_sgt_iommuable(struct gk20a *g, struct nvgpu_sgt *sgt);
 u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt);
 
-struct nvgpu_sgl *nvgpu_mem_sgl_next(struct nvgpu_sgl *sgl);
-u64 nvgpu_mem_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl);
+struct nvgpu_sgl *nvgpu_mem_sgl_next(void *sgl);
+u64 nvgpu_mem_sgl_phys(struct gk20a *g, void *sgl);
 u64 nvgpu_mem_sgl_ipa_to_pa(struct gk20a *g, struct nvgpu_sgl *sgl, u64 ipa,
 			    u64 *pa_len);
-u64 nvgpu_mem_sgl_dma(struct nvgpu_sgl *sgl);
-u64 nvgpu_mem_sgl_length(struct nvgpu_sgl *sgl);
-u64 nvgpu_mem_sgl_gpu_addr(struct gk20a *g, struct nvgpu_sgl *sgl,
+u64 nvgpu_mem_sgl_dma(void *sgl);
+u64 nvgpu_mem_sgl_length(void *sgl);
+u64 nvgpu_mem_sgl_gpu_addr(struct gk20a *g, void *sgl,
 			   struct nvgpu_gmmu_attrs *attrs);
 bool nvgpu_mem_sgt_iommuable(struct gk20a *g, struct nvgpu_sgt *sgt);
 void nvgpu_mem_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt);
@@ -187,12 +187,12 @@ int nvgpu_mem_create_from_mem(struct gk20a *g,
 	return ret;
 }
 
-static struct nvgpu_sgl *nvgpu_mem_linux_sgl_next(struct nvgpu_sgl *sgl)
+static struct nvgpu_sgl *nvgpu_mem_linux_sgl_next(void *sgl)
 {
 	return (struct nvgpu_sgl *)sg_next((struct scatterlist *)sgl);
 }
 
-static u64 nvgpu_mem_linux_sgl_ipa(struct gk20a *g, struct nvgpu_sgl *sgl)
+static u64 nvgpu_mem_linux_sgl_ipa(struct gk20a *g, void *sgl)
 {
 	return __nvgpu_sgl_ipa(g, sgl);
 }
@@ -209,23 +209,23 @@ static u64 nvgpu_mem_linux_sgl_ipa_to_pa(struct gk20a *g,
 	return ipa;
 }
 
-static u64 nvgpu_mem_linux_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
+static u64 nvgpu_mem_linux_sgl_phys(struct gk20a *g, void *sgl)
 {
 	return (u64)__nvgpu_sgl_phys(g, sgl);
 }
 
-static u64 nvgpu_mem_linux_sgl_dma(struct nvgpu_sgl *sgl)
+static u64 nvgpu_mem_linux_sgl_dma(void *sgl)
 {
 	return (u64)sg_dma_address((struct scatterlist *)sgl);
 }
 
-static u64 nvgpu_mem_linux_sgl_length(struct nvgpu_sgl *sgl)
+static u64 nvgpu_mem_linux_sgl_length(void *sgl)
 {
 	return (u64)((struct scatterlist *)sgl)->length;
 }
 
 static u64 nvgpu_mem_linux_sgl_gpu_addr(struct gk20a *g,
-					struct nvgpu_sgl *sgl,
+					void *sgl,
 					struct nvgpu_gmmu_attrs *attrs)
 {
 	if (sg_dma_address((struct scatterlist *)sgl) == 0)
@@ -45,14 +45,14 @@ u64 nvgpu_mem_get_phys_addr(struct gk20a *g, struct nvgpu_mem *mem)
 	return (u64)(uintptr_t)mem->cpu_va;
 }
 
-struct nvgpu_sgl *nvgpu_mem_sgl_next(struct nvgpu_sgl *sgl)
+struct nvgpu_sgl *nvgpu_mem_sgl_next(void *sgl)
 {
 	struct nvgpu_mem_sgl *mem = (struct nvgpu_mem_sgl *)sgl;
 
 	return (struct nvgpu_sgl *) mem->next;
 }
 
-u64 nvgpu_mem_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
+u64 nvgpu_mem_sgl_phys(struct gk20a *g, void *sgl)
 {
 	struct nvgpu_mem_sgl *mem = (struct nvgpu_mem_sgl *)sgl;
 
@@ -65,21 +65,21 @@ u64 nvgpu_mem_sgl_ipa_to_pa(struct gk20a *g, struct nvgpu_sgl *sgl,
 	return nvgpu_mem_sgl_phys(g, sgl);
 }
 
-u64 nvgpu_mem_sgl_dma(struct nvgpu_sgl *sgl)
+u64 nvgpu_mem_sgl_dma(void *sgl)
 {
 	struct nvgpu_mem_sgl *mem = (struct nvgpu_mem_sgl *)sgl;
 
 	return (u64)(uintptr_t)mem->dma;
 }
 
-u64 nvgpu_mem_sgl_length(struct nvgpu_sgl *sgl)
+u64 nvgpu_mem_sgl_length(void *sgl)
 {
 	struct nvgpu_mem_sgl *mem = (struct nvgpu_mem_sgl *)sgl;
 
 	return (u64)mem->length;
 }
 
-u64 nvgpu_mem_sgl_gpu_addr(struct gk20a *g, struct nvgpu_sgl *sgl,
+u64 nvgpu_mem_sgl_gpu_addr(struct gk20a *g, void *sgl,
 			   struct nvgpu_gmmu_attrs *attrs)
 {
 	struct nvgpu_mem_sgl *mem = (struct nvgpu_mem_sgl *)sgl;
@@ -31,13 +31,13 @@
 
 /* nvgpu_sgt_ops overrides for basic api testing */
 #define EXPECTED_U64 0x123456789ABCDEF0ULL
-static u64 ops_sgl_gpu_addr(struct gk20a *g, struct nvgpu_sgl *sgl,
+static u64 ops_sgl_gpu_addr(struct gk20a *g, void *sgl,
 			    struct nvgpu_gmmu_attrs *attrs)
 {
 	return EXPECTED_U64;
 }
 
-static u64 ops_sgl_ipa(struct gk20a *g, struct nvgpu_sgl *sgl)
+static u64 ops_sgl_ipa(struct gk20a *g, void *sgl)
 {
 	return EXPECTED_U64;
 }
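For reference, here is the well-formed switch shape (Rule 16.x) that nvgpu_aperture_mask_raw adopts in the hunks above, condensed into a self-contained sketch. The typedef and the enum values are local stand-ins rather than the nvgpu headers: every switch-clause ends in a break, invalid values land in a default clause, and the function exits through a single return.

typedef unsigned int u32;         /* local stand-in for the kernel typedef */

enum nvgpu_aperture {             /* stand-in values for illustration */
	APERTURE_INVALID = 0,
	APERTURE_SYSMEM,
	APERTURE_SYSMEM_COH,
	APERTURE_VIDMEM,
	APERTURE_MAX_ENUM
};

static u32 aperture_mask(enum nvgpu_aperture ap, u32 sysmem_mask,
			 u32 sysmem_coh_mask, u32 vidmem_mask)
{
	u32 ret_mask;

	switch (ap) {
	case APERTURE_SYSMEM_COH:
		ret_mask = sysmem_coh_mask;
		break;
	case APERTURE_SYSMEM:
		ret_mask = sysmem_mask;
		break;
	case APERTURE_VIDMEM:
		ret_mask = vidmem_mask;
		break;
	case APERTURE_INVALID:
	case APERTURE_MAX_ENUM:
	default:
		ret_mask = 0;
		break;
	}
	return ret_mask;	/* single exit point */
}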