Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 10:34:43 +03:00)
gpu: nvgpu: Fix MISRA 21.2 violations (nvgpu_mem.c, mm.c)
MISRA 21.2 states that we may not use reserved identifiers; since all identifiers beginning with '_' are reserved by libc, the usage of '__' as a prefix is disallowed. Handle the 21.2 fixes for nvgpu_mem.c and mm.c; this deletes the '__' prefixes and slightly renames the __nvgpu_aperture_mask() function since there's a coherent version and a general version.

Change-Id: Iee871ad90db3f2622f9099bd9992eb994e0fbf34
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1813623
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: ba2a632f03
Commit: 2c95becc9e
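For readers unfamiliar with MISRA C:2012 Rule 21.2, here is a minimal, hypothetical illustration of the identifier class this commit removes; the helper names below are invented for the example and do not appear in the driver. Identifiers that begin with a double underscore are reserved for the implementation (C99/C11 7.1.3), which is why the '__' prefixes in the diff below have to go:

#include <stdio.h>

/*
 * Non-compliant: an identifier beginning with '__' is reserved for the
 * implementation, so declaring it violates MISRA C:2012 Rule 21.2.
 */
static unsigned int __mask_low_byte(unsigned int x)
{
        return x & 0xffU;
}

/*
 * Compliant: the same helper with the reserved prefix dropped, mirroring
 * the renames made in this commit.
 */
static unsigned int mask_low_byte(unsigned int x)
{
        return x & 0xffU;
}

int main(void)
{
        printf("%u %u\n", __mask_low_byte(0x1234U), mask_low_byte(0x1234U));
        return 0;
}

The renames in the diff follow the same pattern: __get_pte_size() becomes nvgpu_vm_get_pte_size(), __nv_gmmu_va_small_page_limit() becomes nvgpu_gmmu_va_small_page_limit(), __APERTURE_SYSMEM_COH becomes APERTURE_SYSMEM_COH, and __nvgpu_aperture_mask() becomes nvgpu_aperture_mask_coh().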
@@ -720,7 +720,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
          * correct based on the IO coherency flag.
          */
         if (attrs.coherent && attrs.aperture == APERTURE_SYSMEM) {
-                attrs.aperture = __APERTURE_SYSMEM_COH;
+                attrs.aperture = APERTURE_SYSMEM_COH;
         }
 
         /*
@@ -36,8 +36,8 @@
  * Attempt to find a reserved memory area to determine PTE size for the passed
  * mapping. If no reserved area can be found use small pages.
  */
-u32 __get_pte_size_fixed_map(struct vm_gk20a *vm,
-                u64 base, u64 size)
+static u32 nvgpu_vm_get_pte_size_fixed_map(struct vm_gk20a *vm,
+                u64 base, u64 size)
 {
         struct nvgpu_vm_area *vm_area;
 
@@ -52,8 +52,8 @@ u32 __get_pte_size_fixed_map(struct vm_gk20a *vm,
 /*
  * This is for when the address space does not support unified address spaces.
  */
-static u32 __get_pte_size_split_addr(struct vm_gk20a *vm,
-                u64 base, u64 size)
+static u32 nvgpu_vm_get_pte_size_split_addr(struct vm_gk20a *vm,
+                u64 base, u64 size)
 {
         if (!base) {
                 if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]) {
@@ -61,7 +61,7 @@ static u32 __get_pte_size_split_addr(struct vm_gk20a *vm,
                 }
                 return GMMU_PAGE_SIZE_SMALL;
         } else {
-                if (base < __nv_gmmu_va_small_page_limit()) {
+                if (base < nvgpu_gmmu_va_small_page_limit()) {
                         return GMMU_PAGE_SIZE_SMALL;
                 } else {
                         return GMMU_PAGE_SIZE_BIG;
@@ -90,7 +90,7 @@ static u32 __get_pte_size_split_addr(struct vm_gk20a *vm,
  *   - Regardless of buffer size use small pages since we have no
  *     guarantee of contiguity.
  */
-u32 __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
+u32 nvgpu_vm_get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 {
         struct gk20a *g = gk20a_from_vm(vm);
 
@@ -99,11 +99,11 @@ u32 __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
         }
 
         if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
-                return __get_pte_size_split_addr(vm, base, size);
+                return nvgpu_vm_get_pte_size_split_addr(vm, base, size);
         }
 
         if (base) {
-                return __get_pte_size_fixed_map(vm, base, size);
+                return nvgpu_vm_get_pte_size_fixed_map(vm, base, size);
         }
 
         if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG] &&
@@ -33,8 +33,9 @@
  * will not add any checks. If you want to simply use the default coherency then
  * use nvgpu_aperture_mask().
  */
-u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
-                u32 sysmem_mask, u32 sysmem_coh_mask, u32 vidmem_mask)
+u32 nvgpu_aperture_mask_coh(struct gk20a *g, enum nvgpu_aperture aperture,
+                u32 sysmem_mask, u32 sysmem_coh_mask,
+                u32 vidmem_mask)
 {
         /*
          * Some iGPUs treat sysmem (i.e SoC DRAM) as vidmem. In these cases the
@@ -45,7 +46,7 @@ u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
         }
 
         switch (aperture) {
-        case __APERTURE_SYSMEM_COH:
+        case APERTURE_SYSMEM_COH:
                 return sysmem_coh_mask;
         case APERTURE_SYSMEM:
                 return sysmem_mask;
@@ -69,16 +70,18 @@ u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
          */
         if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) &&
             ap == APERTURE_SYSMEM) {
-                ap = __APERTURE_SYSMEM_COH;
+                ap = APERTURE_SYSMEM_COH;
         }
 
-        return __nvgpu_aperture_mask(g, ap,
-                        sysmem_mask, sysmem_coh_mask, vidmem_mask);
+        return nvgpu_aperture_mask_coh(g, ap,
+                        sysmem_mask,
+                        sysmem_coh_mask,
+                        vidmem_mask);
 }
 
 bool nvgpu_aperture_is_sysmem(enum nvgpu_aperture ap)
 {
-        return ap == __APERTURE_SYSMEM_COH || ap == APERTURE_SYSMEM;
+        return ap == APERTURE_SYSMEM_COH || ap == APERTURE_SYSMEM;
 }
 
 bool nvgpu_mem_is_sysmem(struct nvgpu_mem *mem)
@@ -359,8 +359,8 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
                         user_lp_vma_limit = user_vma_limit;
                 } else {
                         user_vma_start = low_hole;
-                        user_vma_limit = __nv_gmmu_va_small_page_limit();
-                        user_lp_vma_start = __nv_gmmu_va_small_page_limit();
+                        user_vma_limit = nvgpu_gmmu_va_small_page_limit();
+                        user_lp_vma_start = nvgpu_gmmu_va_small_page_limit();
                         user_lp_vma_limit = vm->va_limit - kernel_reserved;
                 }
         } else {
@@ -892,7 +892,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
         if (g->mm.disable_bigpage) {
                 binfo.pgsz_idx = GMMU_PAGE_SIZE_SMALL;
         } else {
-                binfo.pgsz_idx = __get_pte_size(vm, map_addr,
+                binfo.pgsz_idx = nvgpu_vm_get_pte_size(vm, map_addr,
                                 min_t(u64, binfo.size, align));
         }
         map_size = map_size ? map_size : binfo.size;
 
@@ -220,7 +220,7 @@ static void __update_pte(struct vm_gk20a *vm,
                 pte_w[0] |= gmmu_pte_privilege_true_f();
         }
 
-        pte_w[1] = __nvgpu_aperture_mask(g, attrs->aperture,
+        pte_w[1] = nvgpu_aperture_mask_coh(g, attrs->aperture,
                         gmmu_pte_aperture_sys_mem_ncoh_f(),
                         gmmu_pte_aperture_sys_mem_coh_f(),
                         gmmu_pte_aperture_video_memory_f()) |
@@ -191,7 +191,7 @@ static void __update_pte(struct vm_gk20a *vm,
         u32 pte_addr = attrs->aperture == APERTURE_SYSMEM ?
                 gmmu_new_pte_address_sys_f(phys_shifted) :
                 gmmu_new_pte_address_vid_f(phys_shifted);
-        u32 pte_tgt = __nvgpu_aperture_mask(g,
+        u32 pte_tgt = nvgpu_aperture_mask_coh(g,
                         attrs->aperture,
                         gmmu_new_pte_aperture_sys_mem_ncoh_f(),
                         gmmu_new_pte_aperture_sys_mem_coh_f(),
@@ -202,14 +202,12 @@ static inline int bar1_aperture_size_mb_gk20a(void)
  * When not using unified address spaces, the bottom 56GB of the space are used
  * for small pages, and the remaining high memory is used for large pages.
  */
-static inline u64 __nv_gmmu_va_small_page_limit(void)
+static inline u64 nvgpu_gmmu_va_small_page_limit(void)
 {
-        return ((u64)SZ_1G * 56);
+        return ((u64)SZ_1G * 56U);
 }
 
-u32 __get_pte_size_fixed_map(struct vm_gk20a *vm,
-                u64 base, u64 size);
-u32 __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size);
+u32 nvgpu_vm_get_pte_size(struct vm_gk20a *vm, u64 base, u64 size);
 
 void nvgpu_init_mm_ce_context(struct gk20a *g);
 int nvgpu_init_mm_support(struct gk20a *g);
@@ -56,7 +56,7 @@ enum nvgpu_aperture {
         APERTURE_SYSMEM,
 
         /* Don't use directly. Use APERTURE_SYSMEM, this is used internally. */
-        __APERTURE_SYSMEM_COH,
+        APERTURE_SYSMEM_COH,
 
         APERTURE_VIDMEM
 };
@@ -211,7 +211,7 @@ static inline const char *nvgpu_aperture_str(struct gk20a *g,
                 return "INVAL";
         case APERTURE_SYSMEM:
                 return "SYSMEM";
-        case __APERTURE_SYSMEM_COH:
+        case APERTURE_SYSMEM_COH:
                 return "SYSCOH";
         case APERTURE_VIDMEM:
                 return "VIDMEM";
@@ -340,7 +340,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 u64 nvgpu_mem_get_addr(struct gk20a *g, struct nvgpu_mem *mem);
 u64 nvgpu_mem_get_phys_addr(struct gk20a *g, struct nvgpu_mem *mem);
 
-u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
+u32 nvgpu_aperture_mask_coh(struct gk20a *g, enum nvgpu_aperture aperture,
                 u32 sysmem_mask, u32 sysmem_coh_mask, u32 vidmem_mask);
 u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
                 u32 sysmem_mask, u32 sysmem_coh_mask, u32 vidmem_mask);
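As a reading aid, the split the commit message refers to, between a general aperture-mask helper and an explicit-coherency one, can be summarized with the simplified sketch below. It is not the driver source: the gk20a context, the nvgpu_mem types, and the NVGPU_USE_COHERENT_SYSMEM lookup are reduced to plain parameters, and the names are invented for the sketch; only the control flow mirrors the hunks above.

#include <stdio.h>

enum aperture { AP_SYSMEM, AP_SYSMEM_COH, AP_VIDMEM };

/*
 * Explicit variant (cf. nvgpu_aperture_mask_coh()): the caller selects the
 * coherent sysmem aperture directly and no coherency check is applied.
 */
static unsigned int aperture_mask_coh(enum aperture ap,
                unsigned int sysmem_mask, unsigned int sysmem_coh_mask,
                unsigned int vidmem_mask)
{
        switch (ap) {
        case AP_SYSMEM_COH:
                return sysmem_coh_mask;
        case AP_SYSMEM:
                return sysmem_mask;
        case AP_VIDMEM:
                return vidmem_mask;
        default:
                return 0U;
        }
}

/*
 * General variant (cf. nvgpu_aperture_mask()): promote plain sysmem to
 * coherent sysmem when the platform uses coherent sysmem, then defer to
 * the explicit variant.
 */
static unsigned int aperture_mask(int coherent_sysmem, enum aperture ap,
                unsigned int sysmem_mask, unsigned int sysmem_coh_mask,
                unsigned int vidmem_mask)
{
        if (coherent_sysmem && ap == AP_SYSMEM) {
                ap = AP_SYSMEM_COH;
        }
        return aperture_mask_coh(ap, sysmem_mask, sysmem_coh_mask, vidmem_mask);
}

int main(void)
{
        /* With coherent sysmem enabled, a SYSMEM request resolves to the coherent mask. */
        printf("%u\n", aperture_mask(1, AP_SYSMEM, 0x1U, 0x2U, 0x4U));
        return 0;
}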