gpu: nvgpu: fix MISRA Rule 21.2 violation

- "va_start", "time" a reserved identifiers or macro names described
  in Section 7, "Library", of the C standard, shall not be declared.
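
  For illustration, a minimal hypothetical sketch of the hazard Rule 21.2
  guards against (not nvgpu code; the struct and field names are invented):

    /* Hypothetical example of the Rule 21.2 hazard; not from this patch. */
    #include <stdarg.h> /* defines the function-like macro va_start */
    #include <time.h>   /* declares the standard function time()    */

    struct bad_ctx {
    	unsigned long long va_start; /* reuses a reserved macro name */
    	long long time;              /* reuses a reserved identifier */
    };

    /* Even where this happens to compile (va_start only expands when
     * followed by '('), the meaning of these names now depends on which
     * standard headers are in scope, which is what Rule 21.2 forbids. */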

JIRA NVGPU-6536

Change-Id: I868362819dd7178eb7b165f243fb6d36322d8372
Signed-off-by: srajum <srajum@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2582291
(cherry picked from commit 29c2c55b184cf16aee51614da895747750217885)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2674867
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Rajesh Devaraj <rdevaraj@nvidia.com>
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    srajum <srajum@nvidia.com>
Date:      2021-08-24 19:24:44 +05:30
Committed: mobile promotions
Commit:    8e56c73eab (parent 41a1212744)

7 changed files with 14 additions and 14 deletions

File 1 of 7:

@@ -640,7 +640,7 @@ static int nvgpu_vm_init_vma(struct gk20a *g, struct vm_gk20a *vm,
 	 * is set.
 	 */
 	if (!big_pages || unified_va) {
-		user_vma_start = vm->va_start;
+		user_vma_start = vm->virtaddr_start;
 		user_vma_limit = nvgpu_safe_sub_u64(vm->va_limit,
 						kernel_reserved);
 		user_lp_vma_start = user_vma_limit;
@@ -650,14 +650,14 @@ static int nvgpu_vm_init_vma(struct gk20a *g, struct vm_gk20a *vm,
 		 * Ensure small_big_split falls between user vma
 		 * start and end.
 		 */
-		if ((small_big_split <= vm->va_start) ||
+		if ((small_big_split <= vm->virtaddr_start) ||
 		    (small_big_split >=
 		     nvgpu_safe_sub_u64(vm->va_limit,
 					kernel_reserved))) {
 			return -EINVAL;
 		}

-		user_vma_start = vm->va_start;
+		user_vma_start = vm->virtaddr_start;
 		user_vma_limit = small_big_split;
 		user_lp_vma_start = small_big_split;
 		user_lp_vma_limit = nvgpu_safe_sub_u64(vm->va_limit,
@@ -759,7 +759,7 @@ static int nvgpu_vm_init_attributes(struct mm_gk20a *mm,
 		vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user_lp;
 	}

-	vm->va_start = low_hole;
+	vm->virtaddr_start = low_hole;
 	vm->va_limit = aperture_size;

 	vm->big_page_size = vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG];

File 2 of 7:

@@ -1,7 +1,7 @@
 /*
  * Virtualized GPU VM
  *
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -48,7 +48,7 @@ int vgpu_vm_as_alloc_share(struct gk20a *g, struct vm_gk20a *vm)
 	msg.cmd = TEGRA_VGPU_CMD_AS_ALLOC_SHARE;
 	msg.handle = vgpu_get_handle(g);
-	p->va_start = vm->va_start;
+	p->va_start = vm->virtaddr_start;
 	p->va_limit = vm->va_limit;
 	p->big_page_size = vm->big_page_size;

File 3 of 7:

@@ -48,7 +48,7 @@
 ({ \
 	const struct nvgpu_timeout *t_ptr = (timeout); \
 	int ret_cpu = 0; \
-	if (nvgpu_current_time_ns() > t_ptr->time) { \
+	if (nvgpu_current_time_ns() > t_ptr->time_duration) { \
 		if ((t_ptr->flags & NVGPU_TIMER_SILENT_TIMEOUT) == 0U) { \
 			nvgpu_err(t_ptr->g, "Timeout detected @ %p" fmt, \
 				  caller, ##arg); \

File 4 of 7:

@@ -68,7 +68,7 @@ struct nvgpu_timeout {
 	 * Timeout duration/count.
 	 */
 	union {
-		s64 time;
+		s64 time_duration;
 		struct {
 			u32 max_attempts;
 			u32 attempted;

File 5 of 7:

@@ -240,7 +240,7 @@ struct vm_gk20a {
 	char name[NVGPU_VM_NAME_LEN];

 	/** Start GPU address of the context. */
-	u64 va_start;
+	u64 virtaddr_start;
 	/** End GPU address of the context. */
 	u64 va_limit;
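
A minimal hypothetical helper (not part of this patch) shows how the
renamed field still pairs with va_limit to bound the context's GPU
virtual address window:

    /* Hypothetical helper, not from this patch: true when addr lies in
     * the half-open window [virtaddr_start, va_limit). */
    static inline bool vm_addr_in_range(struct vm_gk20a *vm, u64 addr)
    {
    	return (addr >= vm->virtaddr_start) && (addr < vm->va_limit);
    }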

File 6 of 7:

@@ -97,7 +97,7 @@ int nvgpu_timeout_init_flags(struct gk20a *g, struct nvgpu_timeout *timeout,
 	if (flags & NVGPU_TIMER_RETRY_TIMER)
 		timeout->retries.max_attempts = duration;
 	else
-		timeout->time = ktime_to_ns(ktime_add_ns(ktime_get(),
+		timeout->time_duration = ktime_to_ns(ktime_add_ns(ktime_get(),
					(s64)NSEC_PER_MSEC * duration));

 	return 0;
@@ -113,7 +113,7 @@ static int nvgpu_timeout_expired_msg_cpu(struct nvgpu_timeout *timeout,
 	if (nvgpu_timeout_is_pre_silicon(timeout))
 		return 0;

-	if (ktime_after(now, ns_to_ktime(timeout->time))) {
+	if (ktime_after(now, ns_to_ktime(timeout->time_duration))) {
 		if (!(timeout->flags & NVGPU_TIMER_SILENT_TIMEOUT)) {
 			char buf[128];
@@ -204,7 +204,7 @@ bool nvgpu_timeout_peek_expired(struct nvgpu_timeout *timeout)
 		return timeout->retries.attempted >=
 			timeout->retries.max_attempts;
 	else
-		return ktime_after(ktime_get(), ns_to_ktime(timeout->time));
+		return ktime_after(ktime_get(), ns_to_ktime(timeout->time_duration));
 }

 /**

File 7 of 7:

@@ -177,7 +177,7 @@ int nvgpu_timeout_init_flags(struct gk20a *g, struct nvgpu_timeout *timeout,
 	} else {
 		duration_ns = (s64)duration;
 		duration_ns = nvgpu_safe_mult_s64(duration_ns, NSEC_PER_MSEC);
-		timeout->time = nvgpu_safe_add_s64(nvgpu_current_time_ns(),
+		timeout->time_duration = nvgpu_safe_add_s64(nvgpu_current_time_ns(),
						duration_ns);
 	}
@@ -190,7 +190,7 @@ bool nvgpu_timeout_peek_expired(struct nvgpu_timeout *timeout)
 		return (timeout->retries.attempted >=
			timeout->retries.max_attempts);
 	} else {
-		return time_after(get_time_ns(), timeout->time);
+		return time_after(get_time_ns(), timeout->time_duration);
 	}
 }
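
For context, a sketch of a typical caller of the API touched above.
nvgpu_timeout_init_flags() and nvgpu_timeout_peek_expired() appear in
this patch; hw_unit_idle() and the NVGPU_TIMER_CPU_TIMER flag are
assumptions for illustration:

    /* Sketch of a polling loop over the renamed timeout field. */
    static int wait_for_idle(struct gk20a *g)
    {
    	struct nvgpu_timeout timeout;
    	int err;

    	/* Arm a 1000 ms CPU timer; the absolute deadline in ns is
    	 * stored in the renamed time_duration member. */
    	err = nvgpu_timeout_init_flags(g, &timeout, 1000U,
    			NVGPU_TIMER_CPU_TIMER);
    	if (err != 0) {
    		return err;
    	}

    	while (!hw_unit_idle(g)) { /* hypothetical hardware poll */
    		if (nvgpu_timeout_peek_expired(&timeout)) {
    			return -ETIMEDOUT;
    		}
    	}

    	return 0;
    }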