gpu: nvgpu: gr: fix MISRA 10.4 violations

MISRA Rule 10.4 requires that both operands of an operator in which the
usual arithmetic conversions are performed have the same essential type
category.

Fix violations in the gr unit by replacing the nvgpu_gr_ctx_index,
nvgpu_gr_ctx_global_ctx_va, and nvgpu_gr_global_ctx_index enums with u32
defines, adding unsigned suffixes to constants that are mixed with
unsigned operands, and typing the bpt_event parameter of
gv11b_gr_esr_bpt_pending_events() as enum nvgpu_event_id_type so it
matches the values it is compared against.
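
To make the rule concrete, here is a minimal sketch (hypothetical names,
not code from this change) of the violation pattern and the fix used
throughout this series: an enum used as a loop bound is replaced by
unsigned defines so both operands of the comparison are essentially
unsigned.

/* Before: the loop bound is an enum constant, so the comparison mixes
 * essential type categories (unsigned vs. enum) and violates Rule 10.4. */
enum old_buf_index { OLD_BUF_A = 0, OLD_BUF_B, OLD_BUF_COUNT };

static void clear_before(unsigned int sizes[OLD_BUF_COUNT])
{
	unsigned int i;

	for (i = 0U; i < OLD_BUF_COUNT; i++) {	/* unsigned vs. enum: violation */
		sizes[i] = 0U;
	}
}

/* After: plain unsigned defines, as done for the NVGPU_GR_CTX_* and
 * NVGPU_GR_GLOBAL_CTX_* indices below, keep both operands unsigned. */
#define NEW_BUF_A	0U
#define NEW_BUF_B	1U
#define NEW_BUF_COUNT	2U

static void clear_after(unsigned int sizes[NEW_BUF_COUNT])
{
	unsigned int i;

	for (i = 0U; i < NEW_BUF_COUNT; i++) {	/* unsigned vs. unsigned: compliant */
		sizes[i] = 0U;
	}
}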

JIRA NVGPU-3159

Change-Id: I7d864f407feadeb7ffed3922d68830aed777ce6f
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2104522
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Philip Elcan authored 2019-04-24 09:31:18 -04:00, committed by mobile promotions
commit 596cf7241f, parent 1a46d4efca
8 changed files with 56 additions and 61 deletions

View File

@@ -49,7 +49,7 @@ void nvgpu_gr_ctx_desc_free(struct gk20a *g,
 }
 
 void nvgpu_gr_ctx_set_size(struct nvgpu_gr_ctx_desc *gr_ctx_desc,
-	enum nvgpu_gr_ctx_index index, u32 size)
+	u32 index, u32 size)
 {
 	gr_ctx_desc->size[index] = size;
 }
@@ -330,13 +330,12 @@ static void nvgpu_gr_ctx_unmap_global_ctx_buffers(struct gk20a *g,
 	struct vm_gk20a *vm)
 {
 	u64 *g_bfr_va = gr_ctx->global_ctx_buffer_va;
-	enum nvgpu_gr_global_ctx_index *g_bfr_index =
-		gr_ctx->global_ctx_buffer_index;
+	u32 *g_bfr_index = gr_ctx->global_ctx_buffer_index;
 	u32 i;
 
 	nvgpu_log_fn(g, " ");
 
-	for (i = 0; i < NVGPU_GR_CTX_VA_COUNT; i++) {
+	for (i = 0U; i < NVGPU_GR_CTX_VA_COUNT; i++) {
 		nvgpu_gr_global_ctx_buffer_unmap(global_ctx_buffer,
 			g_bfr_index[i], vm, g_bfr_va[i]);
 	}
@@ -353,7 +352,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 	struct vm_gk20a *vm, bool vpr)
 {
 	u64 *g_bfr_va;
-	enum nvgpu_gr_global_ctx_index *g_bfr_index;
+	u32 *g_bfr_index;
 	u64 gpu_va = 0ULL;
 
 	nvgpu_log_fn(g, " ");
@@ -471,7 +470,7 @@ clean_up:
 }
 
 u64 nvgpu_gr_ctx_get_global_ctx_va(struct nvgpu_gr_ctx *gr_ctx,
-	enum nvgpu_gr_ctx_global_ctx_va index)
+	u32 index)
 {
 	return gr_ctx->global_ctx_buffer_va[index];
 }

View File

@@ -24,7 +24,6 @@
 #define NVGPU_GR_CTX_PRIV_H
 
 struct nvgpu_mem;
-enum nvgpu_gr_global_ctx_index;
 
 struct patch_desc {
 	struct nvgpu_mem mem;
@@ -78,7 +77,7 @@ struct nvgpu_gr_ctx {
 #endif
 
 	u64 global_ctx_buffer_va[NVGPU_GR_CTX_VA_COUNT];
-	enum nvgpu_gr_global_ctx_index global_ctx_buffer_index[NVGPU_GR_CTX_VA_COUNT];
+	u32 global_ctx_buffer_index[NVGPU_GR_CTX_VA_COUNT];
 	bool global_ctx_buffer_mapped;
 
 	u32 tsgid;

View File

@@ -35,7 +35,8 @@ struct nvgpu_gr_global_ctx_buffer_desc *
 nvgpu_gr_global_ctx_desc_alloc(struct gk20a *g)
 {
 	struct nvgpu_gr_global_ctx_buffer_desc *desc =
-		nvgpu_kzalloc(g, sizeof(*desc) * NVGPU_GR_GLOBAL_CTX_COUNT);
+		nvgpu_kzalloc(g, sizeof(*desc) *
+			U64(NVGPU_GR_GLOBAL_CTX_COUNT));
 
 	return desc;
 }
@@ -47,13 +48,13 @@ void nvgpu_gr_global_ctx_desc_free(struct gk20a *g,
 }
 
 void nvgpu_gr_global_ctx_set_size(struct nvgpu_gr_global_ctx_buffer_desc *desc,
-	enum nvgpu_gr_global_ctx_index index, size_t size)
+	u32 index, size_t size)
 {
 	desc[index].size = size;
 }
 
 size_t nvgpu_gr_global_ctx_get_size(struct nvgpu_gr_global_ctx_buffer_desc *desc,
-	enum nvgpu_gr_global_ctx_index index)
+	u32 index)
 {
 	return desc[index].size;
 }
@@ -69,7 +70,7 @@ void nvgpu_gr_global_ctx_buffer_free(struct gk20a *g,
 {
 	u32 i;
 
-	for (i = 0; i < NVGPU_GR_GLOBAL_CTX_COUNT; i++) {
+	for (i = 0U; i < NVGPU_GR_GLOBAL_CTX_COUNT; i++) {
 		if (desc[i].destroy != NULL) {
 			desc[i].destroy(g, &desc[i].mem);
 			desc[i].destroy = NULL;
@@ -81,7 +82,7 @@ void nvgpu_gr_global_ctx_buffer_free(struct gk20a *g,
 
 static int nvgpu_gr_global_ctx_buffer_alloc_sys(struct gk20a *g,
 	struct nvgpu_gr_global_ctx_buffer_desc *desc,
-	enum nvgpu_gr_global_ctx_index index)
+	u32 index)
 {
 	int err = 0;
@@ -104,7 +105,7 @@ static int nvgpu_gr_global_ctx_buffer_alloc_sys(struct gk20a *g,
 
 static int nvgpu_gr_global_ctx_buffer_alloc_vpr(struct gk20a *g,
 	struct nvgpu_gr_global_ctx_buffer_desc *desc,
-	enum nvgpu_gr_global_ctx_index index)
+	u32 index)
 {
 	int err = 0;
@@ -207,7 +208,7 @@ clean_up:
 }
 
 u64 nvgpu_gr_global_ctx_buffer_map(struct nvgpu_gr_global_ctx_buffer_desc *desc,
-	enum nvgpu_gr_global_ctx_index index,
+	u32 index,
 	struct vm_gk20a *vm, u32 flags, bool priv)
 {
 	u64 gpu_va;
@@ -224,7 +225,7 @@ u64 nvgpu_gr_global_ctx_buffer_map(struct nvgpu_gr_global_ctx_buffer_desc *desc,
 
 void nvgpu_gr_global_ctx_buffer_unmap(
 	struct nvgpu_gr_global_ctx_buffer_desc *desc,
-	enum nvgpu_gr_global_ctx_index index,
+	u32 index,
 	struct vm_gk20a *vm, u64 gpu_va)
 {
 	if (nvgpu_mem_is_valid(&desc[index].mem)) {
@@ -234,7 +235,7 @@ void nvgpu_gr_global_ctx_buffer_unmap(
 
 struct nvgpu_mem *nvgpu_gr_global_ctx_buffer_get_mem(
 	struct nvgpu_gr_global_ctx_buffer_desc *desc,
-	enum nvgpu_gr_global_ctx_index index)
+	u32 index)
 {
 	if (nvgpu_mem_is_valid(&desc[index].mem)) {
 		return &desc[index].mem;
@@ -244,7 +245,7 @@ struct nvgpu_mem *nvgpu_gr_global_ctx_buffer_get_mem(
 
 bool nvgpu_gr_global_ctx_buffer_ready(
 	struct nvgpu_gr_global_ctx_buffer_desc *desc,
-	enum nvgpu_gr_global_ctx_index index)
+	u32 index)
 {
 	if (nvgpu_mem_is_valid(&desc[index].mem)) {
 		return true;

View File

@@ -59,7 +59,7 @@ struct nvgpu_gr_config *nvgpu_gr_config_init(struct gk20a *g)
 	if (g->ops.gr.config.get_gpc_mask != NULL) {
 		config->gpc_mask = g->ops.gr.config.get_gpc_mask(g, config);
 	} else {
-		config->gpc_mask = BIT32(config->gpc_count) - 1;
+		config->gpc_mask = BIT32(config->gpc_count) - 1U;
 	}
 
 	config->pe_count_per_gpc = nvgpu_get_litter_value(g,
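
The 1U suffix matters because BIT32() is assumed to yield an unsigned
32-bit value while a bare 1 is an essentially signed constant. A
standalone sketch, with a hypothetical MY_BIT32() standing in for the
nvgpu macro:

#include <stdint.h>

/* Hypothetical stand-in for nvgpu's BIT32(); assumed to yield a u32 value. */
#define MY_BIT32(i)	(UINT32_C(1) << (i))

static uint32_t gpc_mask_before(uint32_t gpc_count)
{
	return MY_BIT32(gpc_count) - 1;		/* unsigned - signed constant: Rule 10.4 */
}

static uint32_t gpc_mask_after(uint32_t gpc_count)
{
	return MY_BIT32(gpc_count) - 1U;	/* unsigned - unsigned: compliant */
}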

View File

@@ -2917,7 +2917,8 @@ fail:
 	return err;
 }
 
-bool gv11b_gr_esr_bpt_pending_events(u32 global_esr, u32 bpt_event)
+bool gv11b_gr_esr_bpt_pending_events(u32 global_esr,
+		enum nvgpu_event_id_type bpt_event)
 {
 	bool ret = false;
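
This change goes in the other direction: bpt_event is compared against
nvgpu_event_id_type values, so giving the parameter the enum type keeps
both operands of each comparison in the same essential type category. A
self-contained sketch with hypothetical names, not the actual function
body:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for enum nvgpu_event_id_type. */
enum my_event_id {
	MY_EVENT_ID_BPT_INT = 0,
	MY_EVENT_ID_BPT_PAUSE = 1,
};

#define BPT_INT_PENDING		0x1U
#define BPT_PAUSE_PENDING	0x2U

/*
 * With a u32 parameter, "bpt_event == MY_EVENT_ID_BPT_INT" compares
 * essentially unsigned against essentially enum (Rule 10.4). Typing the
 * parameter as the enum makes both operands of == the same category.
 */
static bool esr_bpt_pending(uint32_t global_esr, enum my_event_id bpt_event)
{
	bool ret = false;

	if ((bpt_event == MY_EVENT_ID_BPT_INT) &&
	    ((global_esr & BPT_INT_PENDING) != 0U)) {
		ret = true;
	}
	if ((bpt_event == MY_EVENT_ID_BPT_PAUSE) &&
	    ((global_esr & BPT_PAUSE_PENDING) != 0U)) {
		ret = true;
	}

	return ret;
}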

View File

@@ -133,5 +133,6 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
 		u32 *priv_addr_table,
 		u32 *num_registers);
 void gr_gv11b_powergate_tpc(struct gk20a *g);
-bool gv11b_gr_esr_bpt_pending_events(u32 global_esr, u32 bpt_event);
+bool gv11b_gr_esr_bpt_pending_events(u32 global_esr,
+		enum nvgpu_event_id_type bpt_event);
 #endif /* NVGPU_GR_GV11B_H */

View File

@@ -53,30 +53,26 @@ struct zcull_ctx_desc;
 struct pm_ctx_desc;
 struct nvgpu_gr_ctx_desc;
 
-enum nvgpu_gr_ctx_index {
-	NVGPU_GR_CTX_CTX = 0,
-	NVGPU_GR_CTX_PM_CTX,
-	NVGPU_GR_CTX_PATCH_CTX,
-	NVGPU_GR_CTX_PREEMPT_CTXSW,
-	NVGPU_GR_CTX_SPILL_CTXSW,
-	NVGPU_GR_CTX_BETACB_CTXSW,
-	NVGPU_GR_CTX_PAGEPOOL_CTXSW,
-	NVGPU_GR_CTX_GFXP_RTVCB_CTXSW,
-	NVGPU_GR_CTX_COUNT
-};
+#define NVGPU_GR_CTX_CTX		0U
+#define NVGPU_GR_CTX_PM_CTX		1U
+#define NVGPU_GR_CTX_PATCH_CTX		2U
+#define NVGPU_GR_CTX_PREEMPT_CTXSW	3U
+#define NVGPU_GR_CTX_SPILL_CTXSW	4U
+#define NVGPU_GR_CTX_BETACB_CTXSW	5U
+#define NVGPU_GR_CTX_PAGEPOOL_CTXSW	6U
+#define NVGPU_GR_CTX_GFXP_RTVCB_CTXSW	7U
+#define NVGPU_GR_CTX_COUNT		8U
 
 /*
  * either ATTRIBUTE or ATTRIBUTE_VPR maps to NVGPU_GR_CTX_ATTRIBUTE_VA
  */
-enum nvgpu_gr_ctx_global_ctx_va {
-	NVGPU_GR_CTX_CIRCULAR_VA = 0,
-	NVGPU_GR_CTX_PAGEPOOL_VA = 1,
-	NVGPU_GR_CTX_ATTRIBUTE_VA = 2,
-	NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA = 3,
-	NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA = 4,
-	NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA = 5,
-	NVGPU_GR_CTX_VA_COUNT = 6
-};
+#define NVGPU_GR_CTX_CIRCULAR_VA		0U
+#define NVGPU_GR_CTX_PAGEPOOL_VA		1U
+#define NVGPU_GR_CTX_ATTRIBUTE_VA		2U
+#define NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA		3U
+#define NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA	4U
+#define NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA	5U
+#define NVGPU_GR_CTX_VA_COUNT			6U
 
 /* PM Context Switch Mode */
 /*This mode says that the pms are not to be context switched. */
@@ -91,7 +87,7 @@ void nvgpu_gr_ctx_desc_free(struct gk20a *g,
 	struct nvgpu_gr_ctx_desc *desc);
 
 void nvgpu_gr_ctx_set_size(struct nvgpu_gr_ctx_desc *gr_ctx_desc,
-	enum nvgpu_gr_ctx_index index, u32 size);
+	u32 index, u32 size);
 
 int nvgpu_gr_ctx_alloc(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx,
@@ -131,7 +127,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 	struct vm_gk20a *vm, bool vpr);
 
 u64 nvgpu_gr_ctx_get_global_ctx_va(struct nvgpu_gr_ctx *gr_ctx,
-	enum nvgpu_gr_ctx_global_ctx_va index);
+	u32 index);
 
 struct nvgpu_mem *nvgpu_gr_ctx_get_spill_ctxsw_buffer(
 	struct nvgpu_gr_ctx *gr_ctx);

View File

@@ -35,18 +35,16 @@ struct nvgpu_gr_global_ctx_local_golden_image;
 typedef void (*global_ctx_mem_destroy_fn)(struct gk20a *g,
 	struct nvgpu_mem *mem);
 
-enum nvgpu_gr_global_ctx_index {
-	NVGPU_GR_GLOBAL_CTX_CIRCULAR = 0,
-	NVGPU_GR_GLOBAL_CTX_PAGEPOOL = 1,
-	NVGPU_GR_GLOBAL_CTX_ATTRIBUTE = 2,
-	NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR = 3,
-	NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR = 4,
-	NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR = 5,
-	NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP = 6,
-	NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER = 7,
-	NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER = 8,
-	NVGPU_GR_GLOBAL_CTX_COUNT = 9
-};
+#define NVGPU_GR_GLOBAL_CTX_CIRCULAR			0U
+#define NVGPU_GR_GLOBAL_CTX_PAGEPOOL			1U
+#define NVGPU_GR_GLOBAL_CTX_ATTRIBUTE			2U
+#define NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR		3U
+#define NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR		4U
+#define NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR		5U
+#define NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP		6U
+#define NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER		7U
+#define NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER		8U
+#define NVGPU_GR_GLOBAL_CTX_COUNT			9U
 
 struct nvgpu_gr_global_ctx_buffer_desc *nvgpu_gr_global_ctx_desc_alloc(
 	struct gk20a *g);
@@ -54,9 +52,9 @@ void nvgpu_gr_global_ctx_desc_free(struct gk20a *g,
 	struct nvgpu_gr_global_ctx_buffer_desc *desc);
 
 void nvgpu_gr_global_ctx_set_size(struct nvgpu_gr_global_ctx_buffer_desc *desc,
-	enum nvgpu_gr_global_ctx_index index, size_t size);
+	u32 index, size_t size);
 size_t nvgpu_gr_global_ctx_get_size(struct nvgpu_gr_global_ctx_buffer_desc *desc,
-	enum nvgpu_gr_global_ctx_index index);
+	u32 index);
 
 int nvgpu_gr_global_ctx_buffer_alloc(struct gk20a *g,
 	struct nvgpu_gr_global_ctx_buffer_desc *desc);
@@ -64,19 +62,19 @@ void nvgpu_gr_global_ctx_buffer_free(struct gk20a *g,
 	struct nvgpu_gr_global_ctx_buffer_desc *desc);
 
 u64 nvgpu_gr_global_ctx_buffer_map(struct nvgpu_gr_global_ctx_buffer_desc *desc,
-	enum nvgpu_gr_global_ctx_index index,
+	u32 index,
 	struct vm_gk20a *vm, u32 flags, bool priv);
 
 void nvgpu_gr_global_ctx_buffer_unmap(
 	struct nvgpu_gr_global_ctx_buffer_desc *desc,
-	enum nvgpu_gr_global_ctx_index index,
+	u32 index,
 	struct vm_gk20a *vm, u64 gpu_va);
 
 struct nvgpu_mem *nvgpu_gr_global_ctx_buffer_get_mem(
 	struct nvgpu_gr_global_ctx_buffer_desc *desc,
-	enum nvgpu_gr_global_ctx_index index);
+	u32 index);
 
 bool nvgpu_gr_global_ctx_buffer_ready(
 	struct nvgpu_gr_global_ctx_buffer_desc *desc,
-	enum nvgpu_gr_global_ctx_index index);
+	u32 index);
 
 struct nvgpu_gr_global_ctx_local_golden_image *
 nvgpu_gr_global_ctx_init_local_golden_image(struct gk20a *g,