mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: gr: fix MISRA 10.4 violations
MISRA Rule 10.4 requires that both operands of an operator in which
the usual arithmetic conversions are performed have the same
essential type category.

JIRA NVGPU-3159

Change-Id: I7d864f407feadeb7ffed3922d68830aed777ce6f
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2104522
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
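For reference, a minimal sketch of the pattern this change fixes (illustrative names, not code from this commit): a u32 loop counter compared against an enum bound mixes the unsigned and enum essential type categories, while an unsigned #define keeps both operands unsigned.

#include <stdint.h>

typedef uint32_t u32;

/* Before: a named enum; its constants carry the enum's essential type. */
enum ctx_index { CTX_A, CTX_B, CTX_COUNT };

/* After: an integer constant with a U suffix is essentially unsigned. */
#define CTX_COUNT_U 3U

static void walk_before(void)
{
        u32 i;

        /* Rule 10.4 violation: '<' mixes essentially unsigned 'i'
         * with the enum-typed constant CTX_COUNT. */
        for (i = 0U; i < CTX_COUNT; i++) {
        }
}

static void walk_after(void)
{
        u32 i;

        /* Compliant: both operands of '<' are essentially unsigned. */
        for (i = 0U; i < CTX_COUNT_U; i++) {
        }
}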
committed by mobile promotions
parent 1a46d4efca
commit 596cf7241f
@@ -49,7 +49,7 @@ void nvgpu_gr_ctx_desc_free(struct gk20a *g,
 }

 void nvgpu_gr_ctx_set_size(struct nvgpu_gr_ctx_desc *gr_ctx_desc,
-        enum nvgpu_gr_ctx_index index, u32 size)
+        u32 index, u32 size)
 {
         gr_ctx_desc->size[index] = size;
 }
@@ -330,13 +330,12 @@ static void nvgpu_gr_ctx_unmap_global_ctx_buffers(struct gk20a *g,
         struct vm_gk20a *vm)
 {
         u64 *g_bfr_va = gr_ctx->global_ctx_buffer_va;
-        enum nvgpu_gr_global_ctx_index *g_bfr_index =
-                gr_ctx->global_ctx_buffer_index;
+        u32 *g_bfr_index = gr_ctx->global_ctx_buffer_index;
         u32 i;

         nvgpu_log_fn(g, " ");

-        for (i = 0; i < NVGPU_GR_CTX_VA_COUNT; i++) {
+        for (i = 0U; i < NVGPU_GR_CTX_VA_COUNT; i++) {
                 nvgpu_gr_global_ctx_buffer_unmap(global_ctx_buffer,
                         g_bfr_index[i], vm, g_bfr_va[i]);
         }
@@ -353,7 +352,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
         struct vm_gk20a *vm, bool vpr)
 {
         u64 *g_bfr_va;
-        enum nvgpu_gr_global_ctx_index *g_bfr_index;
+        u32 *g_bfr_index;
         u64 gpu_va = 0ULL;

         nvgpu_log_fn(g, " ");
@@ -471,7 +470,7 @@ clean_up:
 }

 u64 nvgpu_gr_ctx_get_global_ctx_va(struct nvgpu_gr_ctx *gr_ctx,
-        enum nvgpu_gr_ctx_global_ctx_va index)
+        u32 index)
 {
         return gr_ctx->global_ctx_buffer_va[index];
 }
@@ -24,7 +24,6 @@
 #define NVGPU_GR_CTX_PRIV_H

 struct nvgpu_mem;
-enum nvgpu_gr_global_ctx_index;

 struct patch_desc {
         struct nvgpu_mem mem;
@@ -78,7 +77,7 @@ struct nvgpu_gr_ctx {
 #endif

         u64 global_ctx_buffer_va[NVGPU_GR_CTX_VA_COUNT];
-        enum nvgpu_gr_global_ctx_index global_ctx_buffer_index[NVGPU_GR_CTX_VA_COUNT];
+        u32 global_ctx_buffer_index[NVGPU_GR_CTX_VA_COUNT];
         bool global_ctx_buffer_mapped;

         u32 tsgid;
@@ -35,7 +35,8 @@ struct nvgpu_gr_global_ctx_buffer_desc *
 nvgpu_gr_global_ctx_desc_alloc(struct gk20a *g)
 {
         struct nvgpu_gr_global_ctx_buffer_desc *desc =
-                nvgpu_kzalloc(g, sizeof(*desc) * NVGPU_GR_GLOBAL_CTX_COUNT);
+                nvgpu_kzalloc(g, sizeof(*desc) *
+                        U64(NVGPU_GR_GLOBAL_CTX_COUNT));
         return desc;
 }
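In the hunk above, sizeof(*desc) has type size_t (essentially unsigned), while the old enum constant carried an enum essential type, so the multiplication mixed categories. A sketch of the fixed arithmetic, assuming U64() is nvgpu's plain cast-to-u64 helper (its definition is not part of this diff):

#include <stddef.h>
#include <stdint.h>

typedef uint64_t u64;

/* Assumed shape of nvgpu's U64() helper. */
#define U64(x) ((u64)(x))

/* Stand-in for NVGPU_GR_GLOBAL_CTX_COUNT after this change. */
#define GLOBAL_CTX_COUNT 9U

static size_t desc_bytes(size_t elem_size)
{
        /* Both operands of '*' are essentially unsigned, and the count
         * is widened to match the width of the size_t operand. */
        return elem_size * U64(GLOBAL_CTX_COUNT);
}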
@@ -47,13 +48,13 @@ void nvgpu_gr_global_ctx_desc_free(struct gk20a *g,


 void nvgpu_gr_global_ctx_set_size(struct nvgpu_gr_global_ctx_buffer_desc *desc,
-        enum nvgpu_gr_global_ctx_index index, size_t size)
+        u32 index, size_t size)
 {
         desc[index].size = size;
 }

 size_t nvgpu_gr_global_ctx_get_size(struct nvgpu_gr_global_ctx_buffer_desc *desc,
-        enum nvgpu_gr_global_ctx_index index)
+        u32 index)
 {
         return desc[index].size;
 }
@@ -69,7 +70,7 @@ void nvgpu_gr_global_ctx_buffer_free(struct gk20a *g,
 {
         u32 i;

-        for (i = 0U; i < NVGPU_GR_GLOBAL_CTX_COUNT; i++) {
+        for (i = 0; i < NVGPU_GR_GLOBAL_CTX_COUNT; i++) {
                 if (desc[i].destroy != NULL) {
                         desc[i].destroy(g, &desc[i].mem);
                         desc[i].destroy = NULL;
@@ -81,7 +82,7 @@ void nvgpu_gr_global_ctx_buffer_free(struct gk20a *g,

 static int nvgpu_gr_global_ctx_buffer_alloc_sys(struct gk20a *g,
         struct nvgpu_gr_global_ctx_buffer_desc *desc,
-        enum nvgpu_gr_global_ctx_index index)
+        u32 index)
 {
         int err = 0;
@@ -104,7 +105,7 @@ static int nvgpu_gr_global_ctx_buffer_alloc_sys(struct gk20a *g,

 static int nvgpu_gr_global_ctx_buffer_alloc_vpr(struct gk20a *g,
         struct nvgpu_gr_global_ctx_buffer_desc *desc,
-        enum nvgpu_gr_global_ctx_index index)
+        u32 index)
 {
         int err = 0;
@@ -207,7 +208,7 @@ clean_up:
 }

 u64 nvgpu_gr_global_ctx_buffer_map(struct nvgpu_gr_global_ctx_buffer_desc *desc,
-        enum nvgpu_gr_global_ctx_index index,
+        u32 index,
         struct vm_gk20a *vm, u32 flags, bool priv)
 {
         u64 gpu_va;
@@ -224,7 +225,7 @@ u64 nvgpu_gr_global_ctx_buffer_map(struct nvgpu_gr_global_ctx_buffer_desc *desc,

 void nvgpu_gr_global_ctx_buffer_unmap(
         struct nvgpu_gr_global_ctx_buffer_desc *desc,
-        enum nvgpu_gr_global_ctx_index index,
+        u32 index,
         struct vm_gk20a *vm, u64 gpu_va)
 {
         if (nvgpu_mem_is_valid(&desc[index].mem)) {
@@ -234,7 +235,7 @@ void nvgpu_gr_global_ctx_buffer_unmap(

 struct nvgpu_mem *nvgpu_gr_global_ctx_buffer_get_mem(
         struct nvgpu_gr_global_ctx_buffer_desc *desc,
-        enum nvgpu_gr_global_ctx_index index)
+        u32 index)
 {
         if (nvgpu_mem_is_valid(&desc[index].mem)) {
                 return &desc[index].mem;
@@ -244,7 +245,7 @@ struct nvgpu_mem *nvgpu_gr_global_ctx_buffer_get_mem(

 bool nvgpu_gr_global_ctx_buffer_ready(
         struct nvgpu_gr_global_ctx_buffer_desc *desc,
-        enum nvgpu_gr_global_ctx_index index)
+        u32 index)
 {
         if (nvgpu_mem_is_valid(&desc[index].mem)) {
                 return true;
@@ -59,7 +59,7 @@ struct nvgpu_gr_config *nvgpu_gr_config_init(struct gk20a *g)
         if (g->ops.gr.config.get_gpc_mask != NULL) {
                 config->gpc_mask = g->ops.gr.config.get_gpc_mask(g, config);
         } else {
-                config->gpc_mask = BIT32(config->gpc_count) - 1;
+                config->gpc_mask = BIT32(config->gpc_count) - 1U;
         }

         config->pe_count_per_gpc = nvgpu_get_litter_value(g,
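BIT32() expands to an unsigned 32-bit value, so subtracting a plain (essentially signed) 1 mixed categories in the '-' expression; 1U keeps both operands unsigned. A sketch, assuming BIT32() has nvgpu's usual shift-based shape:

#include <stdint.h>

typedef uint32_t u32;

/* Assumed shapes of nvgpu's helpers. */
#define U32(x)   ((u32)(x))
#define BIT32(i) (U32(1) << U32(i))

static u32 gpc_mask(u32 gpc_count)
{
        /* Both operands of '-' are essentially unsigned. */
        return BIT32(gpc_count) - 1U;
}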
@@ -2917,7 +2917,8 @@ fail:
         return err;
 }

-bool gv11b_gr_esr_bpt_pending_events(u32 global_esr, u32 bpt_event)
+bool gv11b_gr_esr_bpt_pending_events(u32 global_esr,
+        enum nvgpu_event_id_type bpt_event)
 {
         bool ret = false;
@@ -133,5 +133,6 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
         u32 *priv_addr_table,
         u32 *num_registers);
 void gr_gv11b_powergate_tpc(struct gk20a *g);
-bool gv11b_gr_esr_bpt_pending_events(u32 global_esr, u32 bpt_event);
+bool gv11b_gr_esr_bpt_pending_events(u32 global_esr,
+        enum nvgpu_event_id_type bpt_event);
 #endif /* NVGPU_GR_GV11B_H */
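The gv11b hunks go the opposite direction: rather than making everything u32, the bpt_event parameter takes the enum type itself, so comparisons against event-id enum constants inside the function keep both operands in the same essential type. A sketch using a hypothetical stand-in for enum nvgpu_event_id_type:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t u32;

/* Hypothetical stand-in for enum nvgpu_event_id_type. */
enum event_id_type {
        EVENT_ID_BPT_INT,
        EVENT_ID_BPT_PAUSE,
};

static bool esr_bpt_pending(u32 global_esr, enum event_id_type bpt_event)
{
        (void)global_esr;       /* register decode elided in this sketch */

        /* '==' compares two operands of the same named enum type. */
        return bpt_event == EVENT_ID_BPT_INT;
}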
@@ -53,30 +53,26 @@ struct zcull_ctx_desc;
 struct pm_ctx_desc;
 struct nvgpu_gr_ctx_desc;

-enum nvgpu_gr_ctx_index {
-        NVGPU_GR_CTX_CTX = 0,
-        NVGPU_GR_CTX_PM_CTX ,
-        NVGPU_GR_CTX_PATCH_CTX ,
-        NVGPU_GR_CTX_PREEMPT_CTXSW ,
-        NVGPU_GR_CTX_SPILL_CTXSW ,
-        NVGPU_GR_CTX_BETACB_CTXSW ,
-        NVGPU_GR_CTX_PAGEPOOL_CTXSW ,
-        NVGPU_GR_CTX_GFXP_RTVCB_CTXSW ,
-        NVGPU_GR_CTX_COUNT
-};
+#define NVGPU_GR_CTX_CTX                0U
+#define NVGPU_GR_CTX_PM_CTX             1U
+#define NVGPU_GR_CTX_PATCH_CTX          2U
+#define NVGPU_GR_CTX_PREEMPT_CTXSW      3U
+#define NVGPU_GR_CTX_SPILL_CTXSW        4U
+#define NVGPU_GR_CTX_BETACB_CTXSW       5U
+#define NVGPU_GR_CTX_PAGEPOOL_CTXSW     6U
+#define NVGPU_GR_CTX_GFXP_RTVCB_CTXSW   7U
+#define NVGPU_GR_CTX_COUNT              8U

 /*
  * either ATTRIBUTE or ATTRIBUTE_VPR maps to NVGPU_GR_CTX_ATTRIBUTE_VA
  */
-enum nvgpu_gr_ctx_global_ctx_va {
-        NVGPU_GR_CTX_CIRCULAR_VA = 0,
-        NVGPU_GR_CTX_PAGEPOOL_VA = 1,
-        NVGPU_GR_CTX_ATTRIBUTE_VA = 2,
-        NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA = 3,
-        NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA = 4,
-        NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA = 5,
-        NVGPU_GR_CTX_VA_COUNT = 6
-};
+#define NVGPU_GR_CTX_CIRCULAR_VA                0U
+#define NVGPU_GR_CTX_PAGEPOOL_VA                1U
+#define NVGPU_GR_CTX_ATTRIBUTE_VA               2U
+#define NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA         3U
+#define NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA     4U
+#define NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA       5U
+#define NVGPU_GR_CTX_VA_COUNT                   6U

 /* PM Context Switch Mode */
 /*This mode says that the pms are not to be context switched. */
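The index enums become #defines with a U suffix because a constant like 2U is essentially unsigned, matching the u32 index parameters introduced above, whereas constants of a named enum carry that enum's own essential type. A minimal sketch of the effect on an indexing helper (illustrative names):

#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Stand-ins for the defines above. */
#define CTX_CIRCULAR_VA 0U
#define CTX_VA_COUNT    6U

static u64 get_va(const u64 *va_table, u32 index)
{
        /* 'index' and the 0U..5U constants share the unsigned essential
         * type category, so get_va(table, CTX_CIRCULAR_VA) raises no
         * Rule 10.4 mismatch. */
        return va_table[index];
}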
@@ -91,7 +87,7 @@ void nvgpu_gr_ctx_desc_free(struct gk20a *g,
         struct nvgpu_gr_ctx_desc *desc);

 void nvgpu_gr_ctx_set_size(struct nvgpu_gr_ctx_desc *gr_ctx_desc,
-        enum nvgpu_gr_ctx_index index, u32 size);
+        u32 index, u32 size);

 int nvgpu_gr_ctx_alloc(struct gk20a *g,
         struct nvgpu_gr_ctx *gr_ctx,
@@ -131,7 +127,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
         struct vm_gk20a *vm, bool vpr);

 u64 nvgpu_gr_ctx_get_global_ctx_va(struct nvgpu_gr_ctx *gr_ctx,
-        enum nvgpu_gr_ctx_global_ctx_va index);
+        u32 index);

 struct nvgpu_mem *nvgpu_gr_ctx_get_spill_ctxsw_buffer(
         struct nvgpu_gr_ctx *gr_ctx);
@@ -35,18 +35,16 @@ struct nvgpu_gr_global_ctx_local_golden_image;
 typedef void (*global_ctx_mem_destroy_fn)(struct gk20a *g,
         struct nvgpu_mem *mem);

-enum nvgpu_gr_global_ctx_index {
-        NVGPU_GR_GLOBAL_CTX_CIRCULAR = 0,
-        NVGPU_GR_GLOBAL_CTX_PAGEPOOL = 1,
-        NVGPU_GR_GLOBAL_CTX_ATTRIBUTE = 2,
-        NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR = 3,
-        NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR = 4,
-        NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR = 5,
-        NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP = 6,
-        NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER = 7,
-        NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER = 8,
-        NVGPU_GR_GLOBAL_CTX_COUNT = 9
-};
+#define NVGPU_GR_GLOBAL_CTX_CIRCULAR            0U
+#define NVGPU_GR_GLOBAL_CTX_PAGEPOOL            1U
+#define NVGPU_GR_GLOBAL_CTX_ATTRIBUTE           2U
+#define NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR        3U
+#define NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR        4U
+#define NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR       5U
+#define NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP     6U
+#define NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER 7U
+#define NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER   8U
+#define NVGPU_GR_GLOBAL_CTX_COUNT               9U

 struct nvgpu_gr_global_ctx_buffer_desc *nvgpu_gr_global_ctx_desc_alloc(
         struct gk20a *g);
@@ -54,9 +52,9 @@ void nvgpu_gr_global_ctx_desc_free(struct gk20a *g,
         struct nvgpu_gr_global_ctx_buffer_desc *desc);

 void nvgpu_gr_global_ctx_set_size(struct nvgpu_gr_global_ctx_buffer_desc *desc,
-        enum nvgpu_gr_global_ctx_index index, size_t size);
+        u32 index, size_t size);
 size_t nvgpu_gr_global_ctx_get_size(struct nvgpu_gr_global_ctx_buffer_desc *desc,
-        enum nvgpu_gr_global_ctx_index index);
+        u32 index);

 int nvgpu_gr_global_ctx_buffer_alloc(struct gk20a *g,
         struct nvgpu_gr_global_ctx_buffer_desc *desc);
@@ -64,19 +62,19 @@ void nvgpu_gr_global_ctx_buffer_free(struct gk20a *g,
         struct nvgpu_gr_global_ctx_buffer_desc *desc);

 u64 nvgpu_gr_global_ctx_buffer_map(struct nvgpu_gr_global_ctx_buffer_desc *desc,
-        enum nvgpu_gr_global_ctx_index index,
+        u32 index,
         struct vm_gk20a *vm, u32 flags, bool priv);
 void nvgpu_gr_global_ctx_buffer_unmap(
         struct nvgpu_gr_global_ctx_buffer_desc *desc,
-        enum nvgpu_gr_global_ctx_index index,
+        u32 index,
         struct vm_gk20a *vm, u64 gpu_va);

 struct nvgpu_mem *nvgpu_gr_global_ctx_buffer_get_mem(
         struct nvgpu_gr_global_ctx_buffer_desc *desc,
-        enum nvgpu_gr_global_ctx_index index);
+        u32 index);
 bool nvgpu_gr_global_ctx_buffer_ready(
         struct nvgpu_gr_global_ctx_buffer_desc *desc,
-        enum nvgpu_gr_global_ctx_index index);
+        u32 index);

 struct nvgpu_gr_global_ctx_local_golden_image *
 nvgpu_gr_global_ctx_init_local_golden_image(struct gk20a *g,