diff --git a/drivers/gpu/nvgpu/common/fifo/submit.c b/drivers/gpu/nvgpu/common/fifo/submit.c index d034f2d33..7f2f677d2 100644 --- a/drivers/gpu/nvgpu/common/fifo/submit.c +++ b/drivers/gpu/nvgpu/common/fifo/submit.c @@ -179,7 +179,7 @@ static void nvgpu_submit_append_priv_cmdbuf(struct channel_gk20a *c, trace_gk20a_push_cmdbuf(g->name, 0, cmd->size, 0, (u32 *)cmd->mem->cpu_va + cmd->off); - c->gpfifo.put = (c->gpfifo.put + 1) & (c->gpfifo.entry_num - 1); + c->gpfifo.put = (c->gpfifo.put + 1U) & (c->gpfifo.entry_num - 1U); } static int nvgpu_submit_append_gpfifo_user_direct(struct channel_gk20a *c, @@ -286,7 +286,7 @@ static int nvgpu_submit_append_gpfifo(struct channel_gk20a *c, trace_write_pushbuffers(c, num_entries); c->gpfifo.put = (c->gpfifo.put + num_entries) & - (c->gpfifo.entry_num - 1); + (c->gpfifo.entry_num - 1U); return 0; } @@ -307,7 +307,7 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c, struct channel_gk20a_job *job = NULL; /* we might need two extra gpfifo entries - one for pre fence * and one for post fence. */ - const int extra_entries = 2; + const u32 extra_entries = 2U; bool skip_buffer_refcounting = (flags & NVGPU_SUBMIT_FLAGS_SKIP_BUFFER_REFCOUNTING); int err = 0; @@ -330,7 +330,7 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c, * Kernel can insert gpfifo entries before and after user gpfifos. * So, add extra_entries in user request. 
Also, HW with fifo size N * can accept only N-1 entreis and so the below condition */ - if (c->gpfifo.entry_num - 1 < num_entries + extra_entries) { + if (c->gpfifo.entry_num - 1U < num_entries + extra_entries) { nvgpu_err(g, "not enough gpfifo space allocated"); return -ENOMEM; } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c index d72629b55..86e56d9eb 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu.c @@ -512,7 +512,7 @@ int nvgpu_pmu_destroy(struct gk20a *g) { struct nvgpu_pmu *pmu = &g->pmu; struct pmu_pg_stats_data pg_stat_data = { 0 }; - int i; + u32 i; nvgpu_log_fn(g, " "); @@ -539,7 +539,7 @@ int nvgpu_pmu_destroy(struct gk20a *g) pmu->isr_enabled = false; nvgpu_mutex_release(&pmu->isr_mutex); - for (i = 0; i < PMU_QUEUE_COUNT; i++) { + for (i = 0U; i < PMU_QUEUE_COUNT; i++) { nvgpu_flcn_queue_free(pmu->flcn, &pmu->queue[i]); } @@ -559,7 +559,7 @@ void nvgpu_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem, { fb->address.lo = u64_lo32(mem->gpu_va); fb->address.hi = u64_hi32(mem->gpu_va); - fb->params = ((u32)mem->size & 0xFFFFFF); + fb->params = ((u32)mem->size & 0xFFFFFFU); fb->params |= (GK20A_PMU_DMAIDX_VIRT << 24); } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c index 87fd2f2aa..bf54e0d6f 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c @@ -37,12 +37,12 @@ #define NVGPU_PMU_NS_UCODE_IMAGE "gpmu_ucode.bin" /* PMU F/W version */ -#define APP_VERSION_GPU_NEXT 24313845 -#define APP_VERSION_GV11B 24379482 -#define APP_VERSION_GV10X 23647491 -#define APP_VERSION_GP10X 24076634 -#define APP_VERSION_GP10B 23782727 -#define APP_VERSION_GM20B 20490253 +#define APP_VERSION_GPU_NEXT 24313845U +#define APP_VERSION_GV11B 24379482U +#define APP_VERSION_GV10X 23647491U +#define APP_VERSION_GP10X 24076634U +#define APP_VERSION_GP10B 23782727U +#define APP_VERSION_GM20B 20490253U /* PMU version 
specific functions */ static u32 pmu_perfmon_cntr_sz_v2(struct nvgpu_pmu *pmu) @@ -82,7 +82,7 @@ static void set_perfmon_cntr_group_id_v2(struct nvgpu_pmu *pmu, u8 gid) static void set_pmu_cmdline_args_falctracedmabase_v4(struct nvgpu_pmu *pmu) { - pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; + pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100U; pmu->args_v4.dma_addr.dma_base1 = 0; pmu->args_v4.dma_addr.dma_offset = 0; } @@ -182,7 +182,7 @@ static void set_pmu_cmdline_args_falctracesize_v3( static void set_pmu_cmdline_args_falctracedmabase_v3(struct nvgpu_pmu *pmu) { - pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; + pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100U; } static void set_pmu_cmdline_args_falctracedmaidx_v3( @@ -882,7 +882,7 @@ static void get_pmu_init_msg_pmu_queue_params_v4( queue->index = init->queue_index[tmp_id]; queue->size = init->queue_size[tmp_id]; - if (tmp_id != 0) { + if (tmp_id != 0U) { for (i = 0 ; i < tmp_id; i++) { current_ptr += init->queue_size[i]; } @@ -911,7 +911,7 @@ static void get_pmu_init_msg_pmu_queue_params_v5( queue->index = init->queue_index[tmp_id]; queue->size = init->queue_size[tmp_id]; - if (tmp_id != 0) { + if (tmp_id != 0U) { for (i = 0 ; i < tmp_id; i++) { current_ptr += init->queue_size[i]; } @@ -940,7 +940,7 @@ static void get_pmu_init_msg_pmu_queue_params_v3( } queue->index = init->queue_index[tmp_id]; queue->size = init->queue_size[tmp_id]; - if (tmp_id != 0) { + if (tmp_id != 0U) { for (i = 0 ; i < tmp_id; i++) { current_ptr += init->queue_size[i]; } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c index 39be07cc2..68654a707 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c @@ -184,9 +184,9 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, goto invalid_cmd; } - if ((payload->in.buf != NULL && payload->in.size == 
0) || - (payload->out.buf != NULL && payload->out.size == 0) || - (payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0)) { + if ((payload->in.buf != NULL && payload->in.size == 0U) || + (payload->out.buf != NULL && payload->out.size == 0U) || + (payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0U)) { goto invalid_cmd; } @@ -207,8 +207,8 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, } - if ((payload->in.offset != 0 && payload->in.buf == NULL) || - (payload->out.offset != 0 && payload->out.buf == NULL)) { + if ((payload->in.offset != 0U && payload->in.buf == NULL) || + (payload->out.offset != 0U && payload->out.buf == NULL)) { goto invalid_cmd; } @@ -316,7 +316,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, seq->out_payload = payload->out.buf; } - if (payload && payload->in.offset != 0) { + if (payload && payload->in.offset != 0U) { pv->set_pmu_allocation_ptr(pmu, &in, ((u8 *)&cmd->cmd + payload->in.offset)); @@ -335,7 +335,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, goto clean_up; } - if (payload->in.fb_size != 0x0) { + if (payload->in.fb_size != 0x0U) { seq->in_mem = nvgpu_kzalloc(g, sizeof(struct nvgpu_mem)); if (!seq->in_mem) { @@ -365,7 +365,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, pv->pmu_allocation_get_dmem_offset(pmu, in)); } - if (payload && payload->out.offset != 0) { + if (payload && payload->out.offset != 0U) { pv->set_pmu_allocation_ptr(pmu, &out, ((u8 *)&cmd->cmd + payload->out.offset)); pv->pmu_allocation_set_dmem_size(pmu, out, @@ -381,7 +381,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd, goto clean_up; } - if (payload->out.fb_size != 0x0) { + if (payload->out.fb_size != 0x0U) { seq->out_mem = nvgpu_kzalloc(g, sizeof(struct nvgpu_mem)); if (!seq->out_mem) { @@ -534,7 +534,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu, } } if (pv->pmu_allocation_get_dmem_size(pmu, - 
pv->get_pmu_seq_out_a_ptr(seq)) != 0) { + pv->get_pmu_seq_out_a_ptr(seq)) != 0U) { nvgpu_flcn_copy_from_dmem(pmu->flcn, pv->pmu_allocation_get_dmem_offset(pmu, pv->get_pmu_seq_out_a_ptr(seq)), @@ -546,13 +546,13 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu, seq->callback = NULL; } if (pv->pmu_allocation_get_dmem_size(pmu, - pv->get_pmu_seq_in_a_ptr(seq)) != 0) { + pv->get_pmu_seq_in_a_ptr(seq)) != 0U) { nvgpu_free(&pmu->dmem, pv->pmu_allocation_get_dmem_offset(pmu, pv->get_pmu_seq_in_a_ptr(seq))); } if (pv->pmu_allocation_get_dmem_size(pmu, - pv->get_pmu_seq_out_a_ptr(seq)) != 0) { + pv->get_pmu_seq_out_a_ptr(seq)) != 0U) { nvgpu_free(&pmu->dmem, pv->pmu_allocation_get_dmem_offset(pmu, pv->get_pmu_seq_out_a_ptr(seq))); @@ -748,7 +748,7 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms, gk20a_pmu_isr(g); } - nvgpu_usleep_range(delay, delay * 2); + nvgpu_usleep_range(delay, delay * 2U); delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); } while (!nvgpu_timeout_expired(&timeout)); diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c index 73893f2c3..5d7365918 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_perfmon.c @@ -77,7 +77,7 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu) if (!pmu->sample_buffer) { pmu->sample_buffer = nvgpu_alloc(&pmu->dmem, - 2 * sizeof(u16)); + 2U * sizeof(u16)); } if (!pmu->sample_buffer) { nvgpu_err(g, "failed to allocate perfmon sample buffer"); @@ -215,7 +215,7 @@ int nvgpu_pmu_load_norm(struct gk20a *g, u32 *load) int nvgpu_pmu_load_update(struct gk20a *g) { struct nvgpu_pmu *pmu = &g->pmu; - u16 load = 0; + u32 load = 0; if (!pmu->perfmon_ready) { pmu->load_shadow = 0; @@ -231,8 +231,8 @@ int nvgpu_pmu_load_update(struct gk20a *g) (u8 *)&load, 2 * 1, 0); } - pmu->load_shadow = load / 10; - pmu->load_avg = (((9*pmu->load_avg) + pmu->load_shadow) / 10); + pmu->load_shadow = load / 10U; + pmu->load_avg = 
(((9U*pmu->load_avg) + pmu->load_shadow) / 10U); return 0; } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c index 4978708c6..76ed06217 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c @@ -34,17 +34,17 @@ * ON => OFF is always synchronized */ /* elpg is off */ -#define PMU_ELPG_STAT_OFF 0 +#define PMU_ELPG_STAT_OFF 0U /* elpg is on */ -#define PMU_ELPG_STAT_ON 1 +#define PMU_ELPG_STAT_ON 1U /* elpg is off, ALLOW cmd has been sent, wait for ack */ -#define PMU_ELPG_STAT_ON_PENDING 2 +#define PMU_ELPG_STAT_ON_PENDING 2U /* elpg is on, DISALLOW cmd has been sent, wait for ack */ -#define PMU_ELPG_STAT_OFF_PENDING 3 +#define PMU_ELPG_STAT_OFF_PENDING 3U /* elpg is off, caller has requested on, but ALLOW * cmd hasn't been sent due to ENABLE_ALLOW delay */ -#define PMU_ELPG_STAT_OFF_ON_PENDING 4 +#define PMU_ELPG_STAT_OFF_ON_PENDING 4U #define PMU_PGENG_GR_BUFFER_IDX_INIT (0) #define PMU_PGENG_GR_BUFFER_IDX_ZBC (1) @@ -58,7 +58,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, nvgpu_log_fn(g, " "); - if (status != 0) { + if (status != 0U) { nvgpu_err(g, "ELPG cmd aborted"); /* TBD: disable ELPG */ return; @@ -174,7 +174,7 @@ static int pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id) status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg, pmu, &seq, ~0); - WARN_ON(status != 0); + WARN_ON(status != 0U); nvgpu_log_fn(g, "done"); return 0; @@ -368,7 +368,7 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg, nvgpu_log_fn(g, " "); - if (status != 0) { + if (status != 0U) { nvgpu_err(g, "ELPG cmd aborted"); /* TBD: disable ELPG */ return; @@ -507,7 +507,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, nvgpu_pmu_dbg(g, "reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS"); - if (status != 0) { + if (status != 0U) { nvgpu_err(g, "PGENG cmd 
aborted"); /* TBD: disable ELPG */ return; @@ -549,7 +549,7 @@ int nvgpu_pmu_init_bind_fecs(struct gk20a *g) g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg, u64_lo32(pmu->pg_buf.gpu_va)); g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg, - (u8)(pmu->pg_buf.gpu_va & 0xFF)); + (u8)(pmu->pg_buf.gpu_va & 0xFFU)); g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg, PMU_DMAIDX_VIRT); @@ -590,7 +590,7 @@ void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g) g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg, u64_lo32(pmu->seq_buf.gpu_va)); g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg, - (u8)(pmu->seq_buf.gpu_va & 0xFF)); + (u8)(pmu->seq_buf.gpu_va & 0xFFU)); g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg, PMU_DMAIDX_VIRT); diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmu.h index 28374b9d0..1240530f9 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmu.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmu.h @@ -84,21 +84,21 @@ #define PMU_FALCON_REG_SIZE (32) /* Choices for pmu_state */ -#define PMU_STATE_OFF 0 /* PMU is off */ -#define PMU_STATE_STARTING 1 /* PMU is on, but not booted */ -#define PMU_STATE_INIT_RECEIVED 2 /* PMU init message received */ -#define PMU_STATE_ELPG_BOOTING 3 /* PMU is booting */ -#define PMU_STATE_ELPG_BOOTED 4 /* ELPG is initialized */ -#define PMU_STATE_LOADING_PG_BUF 5 /* Loading PG buf */ -#define PMU_STATE_LOADING_ZBC 6 /* Loading ZBC buf */ -#define PMU_STATE_STARTED 7 /* Fully unitialized */ -#define PMU_STATE_EXIT 8 /* Exit PMU state machine */ +#define PMU_STATE_OFF 0U /* PMU is off */ +#define PMU_STATE_STARTING 1U /* PMU is on, but not booted */ +#define PMU_STATE_INIT_RECEIVED 2U /* PMU init message received */ +#define PMU_STATE_ELPG_BOOTING 3U /* PMU is booting */ +#define PMU_STATE_ELPG_BOOTED 4U /* ELPG is initialized */ +#define PMU_STATE_LOADING_PG_BUF 5U /* Loading PG buf */ +#define PMU_STATE_LOADING_ZBC 6U /* Loading ZBC buf */ 
+#define PMU_STATE_STARTED 7U /* Fully initialized */ +#define PMU_STATE_EXIT 8U /* Exit PMU state machine */ -#define GK20A_PMU_UCODE_NB_MAX_OVERLAY 32 -#define GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH 64 +#define GK20A_PMU_UCODE_NB_MAX_OVERLAY 32U +#define GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH 64U -#define PMU_MAX_NUM_SEQUENCES (256) -#define PMU_SEQ_BIT_SHIFT (5) +#define PMU_MAX_NUM_SEQUENCES (256U) +#define PMU_SEQ_BIT_SHIFT (5U) #define PMU_SEQ_TBL_SIZE \ (PMU_MAX_NUM_SEQUENCES >> PMU_SEQ_BIT_SHIFT) @@ -132,8 +132,8 @@ enum { #define PMU_PG_LPWR_FEATURE_RPPG 0x0 #define PMU_PG_LPWR_FEATURE_MSCG 0x1 -#define PMU_MSCG_DISABLED 0 -#define PMU_MSCG_ENABLED 1 +#define PMU_MSCG_DISABLED 0U +#define PMU_MSCG_ENABLED 1U /* Default Sampling Period of AELPG */ #define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US (1000000) @@ -350,7 +350,7 @@ struct nvgpu_pmu { u32 mscg_stat; u32 mscg_transition_state; - int pmu_state; + u32 pmu_state; #define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC 1 /* msec */ struct nvgpu_pg_init pg_init; diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_cmn.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_cmn.h index 68df80b49..e3317805b 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_cmn.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_cmn.h @@ -32,7 +32,7 @@ #define PMU_COMMAND_QUEUE_LPQ 1U /* write by pmu, read by sw, accessed by interrupt handler, no lock */ #define PMU_MESSAGE_QUEUE 4U -#define PMU_QUEUE_COUNT 5 +#define PMU_QUEUE_COUNT 5U #define PMU_IS_COMMAND_QUEUE(id) \ ((id) < PMU_MESSAGE_QUEUE) diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_perfmon.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_perfmon.h index 91e89365d..ba6e9ec85 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_perfmon.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_perfmon.h @@ -32,7 +32,7 @@ #define PMU_PERFMON_FLAG_ENABLE_DECREASE (0x00000002) #define PMU_PERFMON_FLAG_CLEAR_PREV (0x00000004) -#define NV_PMU_PERFMON_MAX_COUNTERS 10 +#define 
NV_PMU_PERFMON_MAX_COUNTERS 10U enum pmu_perfmon_cmd_start_fields { COUNTER_ALLOC diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_pg.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_pg.h index 1ba9963ce..c156a6c07 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_pg.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_pg.h @@ -28,9 +28,9 @@ /*PG defines*/ /* Identifier for each PG */ -#define PMU_PG_ELPG_ENGINE_ID_GRAPHICS (0x00000000) -#define PMU_PG_ELPG_ENGINE_ID_MS (0x00000004) -#define PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE (0x00000005) +#define PMU_PG_ELPG_ENGINE_ID_GRAPHICS (0x00000000U) +#define PMU_PG_ELPG_ENGINE_ID_MS (0x00000004U) +#define PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE (0x00000005U) #define PMU_PG_ELPG_ENGINE_MAX PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE /* PG message */ @@ -173,23 +173,23 @@ enum { SLOWDOWN_FACTOR_FPDIV_BYMAX, }; -#define PMU_PG_PARAM_CMD_GR_INIT_PARAM 0x0 -#define PMU_PG_PARAM_CMD_MS_INIT_PARAM 0x01 -#define PMU_PG_PARAM_CMD_MCLK_CHANGE 0x04 -#define PMU_PG_PARAM_CMD_POST_INIT 0x06 -#define PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE 0x07 +#define PMU_PG_PARAM_CMD_GR_INIT_PARAM 0x0U +#define PMU_PG_PARAM_CMD_MS_INIT_PARAM 0x01U +#define PMU_PG_PARAM_CMD_MCLK_CHANGE 0x04U +#define PMU_PG_PARAM_CMD_POST_INIT 0x06U +#define PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE 0x07U -#define NVGPU_PMU_GR_FEATURE_MASK_SDIV_SLOWDOWN (1 << 0) -#define NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING (1 << 2) -#define NVGPU_PMU_GR_FEATURE_MASK_RPPG (1 << 3) -#define NVGPU_PMU_GR_FEATURE_MASK_PRIV_RING (1 << 5) -#define NVGPU_PMU_GR_FEATURE_MASK_UNBIND (1 << 6) -#define NVGPU_PMU_GR_FEATURE_MASK_SAVE_GLOBAL_STATE (1 << 7) -#define NVGPU_PMU_GR_FEATURE_MASK_RESET_ENTRY (1 << 8) -#define NVGPU_PMU_GR_FEATURE_MASK_HW_SEQUENCE (1 << 9) -#define NVGPU_PMU_GR_FEATURE_MASK_ELPG_SRAM (1 << 10) -#define NVGPU_PMU_GR_FEATURE_MASK_ELPG_LOGIC (1 << 11) -#define NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG (1 << 12) +#define NVGPU_PMU_GR_FEATURE_MASK_SDIV_SLOWDOWN 
BIT32(0) +#define NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING BIT32(2) +#define NVGPU_PMU_GR_FEATURE_MASK_RPPG BIT32(3) +#define NVGPU_PMU_GR_FEATURE_MASK_PRIV_RING BIT32(5) +#define NVGPU_PMU_GR_FEATURE_MASK_UNBIND BIT32(6) +#define NVGPU_PMU_GR_FEATURE_MASK_SAVE_GLOBAL_STATE BIT32(7) +#define NVGPU_PMU_GR_FEATURE_MASK_RESET_ENTRY BIT32(8) +#define NVGPU_PMU_GR_FEATURE_MASK_HW_SEQUENCE BIT32(9) +#define NVGPU_PMU_GR_FEATURE_MASK_ELPG_SRAM BIT32(10) +#define NVGPU_PMU_GR_FEATURE_MASK_ELPG_LOGIC BIT32(11) +#define NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG BIT32(12) #define NVGPU_PMU_GR_FEATURE_MASK_ALL \ ( \ @@ -206,10 +206,10 @@ enum { NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG \ ) -#define NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING (1 << 0) -#define NVGPU_PMU_MS_FEATURE_MASK_SW_ASR (1 << 1) -#define NVGPU_PMU_MS_FEATURE_MASK_RPPG (1 << 8) -#define NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING (1 << 5) +#define NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING BIT32(0) +#define NVGPU_PMU_MS_FEATURE_MASK_SW_ASR BIT32(1) +#define NVGPU_PMU_MS_FEATURE_MASK_RPPG BIT32(8) +#define NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING BIT32(5) #define NVGPU_PMU_MS_FEATURE_MASK_ALL \ ( \ diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h index 1a05ec290..06486006a 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h @@ -59,7 +59,7 @@ struct nv_pmu_rpc_cmd { u32 rpc_dmem_ptr; }; -#define NV_PMU_RPC_CMD_ID 0x80 +#define NV_PMU_RPC_CMD_ID 0x80U /* Message carrying the result of the RPC execution */ struct nv_pmu_rpc_msg { @@ -79,7 +79,7 @@ struct nv_pmu_rpc_msg { u32 rpc_dmem_ptr; }; -#define NV_PMU_RPC_MSG_ID 0x80 +#define NV_PMU_RPC_MSG_ID 0x80U struct pmu_cmd { struct pmu_hdr hdr; @@ -116,26 +116,26 @@ struct pmu_msg { } msg; }; -#define PMU_UNIT_REWIND (0x00) -#define PMU_UNIT_PG (0x03) -#define PMU_UNIT_INIT (0x07) -#define PMU_UNIT_ACR (0x0A) -#define 
PMU_UNIT_PERFMON_T18X (0x11) -#define PMU_UNIT_PERFMON (0x12) -#define PMU_UNIT_PERF (0x13) -#define PMU_UNIT_RC (0x1F) -#define PMU_UNIT_FECS_MEM_OVERRIDE (0x1E) -#define PMU_UNIT_CLK (0x0D) -#define PMU_UNIT_THERM (0x14) -#define PMU_UNIT_PMGR (0x18) -#define PMU_UNIT_VOLT (0x0E) +#define PMU_UNIT_REWIND (0x00U) +#define PMU_UNIT_PG (0x03U) +#define PMU_UNIT_INIT (0x07U) +#define PMU_UNIT_ACR (0x0AU) +#define PMU_UNIT_PERFMON_T18X (0x11U) +#define PMU_UNIT_PERFMON (0x12U) +#define PMU_UNIT_PERF (0x13U) +#define PMU_UNIT_RC (0x1FU) +#define PMU_UNIT_FECS_MEM_OVERRIDE (0x1EU) +#define PMU_UNIT_CLK (0x0DU) +#define PMU_UNIT_THERM (0x14U) +#define PMU_UNIT_PMGR (0x18U) +#define PMU_UNIT_VOLT (0x0EU) -#define PMU_UNIT_END (0x23) -#define PMU_UNIT_INVALID (0xFF) +#define PMU_UNIT_END (0x23U) +#define PMU_UNIT_INVALID (0xFFU) -#define PMU_UNIT_TEST_START (0xFE) -#define PMU_UNIT_END_SIM (0xFF) -#define PMU_UNIT_TEST_END (0xFF) +#define PMU_UNIT_TEST_START (0xFEU) +#define PMU_UNIT_END_SIM (0xFFU) +#define PMU_UNIT_TEST_END (0xFFU) #define PMU_UNIT_ID_IS_VALID(id) \ (((id) < PMU_UNIT_END) || ((id) >= PMU_UNIT_TEST_START))