Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: MISRA 14.4 bitwise operation as boolean
MISRA rule 14.4 doesn't allow the use of integer types as booleans in the
controlling expression of an if statement or an iteration statement. Fix
violations where the result of a bitwise operation is used as a boolean in
the controlling expression of if and loop statements.

JIRA NVGPU-1020

Change-Id: I6a756ee1bbb45d43f424d2251eebbc26278db417
Signed-off-by: Amurthyreddy <amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1936334
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: b68e465fab
Commit: 23f35e1b2f
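Every hunk below applies the same transformation, so as a quick orientation here is a minimal, self-contained C sketch of the pattern. It is illustrative only, not code from this patch; the FLAG_ENABLE constant and flag_is_set() helper are made up.

/*
 * MISRA 14.4: a bitwise AND yields an integer, not a boolean, so the
 * result is compared against 0 of the matching width instead of being
 * used directly as the controlling expression.
 */
#include <stdbool.h>
#include <stdint.h>

#define FLAG_ENABLE (UINT32_C(1) << 0)

static bool flag_is_set(uint32_t flags)
{
    /* Non-compliant: if (flags & FLAG_ENABLE) { ... } */
    if ((flags & FLAG_ENABLE) != 0U) {  /* compliant form */
        return true;
    }
    return false;
}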
@@ -1295,7 +1295,7 @@ void gv11b_fb_handle_dropped_mmu_fault(struct gk20a *g, u32 fault_status)
         fb_mmu_fault_status_dropped_other_phys_set_f()|
         fb_mmu_fault_status_dropped_other_virt_set_f();
 
-    if (fault_status & dropped_faults) {
+    if ((fault_status & dropped_faults) != 0U) {
         nvgpu_err(g, "dropped mmu fault (0x%08x)",
                 fault_status & dropped_faults);
         g->ops.fb.write_mmu_fault_status(g, dropped_faults);
@@ -1120,11 +1120,11 @@ int nvgpu_channel_setup_bind(struct channel_gk20a *c,
     gpfifo_size = args->num_gpfifo_entries;
     gpfifo_entry_size = nvgpu_get_gpfifo_entry_size();
 
-    if (args->flags & NVGPU_SETUP_BIND_FLAGS_SUPPORT_VPR) {
+    if ((args->flags & NVGPU_SETUP_BIND_FLAGS_SUPPORT_VPR) != 0U) {
         c->vpr = true;
     }
 
-    if (args->flags & NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC) {
+    if ((args->flags & NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC) != 0U) {
         nvgpu_rwsem_down_read(&g->deterministic_busy);
         /*
          * Railgating isn't deterministic; instead of disallowing
@@ -1162,7 +1162,7 @@ int nvgpu_channel_setup_bind(struct channel_gk20a *c,
         goto clean_up_idle;
     }
 
-    if (args->flags & NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT) {
+    if ((args->flags & NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT) != 0U) {
         if (g->os_channel.alloc_usermode_buffers != NULL) {
             err = g->os_channel.alloc_usermode_buffers(c, args);
             if (err != 0) {
@@ -274,7 +274,7 @@ static void nvgpu_bitmap_free(struct nvgpu_allocator *na, u64 addr)
 
     alloc_lock(na);
 
-    if (a->flags & GPU_ALLOC_NO_ALLOC_PAGE) {
+    if ((a->flags & GPU_ALLOC_NO_ALLOC_PAGE) != 0ULL) {
         (void) WARN(1,
             "Using wrong free for NO_ALLOC_PAGE bitmap allocator");
         goto done;
@@ -917,11 +917,11 @@ static u64 nvgpu_balloc_fixed_buddy_locked(struct nvgpu_allocator *na,
     struct nvgpu_buddy_allocator *a = na->priv;
 
     /* If base isn't aligned to an order 0 block, fail. */
-    if (base & (a->blk_size - 1U)) {
+    if ((base & (a->blk_size - 1ULL)) != 0ULL) {
         goto fail;
     }
 
-    if (len == 0U) {
+    if (len == 0ULL) {
         goto fail;
     }
 
@@ -682,7 +682,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 
     page_size = vm->gmmu_page_sizes[attrs->pgsz];
 
-    if (space_to_skip & (U64(page_size) - U64(1))) {
+    if ((space_to_skip & (U64(page_size) - U64(1))) != 0ULL) {
         return -EINVAL;
     }
 
@@ -710,7 +710,7 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *na, u64 len)
     }
     alloc_unlock(na);
 
-    if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
+    if ((a->flags & GPU_ALLOC_NO_SCATTER_GATHER) != 0ULL) {
         return alloc->base;
     } else {
         return (u64) (uintptr_t) alloc;
@@ -728,7 +728,7 @@ static void nvgpu_page_free(struct nvgpu_allocator *na, u64 base)
 
     alloc_lock(na);
 
-    if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
+    if ((a->flags & GPU_ALLOC_NO_SCATTER_GATHER) != 0ULL) {
         alloc = find_page_alloc(a, base);
     } else {
         alloc = find_page_alloc(a,
@@ -841,7 +841,7 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *na,
     a->nr_fixed_allocs++;
     a->pages_alloced += pages;
 
-    if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
+    if ((a->flags & GPU_ALLOC_NO_SCATTER_GATHER) != 0ULL) {
         return alloc->base;
     } else {
         return (u64) (uintptr_t) alloc;
@@ -856,7 +856,7 @@ static void nvgpu_page_free_fixed(struct nvgpu_allocator *na,
 
     alloc_lock(na);
 
-    if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
+    if ((a->flags & GPU_ALLOC_NO_SCATTER_GATHER) != 0ULL) {
         alloc = find_page_alloc(a, base);
         if (alloc == NULL) {
             goto done;
@@ -913,7 +913,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
     /*
      * Check if we should use a fixed offset for mapping this buffer.
      */
-    if (flags & NVGPU_VM_MAP_FIXED_OFFSET) {
+    if ((flags & NVGPU_VM_MAP_FIXED_OFFSET) != 0U) {
         err = nvgpu_vm_area_validate_buffer(vm,
                         map_addr,
                         map_size,
@@ -1212,7 +1212,7 @@ void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset,
         goto done;
     }
 
-    if (mapped_buffer->flags & NVGPU_VM_MAP_FIXED_OFFSET) {
+    if ((mapped_buffer->flags & NVGPU_VM_MAP_FIXED_OFFSET) != 0U) {
         if (nvgpu_vm_unmap_sync_buffer(vm, mapped_buffer) != 0) {
             /*
              * Looks like we have failed... Better not continue in
@@ -57,7 +57,7 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
         return -EINVAL;
     }
 
-    if (map_addr & (U64(vm->gmmu_page_sizes[pgsz_idx]) - U64(1))) {
+    if ((map_addr & (U64(vm->gmmu_page_sizes[pgsz_idx]) - U64(1))) != 0ULL) {
         nvgpu_err(g, "map offset must be buffer page size aligned 0x%llx",
                 map_addr);
         return -EINVAL;
@@ -143,7 +143,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
     }
 
     vma = vm->vma[pgsz_idx];
-    if (flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) {
+    if ((flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) != 0U) {
         vaddr_start = nvgpu_alloc_fixed(vma, our_addr,
                         (u64)pages *
                         (u64)page_size,
@@ -168,7 +168,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
 
     nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 
-    if (flags & NVGPU_VM_AREA_ALLOC_SPARSE) {
+    if ((flags & NVGPU_VM_AREA_ALLOC_SPARSE) != 0U) {
         u64 map_addr = g->ops.mm.gmmu_map(vm, vaddr_start,
                         NULL,
                         0,
@@ -643,7 +643,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
         return 0;
     }
 
-    if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
+    if ((pmu->pmu_mode & PMU_LSFM_MANAGED) != 0U) {
         nvgpu_pmu_dbg(g, "pmu write flcn bl gen desc\n");
         if (pnode->wpr_header.falcon_id == pmu->falcon_id) {
             return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
@@ -816,7 +816,7 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
         return 0;
     }
 
-    if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
+    if ((pmu->pmu_mode & PMU_LSFM_MANAGED) != 0U) {
         gp106_dbg_pmu(g, "pmu write flcn bl gen desc\n");
         if (pnode->wpr_header.falcon_id == pmu->falcon_id) {
             return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
@@ -673,7 +673,7 @@ bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu)
         pwr_falcon_irqstat_exterr_true_f() |
         pwr_falcon_irqstat_swgen0_true_f();
 
-    if (gk20a_readl(g, pwr_falcon_irqstat_r()) & servicedpmuint) {
+    if ((gk20a_readl(g, pwr_falcon_irqstat_r()) & servicedpmuint) != 0U) {
         return true;
     }
 
@@ -278,8 +278,9 @@ int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
     if (falconidmask == 0) {
         return -EINVAL;
     }
-    if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) |
-                (1 << LSF_FALCON_ID_GPCCS))) {
+    if ((falconidmask &
+        ~(BIT32(LSF_FALCON_ID_FECS) |
+          BIT32(LSF_FALCON_ID_GPCCS))) != 0U) {
         return -EINVAL;
     }
     g->pmu_lsf_loaded_falcon_id = 0;
@@ -181,8 +181,9 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
     if (falconidmask == 0) {
         return -EINVAL;
     }
-    if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) |
-                (1 << LSF_FALCON_ID_GPCCS))) {
+    if ((falconidmask &
+        ~(BIT32(LSF_FALCON_ID_FECS) |
+          BIT32(LSF_FALCON_ID_GPCCS))) != 0U) {
         return -EINVAL;
     }
     g->pmu_lsf_loaded_falcon_id = 0;
@@ -57,8 +57,9 @@ int gv100_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
         return -EINVAL;
     }
 
-    if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) |
-                (1 << LSF_FALCON_ID_GPCCS))) {
+    if ((falconidmask &
+        ~(BIT32(LSF_FALCON_ID_FECS) |
+          BIT32(LSF_FALCON_ID_GPCCS))) != 0U) {
         return -EINVAL;
     }
 
@@ -234,7 +234,7 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g)
             continue;
         }
 
-        if (BIT(pg_engine_id) & pg_engine_id_list) {
+        if ((BIT32(pg_engine_id) & pg_engine_id_list) != 0U) {
             ret = pmu_enable_elpg_locked(g, pg_engine_id);
         }
     }
@@ -313,7 +313,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
             continue;
         }
 
-        if (BIT(pg_engine_id) & pg_engine_id_list) {
+        if ((BIT32(pg_engine_id) & pg_engine_id_list) != 0U) {
             (void) memset(&cmd, 0, sizeof(struct pmu_cmd));
             cmd.hdr.unit_id = PMU_UNIT_PG;
             cmd.hdr.size = PMU_CMD_HDR_SIZE +
@@ -479,7 +479,7 @@ int nvgpu_pmu_init_powergating(struct gk20a *g)
         pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
         pg_engine_id++) {
 
-        if (BIT(pg_engine_id) & pg_engine_id_list) {
+        if ((BIT32(pg_engine_id) & pg_engine_id_list) != 0U) {
             if (pmu != NULL &&
                 pmu->pmu_state == PMU_STATE_INIT_RECEIVED) {
                 nvgpu_pmu_state_change(g,
@@ -622,7 +622,7 @@ int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
         pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
     }
 
-    if (BIT(pg_engine_id) & pg_engine_id_list) {
+    if ((BIT32(pg_engine_id) & pg_engine_id_list) != 0U) {
         g->ops.pmu.pmu_elpg_statistics(g, pg_engine_id,
             pg_stat_data);
     }
@@ -81,7 +81,7 @@ void gm20b_priv_ring_isr(struct gk20a *g)
     }
 
     for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {
-        if (status1 & BIT(gpc)) {
+        if ((status1 & BIT32(gpc)) != 0U) {
             nvgpu_log(g, gpu_dbg_intr, "GPC%u write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", gpc,
                 gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_adr_r() + gpc * gpc_priv_stride),
                 gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_wrdat_r() + gpc * gpc_priv_stride),
@@ -156,7 +156,7 @@ void gp10b_priv_ring_isr(struct gk20a *g)
     gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_PRIV_STRIDE);
     for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {
         offset = gpc * gpc_stride;
-        if (status1 & BIT(gpc)) {
+        if ((status1 & BIT32(gpc)) != 0U) {
             error_info = gk20a_readl(g,
                 pri_ringstation_gpc_gpc0_priv_error_info_r() + offset);
             error_code = gk20a_readl(g,
@@ -48,8 +48,8 @@ int gp106_get_internal_sensor_curr_temp(struct gk20a *g, u32 *temp_f24_8)
         nvgpu_err(g,
             "Attempt to read temperature while sensor is OFF!");
         err = -EINVAL;
-    } else if (therm_temp_sensor_tsense_state_v(readval) &
-            therm_temp_sensor_tsense_state_shadow_v()) {
+    } else if ((therm_temp_sensor_tsense_state_v(readval) &
+            therm_temp_sensor_tsense_state_shadow_v()) != 0U) {
         nvgpu_err(g, "Reading temperature from SHADOWed sensor!");
     }
 
@@ -749,7 +749,7 @@ void *nvgpu_bios_get_perf_table_ptrs(struct gk20a *g,
 
     if (perf_table_id_offset != 0U) {
         /* check is perf_table_id_offset is > 64k */
-        if (perf_table_id_offset & ~0xFFFFU) {
+        if ((perf_table_id_offset & ~0xFFFFU) != 0U) {
             perf_table_ptr =
                 &g->bios.data[g->bios.expansion_rom_offset +
                     perf_table_id_offset];
@@ -394,13 +394,13 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
                 top_device_info_runlist_enum_v(table_entry);
             nvgpu_log_info(g, "gr info: runlist_id %d", runlist_id);
 
-            runlist_bit = BIT(runlist_id);
+            runlist_bit = BIT32(runlist_id);
 
             found_pbdma_for_runlist = false;
             for (pbdma_id = 0; pbdma_id < f->num_pbdma;
                     pbdma_id++) {
-                if (f->pbdma_map[pbdma_id] &
-                        runlist_bit) {
+                if ((f->pbdma_map[pbdma_id] &
-                        runlist_bit) != 0U) {
                     nvgpu_log_info(g,
                         "gr info: pbdma_map[%d]=%d",
                         pbdma_id,
@@ -685,8 +685,8 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
     runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
 
     for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
-        if (f->pbdma_map[pbdma_id] & BIT(runlist_id)) {
-            runlist->pbdma_bitmask |= BIT(pbdma_id);
+        if ((f->pbdma_map[pbdma_id] & BIT32(runlist_id)) != 0U) {
+            runlist->pbdma_bitmask |= BIT32(pbdma_id);
         }
     }
     nvgpu_log(g, gpu_dbg_info, "runlist %d : pbdma bitmask 0x%x",
@@ -1504,7 +1504,7 @@ int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
      */
 
     for_each_set_bit(engine_id, &g->fifo.deferred_fault_engines, 32UL) {
-        if (BIT64(engine_id) & engines) {
+        if ((BIT64(engine_id) & engines) != 0ULL) {
            gk20a_fifo_reset_engine(g, (u32)engine_id);
         }
     }
@@ -2189,7 +2189,7 @@ u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g,
         } else if (ctx_status ==
                 fifo_engine_status_ctx_status_ctxsw_switch_v()) {
             mailbox2 = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(2));
-            if (mailbox2 & FECS_METHOD_WFI_RESTORE) {
+            if ((mailbox2 & FECS_METHOD_WFI_RESTORE) != 0U) {
                 id = fifo_engine_status_next_id_v(status);
                 is_tsg = fifo_engine_status_next_id_type_v(status) !=
                     fifo_engine_status_next_id_type_chid_v();
@@ -2484,9 +2484,9 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
     unsigned long pbdma_intr_err;
     unsigned long bit;
 
-    if ((f->intr.pbdma.device_fatal_0 |
-         f->intr.pbdma.channel_fatal_0 |
-         f->intr.pbdma.restartable_0) & pbdma_intr_0) {
+    if (((f->intr.pbdma.device_fatal_0 |
+          f->intr.pbdma.channel_fatal_0 |
+          f->intr.pbdma.restartable_0) & pbdma_intr_0) != 0U) {
 
         pbdma_intr_err = (unsigned long)pbdma_intr_0;
         for_each_set_bit(bit, &pbdma_intr_err, 32U) {
@@ -173,7 +173,7 @@ static int flcn_mem_overflow_check(struct nvgpu_falcon *flcn,
         return -EINVAL;
     }
 
-    if (offset & 0x3U) {
+    if ((offset & 0x3U) != 0U) {
         nvgpu_err(g, "offset (0x%08x) not 4-byte aligned", offset);
         return -EINVAL;
     }
@@ -530,7 +530,7 @@ static void gk20a_falcon_dump_pc_trace(struct nvgpu_falcon *flcn)
     u32 pc = 0;
     u32 i = 0;
 
-    if (gk20a_readl(g, base_addr + falcon_falcon_sctl_r()) & 0x02U) {
+    if ((gk20a_readl(g, base_addr + falcon_falcon_sctl_r()) & 0x02U) != 0U) {
         nvgpu_err(g, " falcon is in HS mode, PC TRACE dump not supported");
         return;
     }
@@ -377,7 +377,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
             }
             break;
         case GR_IS_UCODE_OP_AND:
-            if (reg & mailbox_ok) {
+            if ((reg & mailbox_ok) != 0U) {
                 check = WAIT_UCODE_OK;
             }
             break;
@@ -414,7 +414,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
             }
             break;
         case GR_IS_UCODE_OP_AND:
-            if (reg & mailbox_fail) {
+            if ((reg & mailbox_fail) != 0U) {
                 check = WAIT_UCODE_ERROR;
             }
             break;
@@ -3540,7 +3540,7 @@ static int gr_gk20a_init_map_tiles(struct gk20a *g, struct gr_gk20a *gr)
     }
 
     mul_factor = S32(gr->gpc_count) * S32(max_tpc_count);
-    if (mul_factor & 0x1) {
+    if ((mul_factor & 0x1) != 0) {
         mul_factor = 2;
     } else {
         mul_factor = 1;
@@ -5191,7 +5191,8 @@ int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch,
         return 0;
     }
 
-    if (gr_fecs_intr & gr_fecs_host_int_status_umimp_firmware_method_f(1)) {
+    if ((gr_fecs_intr &
+            gr_fecs_host_int_status_umimp_firmware_method_f(1)) != 0U) {
         gk20a_gr_set_error_notifier(g, isr_data,
             NVGPU_ERR_NOTIFIER_FECS_ERR_UNIMP_FIRMWARE_METHOD);
         nvgpu_err(g,
@@ -357,7 +357,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
     offset = op->offset;
 
     /* support only 24-bit 4-byte aligned offsets */
-    if (offset & 0xFF000003) {
+    if ((offset & 0xFF000003U) != 0U) {
         nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x", offset);
         op->status |= REGOP(STATUS_INVALID_OFFSET);
         return -EINVAL;
@@ -43,9 +43,9 @@
 
 #define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */
 #define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */
-#define DFS_TESTOUT_DET BIT(0)
-#define DFS_EXT_CAL_EN BIT(9)
-#define DFS_EXT_STROBE BIT(16)
+#define DFS_TESTOUT_DET BIT32(0)
+#define DFS_EXT_CAL_EN BIT32(9)
+#define DFS_EXT_STROBE BIT32(16)
 
 #define BOOT_GPU_UV_B1 1000000 /* gpu rail boot voltage 1.0V */
 #define BOOT_GPU_UV_C1 800000 /* gpu rail boot voltage 0.8V */
@@ -448,7 +448,7 @@ static void clk_set_dfs_ext_cal(struct gk20a *g, u32 dfs_det_cal)
     data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
     nvgpu_udelay(1);
     ctrl = trim_sys_gpcpll_dvfs1_dfs_ctrl_v(data);
-    if (~ctrl & DFS_EXT_CAL_EN) {
+    if ((~ctrl & DFS_EXT_CAL_EN) != 0U) {
         data = set_field(data, trim_sys_gpcpll_dvfs1_dfs_ctrl_m(),
             trim_sys_gpcpll_dvfs1_dfs_ctrl_f(
                 ctrl | DFS_EXT_CAL_EN | DFS_TESTOUT_DET));
@@ -644,7 +644,7 @@ void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data)
     nvgpu_log_fn(g, " ");
 
     val = gk20a_readl(g, gr_bes_crop_debug3_r());
-    if ((data & 1)) {
+    if ((data & 1U) != 0U) {
         val = set_field(val,
             gr_bes_crop_debug3_blendopt_read_suppress_m(),
             gr_bes_crop_debug3_blendopt_read_suppress_enabled_f());
@@ -669,11 +669,11 @@ void gr_gp10b_set_bes_crop_debug4(struct gk20a *g, u32 data)
     nvgpu_log_fn(g, " ");
 
     val = gk20a_readl(g, gr_bes_crop_debug4_r());
-    if (data & NVC097_BES_CROP_DEBUG4_CLAMP_FP_BLEND_TO_MAXVAL) {
+    if ((data & NVC097_BES_CROP_DEBUG4_CLAMP_FP_BLEND_TO_MAXVAL) != 0U) {
         val = set_field(val,
             gr_bes_crop_debug4_clamp_fp_blend_m(),
             gr_bes_crop_debug4_clamp_fp_blend_to_maxval_f());
-    } else if (data & NVC097_BES_CROP_DEBUG4_CLAMP_FP_BLEND_TO_INF) {
+    } else if ((data & NVC097_BES_CROP_DEBUG4_CLAMP_FP_BLEND_TO_INF) != 0U) {
         val = set_field(val,
             gr_bes_crop_debug4_clamp_fp_blend_m(),
             gr_bes_crop_debug4_clamp_fp_blend_to_inf_f());
@@ -1100,10 +1100,10 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 
     gr_ctx->ctx_id_valid = false;
 
-    if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP) {
+    if ((flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP) != 0U) {
         graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
     }
-    if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP) {
+    if ((flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP) != 0U) {
         compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
     }
 
@@ -1996,7 +1996,7 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
      * INTR1 (bit 1 of the HOST_INT_STATUS_CTXSW_INTR)
      * indicates that a CILP ctxsw save has finished
      */
-    if (gr_fecs_intr & gr_fecs_host_int_status_ctxsw_intr_f(2)) {
+    if ((gr_fecs_intr & gr_fecs_host_int_status_ctxsw_intr_f(2)) != 0U) {
         nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
                 "CILP: ctxsw save completed!\n");
 
@@ -53,8 +53,8 @@ enum {
 #define NVC0C0_SET_SHADER_EXCEPTIONS 0x1528
 #define NVC0C0_SET_RD_COALESCE 0x0228
 
-#define NVC097_BES_CROP_DEBUG4_CLAMP_FP_BLEND_TO_INF 0x0
-#define NVC097_BES_CROP_DEBUG4_CLAMP_FP_BLEND_TO_MAXVAL 0x1
+#define NVC097_BES_CROP_DEBUG4_CLAMP_FP_BLEND_TO_INF 0x0U
+#define NVC097_BES_CROP_DEBUG4_CLAMP_FP_BLEND_TO_MAXVAL 0x1U
 
 int gr_gp10b_init_fs_state(struct gk20a *g);
 int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
@@ -388,7 +388,7 @@ int gr_gv100_add_ctxsw_reg_pm_fbpa(struct gk20a *g,
 
     for (idx = 0; idx < regs->count; idx++) {
         for (fbpa_id = 0; fbpa_id < num_fbpas; fbpa_id++) {
-            if (active_fbpa_mask & BIT(fbpa_id)) {
+            if ((active_fbpa_mask & BIT32(fbpa_id)) != 0U) {
                 map[cnt].addr = base +
                     (regs->l[idx].addr & mask) +
                     (fbpa_id * stride);
@@ -422,7 +422,7 @@ void gr_gv100_split_fbpa_broadcast_addr(struct gk20a *g, u32 addr,
     active_fbpa_mask = gr_gv100_get_active_fpba_mask(g);
 
     for (fbpa_id = 0; fbpa_id < num_fbpas; fbpa_id++) {
-        if (active_fbpa_mask & BIT(fbpa_id)) {
+        if ((active_fbpa_mask & BIT32(fbpa_id)) != 0U) {
            priv_addr_table[(*t)++] = pri_fbpa_addr(g,
                 pri_fbpa_addr_mask(g, addr), fbpa_id);
         }
@@ -758,12 +758,12 @@ static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
 
             runlist = &f->runlist_info[rlid];
 
-            if (runlist->eng_bitmask & act_eng_bitmask) {
+            if ((runlist->eng_bitmask & act_eng_bitmask) != 0U) {
                 runlists_mask |=
                     fifo_sched_disable_runlist_m(rlid);
             }
 
-            if (runlist->pbdma_bitmask & pbdma_bitmask) {
+            if ((runlist->pbdma_bitmask & pbdma_bitmask) != 0U) {
                 runlists_mask |=
                     fifo_sched_disable_runlist_m(rlid);
             }
@@ -775,7 +775,7 @@ static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
         if (act_eng_bitmask != 0U) {
             /* eng ids are known */
             runlist = &f->runlist_info[rlid];
-            if (runlist->eng_bitmask & act_eng_bitmask) {
+            if ((runlist->eng_bitmask & act_eng_bitmask) != 0U) {
                 runlists_mask |=
                     fifo_sched_disable_runlist_m(rlid);
             }
@@ -1074,7 +1074,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
         if (act_eng_bitmask != 0U) {
             /* eng ids are known */
             runlist = &f->runlist_info[rlid];
-            if (runlist->eng_bitmask & act_eng_bitmask) {
+            if ((runlist->eng_bitmask & act_eng_bitmask) != 0U) {
                 runlist_id = rlid;
                 num_runlists++;
             }
@@ -1608,9 +1608,9 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
     for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) {
         active_eng_id = g->fifo.active_engines_list[engine_id];
 
-        if (ctxsw_timeout_engines &
+        if ((ctxsw_timeout_engines &
             fifo_intr_ctxsw_timeout_engine_pending_f(
-                active_eng_id)) {
+                active_eng_id)) != 0U) {
 
             struct fifo_gk20a *f = &g->fifo;
             u32 ms = 0;
@@ -1279,7 +1279,7 @@ void gr_gv11b_set_shader_cut_collector(struct gk20a *g, u32 data)
     nvgpu_log_fn(g, "gr_gv11b_set_shader_cut_collector");
 
     val = gk20a_readl(g, gr_gpcs_tpcs_sm_l1tag_ctrl_r());
-    if (data & NVC397_SET_SHADER_CUT_COLLECTOR_STATE_ENABLE) {
+    if ((data & NVC397_SET_SHADER_CUT_COLLECTOR_STATE_ENABLE) != 0U) {
         val = set_field(val,
             gr_gpcs_tpcs_sm_l1tag_ctrl_always_cut_collector_m(),
             gr_gpcs_tpcs_sm_l1tag_ctrl_always_cut_collector_enable_f());
@@ -74,8 +74,8 @@ enum {
 #define NVC397_SET_SKEDCHECK_18_DISABLE 0x1
 #define NVC397_SET_SKEDCHECK_18_ENABLE 0x2
 
-#define NVC397_SET_SHADER_CUT_COLLECTOR_STATE_DISABLE 0x0
-#define NVC397_SET_SHADER_CUT_COLLECTOR_STATE_ENABLE 0x1
+#define NVC397_SET_SHADER_CUT_COLLECTOR_STATE_DISABLE 0x0U
+#define NVC397_SET_SHADER_CUT_COLLECTOR_STATE_ENABLE 0x1U
 
 #define NVC3C0_SET_SKEDCHECK 0x23c
 #define NVC3C0_SET_SHADER_CUT_COLLECTOR 0x250
@@ -47,13 +47,13 @@
  * Falcon Id Defines
  * Defines a common Light Secure Falcon identifier.
  */
-#define LSF_FALCON_ID_PMU (0)
-#define LSF_FALCON_ID_GSPLITE (1)
-#define LSF_FALCON_ID_FECS (2)
-#define LSF_FALCON_ID_GPCCS (3)
-#define LSF_FALCON_ID_SEC2 (7)
-#define LSF_FALCON_ID_END (11)
-#define LSF_FALCON_ID_INVALID (0xFFFFFFFF)
+#define LSF_FALCON_ID_PMU 0U
+#define LSF_FALCON_ID_GSPLITE 1U
+#define LSF_FALCON_ID_FECS 2U
+#define LSF_FALCON_ID_GPCCS 3U
+#define LSF_FALCON_ID_SEC2 7U
+#define LSF_FALCON_ID_END 11U
+#define LSF_FALCON_ID_INVALID 0xFFFFFFFFU
 
 /*
  * Light Secure Falcon Ucode Description Defines
@@ -410,20 +410,25 @@ void boardobjgrpe32hdrset(struct nv_pmu_boardobjgrp *hdr, u32 objmask);
     (n32) = ((((n32) + ((n32) >> 4U)) & 0x0F0F0F0FU) * 0x01010101U) >> 24U;\
 }
 
-#define IDX_32(n32) \
-{ \
-    u32 idx = 0U; \
-    if ((n32) & 0xFFFF0000U) \
-        idx += 16U; \
-    if ((n32) & 0xFF00FF00U) \
-        idx += 8U; \
-    if ((n32) & 0xF0F0F0F0U) \
-        idx += 4U; \
-    if ((n32) & 0xCCCCCCCCU) \
-        idx += 2U; \
-    if ((n32) & 0xAAAAAAAAU) \
-        idx += 1U; \
-    (n32) = idx; \
+#define IDX_32(n32) \
+{ \
+    u32 idx = 0U; \
+    if (((n32) & 0xFFFF0000U) != 0U) { \
+        idx += 16U; \
+    } \
+    if (((n32) & 0xFF00FF00U) != 0U) { \
+        idx += 8U; \
+    } \
+    if (((n32) & 0xF0F0F0F0U) != 0U) { \
+        idx += 4U; \
+    } \
+    if (((n32) & 0xCCCCCCCCU) != 0U) { \
+        idx += 2U; \
+    } \
+    if (((n32) & 0xAAAAAAAAU) != 0U) { \
+        idx += 1U; \
+    } \
+    (n32) = idx; \
 }
 
 static inline struct boardobjgrp *
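A side note on the rewritten IDX_32 macro above: for an input with a single bit set it still replaces the value with that bit's index; only the bracing and the explicit != 0U comparisons are new. Below is a minimal standalone check of that behaviour. The macro body is copied from the new side of the hunk; the u32 typedef and the main() harness are added here purely for illustration and are not part of the patch.

#include <assert.h>
#include <stdint.h>

typedef uint32_t u32;

/* Same body as the rewritten macro in the hunk above. */
#define IDX_32(n32) \
{ \
    u32 idx = 0U; \
    if (((n32) & 0xFFFF0000U) != 0U) { \
        idx += 16U; \
    } \
    if (((n32) & 0xFF00FF00U) != 0U) { \
        idx += 8U; \
    } \
    if (((n32) & 0xF0F0F0F0U) != 0U) { \
        idx += 4U; \
    } \
    if (((n32) & 0xCCCCCCCCU) != 0U) { \
        idx += 2U; \
    } \
    if (((n32) & 0xAAAAAAAAU) != 0U) { \
        idx += 1U; \
    } \
    (n32) = idx; \
}

int main(void)
{
    u32 v = 0x00008000U;    /* only bit 15 set */

    IDX_32(v);              /* contributions: 8 + 4 + 2 + 1 */
    assert(v == 15U);
    return 0;
}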
@@ -39,18 +39,18 @@ struct nvgpu_channel_sync;
 struct nvgpu_gpfifo_userdata;
 
 /* Flags to be passed to nvgpu_channel_setup_bind() */
-#define NVGPU_SETUP_BIND_FLAGS_SUPPORT_VPR (1U << 0U)
-#define NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC (1U << 1U)
-#define NVGPU_SETUP_BIND_FLAGS_REPLAYABLE_FAULTS_ENABLE (1U << 2U)
-#define NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT (1U << 3U)
+#define NVGPU_SETUP_BIND_FLAGS_SUPPORT_VPR BIT32(0)
+#define NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC BIT32(1)
+#define NVGPU_SETUP_BIND_FLAGS_REPLAYABLE_FAULTS_ENABLE BIT32(2)
+#define NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT BIT32(3)
 
 /* Flags to be passed to nvgpu_submit_channel_gpfifo() */
-#define NVGPU_SUBMIT_FLAGS_FENCE_WAIT (1U << 0U)
-#define NVGPU_SUBMIT_FLAGS_FENCE_GET (1U << 1U)
-#define NVGPU_SUBMIT_FLAGS_HW_FORMAT (1U << 2U)
-#define NVGPU_SUBMIT_FLAGS_SYNC_FENCE (1U << 3U)
-#define NVGPU_SUBMIT_FLAGS_SUPPRESS_WFI (1U << 4U)
-#define NVGPU_SUBMIT_FLAGS_SKIP_BUFFER_REFCOUNTING (1U << 5U)
+#define NVGPU_SUBMIT_FLAGS_FENCE_WAIT BIT32(0)
+#define NVGPU_SUBMIT_FLAGS_FENCE_GET BIT32(1)
+#define NVGPU_SUBMIT_FLAGS_HW_FORMAT BIT32(2)
+#define NVGPU_SUBMIT_FLAGS_SYNC_FENCE BIT32(3)
+#define NVGPU_SUBMIT_FLAGS_SUPPRESS_WFI BIT32(4)
+#define NVGPU_SUBMIT_FLAGS_SKIP_BUFFER_REFCOUNTING BIT32(5)
 
 /*
  * The binary format of 'struct nvgpu_channel_fence' introduced here
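The flag definitions above move from open-coded (1U << n) shifts to the BIT32() helper. The sketch below shows what such a typed bit helper commonly looks like; it is an assumption made for illustration, not nvgpu's actual definition, and the SETUP_BIND_FLAG_* names are made up.

#include <stdint.h>

/*
 * Hypothetical stand-ins for nvgpu's U32()/BIT32() helpers: both the
 * shifted value and the shift amount carry an explicit unsigned 32-bit
 * type, so every flag constant has a fixed width and signedness.
 */
#define U32(x)   ((uint32_t)(x))
#define BIT32(i) (U32(1) << U32(i))

#define SETUP_BIND_FLAG_VPR      BIT32(0)
#define SETUP_BIND_FLAG_USERMODE BIT32(3)

/* Usage follows the same MISRA 14.4 pattern as the patch: */
static int wants_vpr(uint32_t flags)
{
    return (flags & SETUP_BIND_FLAG_VPR) != 0U;
}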
@@ -60,8 +60,8 @@ nvgpu_vm_area_from_vm_area_list(struct nvgpu_list_node *node)
 /*
  * Alloc space flags.
  */
-#define NVGPU_VM_AREA_ALLOC_FIXED_OFFSET BIT(0)
-#define NVGPU_VM_AREA_ALLOC_SPARSE BIT(1)
+#define NVGPU_VM_AREA_ALLOC_FIXED_OFFSET BIT32(0)
+#define NVGPU_VM_AREA_ALLOC_SPARSE BIT32(1)
 
 int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
             u64 *addr, u32 flags);
@@ -103,7 +103,7 @@ static int get_lpwr_gr_table(struct gk20a *g)
             sizeof(struct nvgpu_bios_lpwr_gr_table_1x_entry));
 
         if (BIOS_GET_FIELD(entry.feautre_mask,
-            NV_VBIOS_LPWR_MS_FEATURE_MASK_MS)) {
+            NV_VBIOS_LPWR_MS_FEATURE_MASK_MS) != 0U) {
             pgr_data->entry[idx].gr_enabled = true;
 
             pgr_data->entry[idx].feature_mask =
@@ -157,7 +157,7 @@ static int get_lpwr_ms_table(struct gk20a *g)
             sizeof(struct nvgpu_bios_lpwr_ms_table_1x_entry));
 
         if (BIOS_GET_FIELD(entry.feautre_mask,
-            NV_VBIOS_LPWR_MS_FEATURE_MASK_MS)) {
+            NV_VBIOS_LPWR_MS_FEATURE_MASK_MS) != 0U) {
             pms_data->entry[idx].ms_enabled = true;
 
             pms_data->entry[idx].feature_mask =
@@ -96,10 +96,10 @@ void tu104_fbpa_handle_intr(struct gk20a *g, u32 fbpa_id)
         return;
     }
 
-    if (status & ecc_subp0_mask) {
+    if ((status & ecc_subp0_mask) != 0U) {
         tu104_fbpa_handle_ecc_intr(g, fbpa_id, 0u);
     }
-    if (status & ecc_subp1_mask) {
+    if ((status & ecc_subp1_mask) != 0U) {
         tu104_fbpa_handle_ecc_intr(g, fbpa_id, 1u);
     }
 }
@@ -360,8 +360,8 @@ bool tu104_sec2_is_interrupted(struct nvgpu_sec2 *sec2)
         psec_falcon_irqstat_exterr_true_f() |
         psec_falcon_irqstat_swgen0_true_f();
 
-    if (gk20a_readl(g, psec_falcon_irqstat_r()) &
-        servicedpmuint) {
+    if ((gk20a_readl(g, psec_falcon_irqstat_r()) &
+        servicedpmuint) != 0U) {
         return true;
     }
 