nvgpu: common: MISRA 10.1 boolean fixes

Fix violations where a variable or expression of non-boolean type is
used as a boolean in gpu/nvgpu/common.
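
The kind of rewrite involved is sketched below. This is an illustrative
example only, not code taken from this change; the helper name and
parameters are hypothetical, and it assumes nvgpu's usual u32 typedef
and kernel-style error codes. MISRA C:2012 Rule 10.1 requires the
operands of logical operators and controlling expressions to be
essentially Boolean, so integer, bitmask and pointer tests are made
explicit instead of relying on "!" and implicit truthiness.

	/* Hypothetical helper showing the Rule 10.1 pattern. */
	static int check_args(u32 count, u32 mask, u32 flag, const void *ptr)
	{
		/*
		 * Non-compliant (before): non-boolean operands used as booleans,
		 * e.g. if (!count || !ptr || (mask & flag)).
		 * Compliant (after): explicit comparisons against 0U and NULL.
		 */
		if ((count == 0U) || (ptr == NULL) || ((mask & flag) != 0U)) {
			return -EINVAL;
		}

		return 0;
	}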

JIRA NVGPU-646

Change-Id: I64e96e02e9a3d5d5604c4fa52460e0415f484d75
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1807128
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: Amulya Murthyreddy <amurthyreddy@nvidia.com>
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Amulya
Date: 2018-08-27 10:13:26 +05:30
Committed by: mobile promotions
Parent: a39d91b591
Commit: 3e6a445310
17 changed files with 109 additions and 95 deletions


@@ -60,15 +60,15 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
 	nvgpu_log_fn(g, " ");
-	if (big_page_size == 0) {
+	if (big_page_size == 0U) {
 		big_page_size = g->ops.mm.get_default_big_page_size();
 	} else {
 		if (!is_power_of_2(big_page_size)) {
 			return -EINVAL;
 		}
-		if (!(big_page_size &
-			nvgpu_mm_get_available_big_page_sizes(g))) {
+		if ((big_page_size &
+			nvgpu_mm_get_available_big_page_sizes(g)) == 0U) {
 			return -EINVAL;
 		}
 	}
@@ -80,7 +80,7 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
 			mm->channel.kernel_size,
 			mm->channel.user_size + mm->channel.kernel_size,
 			!mm->disable_bigpage, userspace_managed, name);
-	if (!vm) {
+	if (vm == NULL) {
 		return -ENOMEM;
 	}
@@ -100,13 +100,13 @@ int gk20a_as_alloc_share(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 	g = gk20a_get(g);
-	if (!g) {
+	if (g == NULL) {
 		return -ENODEV;
 	}
 	*out = NULL;
 	as_share = nvgpu_kzalloc(g, sizeof(*as_share));
-	if (!as_share) {
+	if (as_share == NULL) {
 		return -ENOMEM;
 	}


@@ -81,7 +81,7 @@ u32 gk20a_bus_set_bar0_window(struct gk20a *g, struct nvgpu_mem *mem,
 		bufbase + nvgpu_sgt_get_phys(g, sgt, sgl),
 		nvgpu_sgt_get_length(sgt, sgl));
-	WARN_ON(!bufbase);
+	WARN_ON(bufbase == 0ULL);
 	if (g->mm.pramin_window != win) {
 		gk20a_writel(g, bus_bar0_window_r(), win);


@@ -53,12 +53,12 @@ int gm20b_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)
 		u32 val = gk20a_readl(g, bus_bind_status_r());
 		u32 pending = bus_bind_status_bar1_pending_v(val);
 		u32 outstanding = bus_bind_status_bar1_outstanding_v(val);
-		if (!pending && !outstanding) {
+		if (pending == 0U && outstanding == 0U) {
 			break;
 		}
 		nvgpu_udelay(5);
-	} while (!nvgpu_timeout_expired(&timeout));
+	} while (nvgpu_timeout_expired(&timeout) == 0);
 	if (nvgpu_timeout_peek_expired(&timeout)) {
 		err = -EINVAL;


@@ -50,12 +50,12 @@ int gp10b_bus_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst)
 		u32 val = gk20a_readl(g, bus_bind_status_r());
 		u32 pending = bus_bind_status_bar2_pending_v(val);
 		u32 outstanding = bus_bind_status_bar2_outstanding_v(val);
-		if (!pending && !outstanding) {
+		if (pending == 0U && outstanding == 0U) {
 			break;
 		}
 		nvgpu_udelay(5);
-	} while (!nvgpu_timeout_expired(&timeout));
+	} while (nvgpu_timeout_expired(&timeout) == 0);
 	if (nvgpu_timeout_peek_expired(&timeout)) {
 		err = -EINVAL;


@@ -34,7 +34,7 @@ static inline int gk20a_get_valid_launch_flags(struct gk20a *g, int launch_flags
 {
 	/* there is no local memory available,
 	   don't allow local memory related CE flags */
-	if (!g->mm.vidmem.size) {
+	if (g->mm.vidmem.size == 0ULL) {
 		launch_flags &= ~(NVGPU_CE_SRC_LOCATION_LOCAL_FB |
 			NVGPU_CE_DST_LOCATION_LOCAL_FB);
 	}


@@ -33,7 +33,7 @@ int nvgpu_init_enabled_flags(struct gk20a *g)
 	g->enabled_flags = nvgpu_kzalloc(g,
 			BITS_TO_LONGS(NVGPU_MAX_ENABLED_BITS) *
 			sizeof(unsigned long));
-	if (!g->enabled_flags) {
+	if (g->enabled_flags == NULL) {
 		return -ENOMEM;
 	}


@@ -38,7 +38,7 @@ int nvgpu_flcn_wait_idle(struct nvgpu_falcon *flcn)
 	struct nvgpu_timeout timeout;
 	u32 idle_stat;
-	if (!flcn_ops->is_falcon_idle) {
+	if (flcn_ops->is_falcon_idle == NULL) {
 		nvgpu_warn(g, "Invalid op on falcon 0x%x ", flcn->flcn_id);
 		return -EINVAL;
 	}
@@ -79,7 +79,7 @@ int nvgpu_flcn_mem_scrub_wait(struct nvgpu_falcon *flcn)
 			goto exit;
 		}
 		nvgpu_udelay(MEM_SCRUBBING_TIMEOUT_DEFAULT);
-	} while (!nvgpu_timeout_expired(&timeout));
+	} while (nvgpu_timeout_expired(&timeout) == 0);
 	if (nvgpu_timeout_peek_expired(&timeout)) {
 		status = -ETIMEDOUT;
@@ -95,7 +95,7 @@ int nvgpu_flcn_reset(struct nvgpu_falcon *flcn)
 	if (flcn->flcn_ops.reset != NULL) {
 		status = flcn->flcn_ops.reset(flcn);
-		if (!status) {
+		if (status == 0) {
 			status = nvgpu_flcn_mem_scrub_wait(flcn);
 		}
 	} else {
@@ -165,7 +165,7 @@ int nvgpu_flcn_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout)
 		}
 		nvgpu_udelay(10);
-	} while (!nvgpu_timeout_expired(&to));
+	} while (nvgpu_timeout_expired(&to) == 0);
 	if (nvgpu_timeout_peek_expired(&to)) {
 		status = -EBUSY;
@@ -182,7 +182,7 @@ int nvgpu_flcn_clear_halt_intr_status(struct nvgpu_falcon *flcn,
 	struct nvgpu_timeout to;
 	int status = 0;
-	if (!flcn_ops->clear_halt_interrupt_status) {
+	if (flcn_ops->clear_halt_interrupt_status == NULL) {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
 		return -EINVAL;
@@ -195,7 +195,7 @@ int nvgpu_flcn_clear_halt_intr_status(struct nvgpu_falcon *flcn,
 		}
 		nvgpu_udelay(1);
-	} while (!nvgpu_timeout_expired(&to));
+	} while (nvgpu_timeout_expired(&to) == 0);
 	if (nvgpu_timeout_peek_expired(&to)) {
 		status = -EBUSY;
@@ -309,9 +309,10 @@ static void nvgpu_flcn_print_mem(struct nvgpu_falcon *flcn, u32 src,
 	total_block_read = size >> 8;
 	do {
-		byte_read_count = total_block_read ? sizeof(buff) : size;
+		byte_read_count =
+			(total_block_read != 0U) ? sizeof(buff) : size;
-		if (!byte_read_count) {
+		if (byte_read_count == 0U) {
 			break;
 		}


@@ -256,7 +256,7 @@ static int gm20b_fb_vpr_info_fetch_wait(struct gk20a *g,
 			return 0;
 		}
-	} while (!nvgpu_timeout_expired(&timeout));
+	} while (nvgpu_timeout_expired(&timeout) == 0);
 	return -ETIMEDOUT;
 }


@@ -150,7 +150,7 @@ int gv100_fb_memory_unlock(struct gk20a *g)
 	} else {
 		mem_unlock_fw = nvgpu_request_firmware(g, MEM_UNLOCK_DBG_BIN, 0);
 	}
-	if (!mem_unlock_fw) {
+	if (mem_unlock_fw == NULL) {
 		nvgpu_err(g, "mem unlock ucode get fail");
 		err = -ENOENT;
 		goto exit;


@@ -342,8 +342,8 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 		fault_status = g->ops.fb.read_mmu_fault_status(g);
 		do {
-			if (!(fault_status &
-				fb_mmu_fault_status_busy_true_f())) {
+			if ((fault_status &
+				fb_mmu_fault_status_busy_true_f()) == 0U) {
 				break;
 			}
 			/*
@@ -356,8 +356,8 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 			nvgpu_usleep_range(delay, delay * 2);
 			delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
-		} while (!nvgpu_timeout_expired_msg(&timeout,
-				"fault status busy set"));
+		} while (nvgpu_timeout_expired_msg(&timeout,
+				"fault status busy set") == 0);
 	}
 }
@@ -438,10 +438,10 @@ void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 		fb_mmu_l2tlb_ecc_status_uncorrected_err_total_counter_overflow_m();
 	/* clear the interrupt */
-	if ((corrected_delta > 0) || corrected_overflow) {
+	if ((corrected_delta > 0U) || (corrected_overflow != 0U)) {
 		gk20a_writel(g, fb_mmu_l2tlb_ecc_corrected_err_count_r(), 0);
 	}
-	if ((uncorrected_delta > 0) || uncorrected_overflow) {
+	if ((uncorrected_delta > 0U) || (uncorrected_overflow != 0U)) {
 		gk20a_writel(g, fb_mmu_l2tlb_ecc_uncorrected_err_count_r(), 0);
 	}
@@ -470,7 +470,7 @@ void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 		fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m()) != 0U) {
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
 	}
-	if (corrected_overflow || uncorrected_overflow) {
+	if ((corrected_overflow != 0U) || (uncorrected_overflow != 0U)) {
 		nvgpu_info(g, "mmu l2tlb ecc counter overflow!");
 	}
@@ -505,10 +505,10 @@ void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 		fb_mmu_hubtlb_ecc_status_uncorrected_err_total_counter_overflow_m();
 	/* clear the interrupt */
-	if ((corrected_delta > 0) || corrected_overflow) {
+	if ((corrected_delta > 0U) || (corrected_overflow != 0U)) {
 		gk20a_writel(g, fb_mmu_hubtlb_ecc_corrected_err_count_r(), 0);
 	}
-	if ((uncorrected_delta > 0) || uncorrected_overflow) {
+	if ((uncorrected_delta > 0U) || (uncorrected_overflow != 0U)) {
 		gk20a_writel(g, fb_mmu_hubtlb_ecc_uncorrected_err_count_r(), 0);
 	}
@@ -537,7 +537,7 @@ void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 		fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m()) != 0U) {
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
 	}
-	if (corrected_overflow || uncorrected_overflow) {
+	if ((corrected_overflow != 0U) || (uncorrected_overflow != 0U)) {
 		nvgpu_info(g, "mmu hubtlb ecc counter overflow!");
 	}
@@ -572,10 +572,10 @@ void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
 		fb_mmu_fillunit_ecc_status_uncorrected_err_total_counter_overflow_m();
 	/* clear the interrupt */
-	if ((corrected_delta > 0) || corrected_overflow) {
+	if ((corrected_delta > 0U) || (corrected_overflow != 0U)) {
 		gk20a_writel(g, fb_mmu_fillunit_ecc_corrected_err_count_r(), 0);
 	}
-	if ((uncorrected_delta > 0) || uncorrected_overflow) {
+	if ((uncorrected_delta > 0U) || (uncorrected_overflow != 0U)) {
 		gk20a_writel(g, fb_mmu_fillunit_ecc_uncorrected_err_count_r(), 0);
 	}
@@ -613,7 +613,7 @@ void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pde0 data error");
 	}
-	if (corrected_overflow || uncorrected_overflow) {
+	if ((corrected_overflow != 0U) || (uncorrected_overflow != 0U)) {
 		nvgpu_info(g, "mmu fillunit ecc counter overflow!");
 	}
@@ -666,7 +666,7 @@ static void gv11b_fb_parse_mmfault(struct mmu_fault_info *mmfault)
 static void gv11b_fb_print_fault_info(struct gk20a *g,
 			struct mmu_fault_info *mmfault)
 {
-	if (mmfault && mmfault->valid) {
+	if (mmfault != NULL && mmfault->valid) {
 		nvgpu_err(g, "[MMU FAULT] "
 			"mmu engine id: %d, "
 			"ch id: %d, "
@@ -804,7 +804,8 @@ static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,
 	mmfault->client_id =
 			gmmu_fault_buf_entry_client_v(rd32_val);
 	mmfault->replayable_fault =
-			gmmu_fault_buf_entry_replayable_fault_v(rd32_val);
+			(gmmu_fault_buf_entry_replayable_fault_v(rd32_val) ==
+			gmmu_fault_buf_entry_replayable_fault_true_v());
 	mmfault->fault_type =
 			gmmu_fault_buf_entry_fault_type_v(rd32_val);
@@ -822,7 +823,8 @@ static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,
 	mmfault->replay_fault_en =
 			gmmu_fault_buf_entry_replayable_fault_en_v(rd32_val);
-	mmfault->valid = gmmu_fault_buf_entry_valid_v(rd32_val);
+	mmfault->valid = (gmmu_fault_buf_entry_valid_v(rd32_val) ==
+			gmmu_fault_buf_entry_valid_true_v());
 	rd32_val = nvgpu_mem_rd32(g, mem, offset +
 			gmmu_fault_buf_entry_fault_type_w());
@@ -856,8 +858,8 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 		/* CE page faults are not reported as replayable */
 		nvgpu_log(g, gpu_dbg_intr, "CE Faulted");
 		err = gv11b_fb_fix_page_fault(g, mmfault);
-		if (mmfault->refch &&
-			(u32)mmfault->refch->tsgid != FIFO_INVAL_TSG_ID) {
+		if ((mmfault->refch != NULL) &&
+			((u32)mmfault->refch->tsgid != FIFO_INVAL_TSG_ID)) {
 			gv11b_fifo_reset_pbdma_and_eng_faulted(g,
 				&g->fifo.tsg[mmfault->refch->tsgid],
 				mmfault->faulted_pbdma,
@@ -1086,7 +1088,7 @@ static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
 	memset(mmfault, 0, sizeof(*mmfault));
-	if (!(fault_status & fb_mmu_fault_status_valid_set_f())) {
+	if ((fault_status & fb_mmu_fault_status_valid_set_f()) == 0U) {
 		nvgpu_log(g, gpu_dbg_intr, "mmu fault status valid not set");
 		return;
@@ -1131,7 +1133,7 @@ static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
 	reg_val = g->ops.fb.read_mmu_fault_info(g);
 	mmfault->fault_type = fb_mmu_fault_info_fault_type_v(reg_val);
 	mmfault->replayable_fault =
-		fb_mmu_fault_info_replayable_fault_v(reg_val);
+		(fb_mmu_fault_info_replayable_fault_v(reg_val) == 1U);
 	mmfault->client_id = fb_mmu_fault_info_client_v(reg_val);
 	mmfault->access_type = fb_mmu_fault_info_access_type_v(reg_val);
 	mmfault->client_type = fb_mmu_fault_info_client_type_v(reg_val);
@@ -1141,7 +1143,7 @@ static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
 	mmfault->replay_fault_en =
 		fb_mmu_fault_info_replayable_fault_en_v(reg_val);
-	mmfault->valid = fb_mmu_fault_info_valid_v(reg_val);
+	mmfault->valid = (fb_mmu_fault_info_valid_v(reg_val) == 1U);
 	fault_status &= ~(fb_mmu_fault_status_valid_m());
 	g->ops.fb.write_mmu_fault_status(g, fault_status);
@@ -1304,7 +1306,7 @@ void gv11b_fb_handle_replayable_mmu_fault(struct gk20a *g)
 {
 	u32 fault_status = gk20a_readl(g, fb_mmu_fault_status_r());
-	if (!(fault_status & fb_mmu_fault_status_replayable_m())) {
+	if ((fault_status & fb_mmu_fault_status_replayable_m()) == 0U) {
 		return;
 	}
@@ -1473,8 +1475,8 @@ int gv11b_fb_mmu_invalidate_replay(struct gk20a *g,
 			break;
 		}
 		nvgpu_udelay(5);
-	} while (!nvgpu_timeout_expired_msg(&timeout,
-		"invalidate replay failed on 0x%llx"));
+	} while (nvgpu_timeout_expired_msg(&timeout,
+		"invalidate replay failed on 0x%llx") == 0);
 	if (err != 0) {
 		nvgpu_err(g, "invalidate replay timedout");
 	}


@@ -51,15 +51,18 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 	bool new_sync_created = false;
 	int wait_fence_fd = -1;
 	int err = 0;
-	bool need_wfi = !(flags & NVGPU_SUBMIT_FLAGS_SUPPRESS_WFI);
+	bool need_wfi = (flags & NVGPU_SUBMIT_FLAGS_SUPPRESS_WFI) == 0U;
 	bool pre_alloc_enabled = channel_gk20a_is_prealloc_enabled(c);
 	struct nvgpu_channel_sync_syncpt *sync_syncpt = NULL;
+	bool fence_get = (flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) != 0U;
+	bool sync_fence = (flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE) != 0U;
+	bool fence_wait = (flags & NVGPU_SUBMIT_FLAGS_FENCE_WAIT) != 0U;
 	if (g->aggressive_sync_destroy_thresh) {
 		nvgpu_mutex_acquire(&c->sync_lock);
-		if (!c->sync) {
+		if (c->sync == NULL) {
 			c->sync = nvgpu_channel_sync_create(c, false);
-			if (!c->sync) {
+			if (c->sync == NULL) {
 				err = -ENOMEM;
 				nvgpu_mutex_release(&c->sync_lock);
 				goto fail;
@@ -70,7 +73,7 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 		nvgpu_mutex_release(&c->sync_lock);
 	}
-	if (g->ops.fifo.resetup_ramfc && new_sync_created) {
+	if ((g->ops.fifo.resetup_ramfc != NULL) && new_sync_created) {
 		err = g->ops.fifo.resetup_ramfc(c);
 		if (err != 0) {
 			goto fail;
@@ -81,7 +84,7 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 	 * Optionally insert syncpt/semaphore wait in the beginning of gpfifo
 	 * submission when user requested and the wait hasn't expired.
 	 */
-	if (flags & NVGPU_SUBMIT_FLAGS_FENCE_WAIT) {
+	if (fence_wait) {
 		u32 max_wait_cmds = c->deterministic ? 1U : 0U;
 		if (!pre_alloc_enabled) {
@@ -89,12 +92,12 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 					sizeof(struct priv_cmd_entry));
 		}
-		if (!job->wait_cmd) {
+		if (job->wait_cmd == NULL) {
 			err = -ENOMEM;
 			goto fail;
 		}
-		if (flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE) {
+		if (sync_fence) {
 			wait_fence_fd = fence->id;
 			err = nvgpu_channel_sync_wait_fence_fd(c->sync,
 				wait_fence_fd, job->wait_cmd, max_wait_cmds);
@@ -118,8 +121,7 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 		}
 	}
-	if ((flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) &&
-			(flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE)) {
+	if (fence_get && sync_fence) {
 		need_sync_fence = true;
 	}
@@ -129,7 +131,7 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 	 * sync_pt/semaphore PB is added to the GPFIFO later on in submit.
 	 */
 	job->post_fence = gk20a_alloc_fence(c);
-	if (!job->post_fence) {
+	if (job->post_fence == NULL) {
 		err = -ENOMEM;
 		goto clean_up_wait_cmd;
 	}
@@ -137,12 +139,12 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 		job->incr_cmd = nvgpu_kzalloc(g, sizeof(struct priv_cmd_entry));
 	}
-	if (!job->incr_cmd) {
+	if (job->incr_cmd == NULL) {
 		err = -ENOMEM;
 		goto clean_up_post_fence;
 	}
-	if (flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) {
+	if (fence_get) {
 		err = nvgpu_channel_sync_incr_user(c->sync,
 			wait_fence_fd, job->incr_cmd,
 			job->post_fence, need_wfi, need_sync_fence,
@@ -282,7 +284,7 @@ static int nvgpu_submit_append_gpfifo(struct channel_gk20a *c,
 	struct gk20a *g = c->g;
 	int err;
-	if (!kern_gpfifo && !c->gpfifo.pipe) {
+	if ((kern_gpfifo == NULL) && (c->gpfifo.pipe == NULL)) {
 		/*
 		 * This path (from userspace to sysmem) is special in order to
 		 * avoid two copies unnecessarily (from user to pipe, then from
@@ -293,7 +295,7 @@ static int nvgpu_submit_append_gpfifo(struct channel_gk20a *c,
 		if (err != 0) {
 			return err;
 		}
-	} else if (!kern_gpfifo) {
+	} else if (kern_gpfifo == NULL) {
 		/* from userspace to vidmem, use the common path */
 		err = g->os_channel.copy_user_gpfifo(c->gpfifo.pipe, userdata,
 				0, num_entries);
@@ -335,11 +337,13 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 	 * and one for post fence. */
 	const u32 extra_entries = 2U;
 	bool skip_buffer_refcounting = (flags &
-			NVGPU_SUBMIT_FLAGS_SKIP_BUFFER_REFCOUNTING);
+			NVGPU_SUBMIT_FLAGS_SKIP_BUFFER_REFCOUNTING) != 0U;
 	int err = 0;
 	bool need_job_tracking;
 	bool need_deferred_cleanup = false;
+	bool fence_wait = (flags & NVGPU_SUBMIT_FLAGS_FENCE_WAIT) != 0U;
+	bool fence_get = (flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) != 0U;
+	bool sync_fence = (flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE) != 0U;
 	if (nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
 		return -ENODEV;
 	}
@@ -365,9 +369,7 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 		return -ENOMEM;
 	}
-	if ((flags & (NVGPU_SUBMIT_FLAGS_FENCE_WAIT |
-			NVGPU_SUBMIT_FLAGS_FENCE_GET)) &&
-			!fence) {
+	if ((fence_wait || fence_get) && (fence == NULL)) {
 		return -EINVAL;
 	}
@@ -397,12 +399,12 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 	 * required and a fast submit can be done (ie. only need to write
 	 * out userspace GPFIFO entries and update GP_PUT).
 	 */
-	need_job_tracking = (flags & NVGPU_SUBMIT_FLAGS_FENCE_WAIT) ||
-			(flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) ||
+	need_job_tracking = (fence_wait ||
+			fence_get ||
 			c->timeout.enabled ||
 			(nvgpu_is_enabled(g, NVGPU_CAN_RAILGATE)
 				&& !c->deterministic) ||
-			!skip_buffer_refcounting;
+			!skip_buffer_refcounting);
 	if (need_job_tracking) {
 		bool need_sync_framework = false;
@@ -417,9 +419,8 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
 		}
 		need_sync_framework =
-			nvgpu_channel_sync_needs_os_fence_framework(g) ||
-			(flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE &&
-			 flags & NVGPU_SUBMIT_FLAGS_FENCE_GET);
+			(nvgpu_channel_sync_needs_os_fence_framework(g) ||
+			(sync_fence && fence_get));
 		/*
 		 * Deferred clean-up is necessary for any of the following


@@ -35,6 +35,8 @@
 int gm20b_fuse_check_priv_security(struct gk20a *g)
 {
 	u32 gcplex_config;
+	bool is_wpr_enabled = false;
+	bool is_auto_fetch_disable = false;
 	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
 		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
@@ -58,10 +60,11 @@ int gm20b_fuse_check_priv_security(struct gk20a *g)
 		 * and vpr settings from tegra mc
 		 */
 		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
-		if ((gcplex_config &
-			GCPLEX_CONFIG_WPR_ENABLED_MASK) &&
-			!(gcplex_config &
-				GCPLEX_CONFIG_VPR_AUTO_FETCH_DISABLE_MASK)) {
+		is_wpr_enabled =
+			(gcplex_config & GCPLEX_CONFIG_WPR_ENABLED_MASK) != 0U;
+		is_auto_fetch_disable =
+			(gcplex_config & GCPLEX_CONFIG_VPR_AUTO_FETCH_DISABLE_MASK) != 0U;
+		if (is_wpr_enabled && !is_auto_fetch_disable) {
 			if (gk20a_readl(g, fuse_opt_sec_debug_en_r())) {
 				nvgpu_log(g, gpu_dbg_info,
 					"gcplex_config = 0x%08x, "


@@ -36,6 +36,8 @@
 int gp10b_fuse_check_priv_security(struct gk20a *g)
 {
 	u32 gcplex_config;
+	bool is_wpr_enabled = false;
+	bool is_auto_fetch_disable = false;
 	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
 		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
@@ -58,10 +60,11 @@ int gp10b_fuse_check_priv_security(struct gk20a *g)
 		 */
 		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
 		__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
-		if ((gcplex_config &
-			GCPLEX_CONFIG_WPR_ENABLED_MASK) &&
-			!(gcplex_config &
-				GCPLEX_CONFIG_VPR_AUTO_FETCH_DISABLE_MASK)) {
+		is_wpr_enabled =
+			(gcplex_config & GCPLEX_CONFIG_WPR_ENABLED_MASK) != 0U;
+		is_auto_fetch_disable =
+			(gcplex_config & GCPLEX_CONFIG_VPR_AUTO_FETCH_DISABLE_MASK) != 0U;
+		if (is_wpr_enabled && !is_auto_fetch_disable) {
 			if (gk20a_readl(g, fuse_opt_sec_debug_en_r())) {
 				nvgpu_log(g, gpu_dbg_info,
 					"gcplex_config = 0x%08x, "


@@ -43,7 +43,7 @@ int nvgpu_init_ltc_support(struct gk20a *g)
 void nvgpu_ltc_sync_enabled(struct gk20a *g)
 {
-	if (!g->ops.ltc.set_enabled) {
+	if (g->ops.ltc.set_enabled == NULL) {
 		return;
 	}


@@ -174,11 +174,11 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 			NVGPU_TIMER_RETRY_TIMER);
 	do {
 		val = gk20a_readl(g, ctrl1);
-		if (!(val & hw_op)) {
+		if ((val & hw_op) == 0U) {
 			break;
 		}
 		nvgpu_udelay(5);
-	} while (!nvgpu_timeout_expired(&timeout));
+	} while (nvgpu_timeout_expired(&timeout) == 0);
 	if (nvgpu_timeout_peek_expired(&timeout)) {
 		nvgpu_err(g, "comp tag clear timeout");
@@ -284,6 +284,8 @@ void gm20b_flush_ltc(struct gk20a *g)
 	struct nvgpu_timeout timeout;
 	unsigned int ltc;
 	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
+	bool is_clean_pending_set = false;
+	bool is_invalidate_pending_set = false;
 	/* Clean... */
 	nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_cmgmt1_r(),
@@ -321,10 +323,11 @@ void gm20b_flush_ltc(struct gk20a *g)
 			int cmgmt1 = ltc_ltc0_ltss_tstg_cmgmt1_r() +
 				     ltc * ltc_stride;
 			op_pending = gk20a_readl(g, cmgmt1);
-		} while ((op_pending &
-			ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f()) &&
-			!nvgpu_timeout_expired_msg(&timeout,
-				"L2 flush timeout!"));
+			is_clean_pending_set = (op_pending &
+				ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f()) != 0U;
+		} while (is_clean_pending_set &&
+			(nvgpu_timeout_expired_msg(&timeout,
+				"L2 flush timeout!") == 0));
 	}
 	/* And invalidate. */
@@ -346,10 +349,11 @@ void gm20b_flush_ltc(struct gk20a *g)
 			int cmgmt0 = ltc_ltc0_ltss_tstg_cmgmt0_r() +
 				     ltc * ltc_stride;
 			op_pending = gk20a_readl(g, cmgmt0);
-		} while ((op_pending &
-			ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f()) &&
-			!nvgpu_timeout_expired_msg(&timeout,
-				"L2 flush timeout!"));
+			is_invalidate_pending_set = (op_pending &
+				ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f()) != 0U;
+		} while (is_invalidate_pending_set &&
+			(nvgpu_timeout_expired_msg(&timeout,
+				"L2 flush timeout!") == 0));
 	}
 }


@@ -201,11 +201,11 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 			NVGPU_TIMER_RETRY_TIMER);
 	do {
 		val = gk20a_readl(g, ctrl1);
-		if (!(val & hw_op)) {
+		if ((val & hw_op) == 0U) {
 			break;
 		}
 		nvgpu_udelay(5);
-	} while (!nvgpu_timeout_expired(&timeout));
+	} while (nvgpu_timeout_expired(&timeout) == 0);
 	if (nvgpu_timeout_peek_expired(&timeout)) {
 		nvgpu_err(g, "comp tag clear timeout");


@@ -57,9 +57,9 @@ struct mmu_fault_info {
 	u32 fault_type;
 	u32 access_type;
 	u32 protected_mode;
-	u32 replayable_fault;
+	bool replayable_fault;
 	u32 replay_fault_en;
-	u32 valid;
+	bool valid;
 	u32 faulted_pbdma;
 	u32 faulted_engine;
 	u32 faulted_subid;