mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: common: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix violations caused by
single-statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I599cce2af1d6cdc24efefba4ec42abfe998aec47
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1795845
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 9e69e0cf97
parent de10cedf8c
committed by: mobile promotions
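The change applied throughout the diff below is mechanical. As an illustrative sketch only (the snippet is lifted from the first hunk; surrounding declarations are omitted), a single-statement if body gains braces like this:

	/* Before: the body of the if is a single unbraced statement,
	 * which violates MISRA C:2012 Rule 15.6. */
	if (!is_power_of_2(big_page_size))
		return -EINVAL;

	/* After: the body is a compound statement. This satisfies the
	 * rule and guards against a later edit silently leaving a new
	 * statement outside the condition. */
	if (!is_power_of_2(big_page_size)) {
		return -EINVAL;
	}

Behavior is unchanged in every hunk; only the block structure is made explicit.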
@@ -64,11 +64,14 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
 	if (big_page_size == 0) {
 		big_page_size = g->ops.mm.get_default_big_page_size();
 	} else {
-		if (!is_power_of_2(big_page_size))
+		if (!is_power_of_2(big_page_size)) {
 			return -EINVAL;
+		}

-		if (!(big_page_size & nvgpu_mm_get_available_big_page_sizes(g)))
+		if (!(big_page_size &
+			nvgpu_mm_get_available_big_page_sizes(g))) {
 			return -EINVAL;
+		}
 	}

 	snprintf(name, sizeof(name), "as_%d", as_share->id);
@@ -78,8 +81,9 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
 			mm->channel.kernel_size,
 			mm->channel.user_size + mm->channel.kernel_size,
 			!mm->disable_bigpage, userspace_managed, name);
-	if (!vm)
+	if (!vm) {
 		return -ENOMEM;
+	}

 	as_share->vm = vm;
 	vm->as_share = as_share;
@@ -97,26 +101,30 @@ int gk20a_as_alloc_share(struct gk20a *g,

 	nvgpu_log_fn(g, " ");
 	g = gk20a_get(g);
-	if (!g)
+	if (!g) {
 		return -ENODEV;
+	}

 	*out = NULL;
 	as_share = nvgpu_kzalloc(g, sizeof(*as_share));
-	if (!as_share)
+	if (!as_share) {
 		return -ENOMEM;
+	}

 	as_share->as = &g->as;
 	as_share->id = generate_as_share_id(as_share->as);

 	/* this will set as_share->vm. */
 	err = gk20a_busy(g);
-	if (err)
+	if (err) {
 		goto failed;
+	}
 	err = gk20a_vm_alloc_share(as_share, big_page_size, flags);
 	gk20a_idle(g);

-	if (err)
+	if (err) {
 		goto failed;
+	}

 	*out = as_share;
 	return 0;
@@ -154,8 +162,9 @@ int gk20a_as_release_share(struct gk20a_as_share *as_share)

 	err = gk20a_busy(g);

-	if (err)
+	if (err) {
 		goto release_fail;
+	}

 	err = gk20a_vm_release_share(as_share);

@@ -53,14 +53,16 @@ int gm20b_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)
 		u32 val = gk20a_readl(g, bus_bind_status_r());
 		u32 pending = bus_bind_status_bar1_pending_v(val);
 		u32 outstanding = bus_bind_status_bar1_outstanding_v(val);
-		if (!pending && !outstanding)
+		if (!pending && !outstanding) {
 			break;
+		}

 		nvgpu_udelay(5);
 	} while (!nvgpu_timeout_expired(&timeout));

-	if (nvgpu_timeout_peek_expired(&timeout))
+	if (nvgpu_timeout_peek_expired(&timeout)) {
 		err = -EINVAL;
+	}

 	return err;
 }
@@ -50,14 +50,16 @@ int gp10b_bus_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst)
 		u32 val = gk20a_readl(g, bus_bind_status_r());
 		u32 pending = bus_bind_status_bar2_pending_v(val);
 		u32 outstanding = bus_bind_status_bar2_outstanding_v(val);
-		if (!pending && !outstanding)
+		if (!pending && !outstanding) {
 			break;
+		}

 		nvgpu_udelay(5);
 	} while (!nvgpu_timeout_expired(&timeout));

-	if (nvgpu_timeout_peek_expired(&timeout))
+	if (nvgpu_timeout_peek_expired(&timeout)) {
 		err = -EINVAL;
+	}

 	return err;
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-18, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -34,8 +34,9 @@ int nvgpu_init_enabled_flags(struct gk20a *g)
 	g->enabled_flags = nvgpu_kzalloc(g,
 			BITS_TO_LONGS(NVGPU_MAX_ENABLED_BITS) *
 			sizeof(unsigned long));
-	if (!g->enabled_flags)
+	if (!g->enabled_flags) {
 		return -ENOMEM;
+	}

 	return 0;
 }
@@ -55,8 +56,9 @@ bool nvgpu_is_enabled(struct gk20a *g, int flag)

 bool __nvgpu_set_enabled(struct gk20a *g, int flag, bool state)
 {
-	if (state)
+	if (state) {
 		return test_and_set_bit(flag, g->enabled_flags);
-	else
+	} else {
 		return test_and_clear_bit(flag, g->enabled_flags);
+	}
 }
@@ -50,12 +50,14 @@ int nvgpu_flcn_wait_idle(struct nvgpu_falcon *flcn)
 	do {
 		idle_stat = flcn_ops->is_falcon_idle(flcn);

-		if (idle_stat)
+		if (idle_stat) {
 			break;
+		}

 		if (nvgpu_timeout_expired_msg(&timeout,
-			"waiting for falcon idle: 0x%08x", idle_stat))
+			"waiting for falcon idle: 0x%08x", idle_stat)) {
 			return -EBUSY;
+		}

 		nvgpu_usleep_range(100, 200);
 	} while (1);
@@ -74,13 +76,15 @@ int nvgpu_flcn_mem_scrub_wait(struct nvgpu_falcon *flcn)
 		MEM_SCRUBBING_TIMEOUT_DEFAULT,
 		NVGPU_TIMER_RETRY_TIMER);
 	do {
-		if (nvgpu_flcn_get_mem_scrubbing_status(flcn))
+		if (nvgpu_flcn_get_mem_scrubbing_status(flcn)) {
 			goto exit;
+		}
 		nvgpu_udelay(MEM_SCRUBBING_TIMEOUT_DEFAULT);
 	} while (!nvgpu_timeout_expired(&timeout));

-	if (nvgpu_timeout_peek_expired(&timeout))
+	if (nvgpu_timeout_peek_expired(&timeout)) {
 		status = -ETIMEDOUT;
+	}

 exit:
 	return status;
@@ -92,8 +96,9 @@ int nvgpu_flcn_reset(struct nvgpu_falcon *flcn)

 	if (flcn->flcn_ops.reset) {
 		status = flcn->flcn_ops.reset(flcn);
-		if (!status)
+		if (!status) {
 			status = nvgpu_flcn_mem_scrub_wait(flcn);
+		}
 	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
@@ -112,9 +117,10 @@ void nvgpu_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable,
 		flcn->intr_mask = intr_mask;
 		flcn->intr_dest = intr_dest;
 		flcn_ops->set_irq(flcn, enable);
-	} else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}
 }

 bool nvgpu_flcn_get_mem_scrubbing_status(struct nvgpu_falcon *flcn)
@@ -122,11 +128,12 @@ bool nvgpu_flcn_get_mem_scrubbing_status(struct nvgpu_falcon *flcn)
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 	bool status = false;

-	if (flcn_ops->is_falcon_scrubbing_done)
+	if (flcn_ops->is_falcon_scrubbing_done) {
 		status = flcn_ops->is_falcon_scrubbing_done(flcn);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}

 	return status;
 }
@@ -136,11 +143,12 @@ bool nvgpu_flcn_get_cpu_halted_status(struct nvgpu_falcon *flcn)
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 	bool status = false;

-	if (flcn_ops->is_falcon_cpu_halted)
+	if (flcn_ops->is_falcon_cpu_halted) {
 		status = flcn_ops->is_falcon_cpu_halted(flcn);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}

 	return status;
 }
@@ -153,14 +161,16 @@ int nvgpu_flcn_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout)

 	nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
 	do {
-		if (nvgpu_flcn_get_cpu_halted_status(flcn))
+		if (nvgpu_flcn_get_cpu_halted_status(flcn)) {
 			break;
+		}

 		nvgpu_udelay(10);
 	} while (!nvgpu_timeout_expired(&to));

-	if (nvgpu_timeout_peek_expired(&to))
+	if (nvgpu_timeout_peek_expired(&to)) {
 		status = -EBUSY;
+	}

 	return status;
 }
@@ -181,14 +191,16 @@ int nvgpu_flcn_clear_halt_intr_status(struct nvgpu_falcon *flcn,

 	nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
 	do {
-		if (flcn_ops->clear_halt_interrupt_status(flcn))
+		if (flcn_ops->clear_halt_interrupt_status(flcn)) {
 			break;
+		}

 		nvgpu_udelay(1);
 	} while (!nvgpu_timeout_expired(&to));

-	if (nvgpu_timeout_peek_expired(&to))
+	if (nvgpu_timeout_peek_expired(&to)) {
 		status = -EBUSY;
+	}

 	return status;
 }
@@ -198,11 +210,12 @@ bool nvgpu_flcn_get_idle_status(struct nvgpu_falcon *flcn)
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 	bool status = false;

-	if (flcn_ops->is_falcon_idle)
+	if (flcn_ops->is_falcon_idle) {
 		status = flcn_ops->is_falcon_idle(flcn);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}

 	return status;
 }
@@ -229,11 +242,12 @@ int nvgpu_flcn_copy_from_imem(struct nvgpu_falcon *flcn,
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 	int status = -EINVAL;

-	if (flcn_ops->copy_from_imem)
+	if (flcn_ops->copy_from_imem) {
 		status = flcn_ops->copy_from_imem(flcn, src, dst, size, port);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}

 	return status;
 }
@@ -244,12 +258,13 @@ int nvgpu_flcn_copy_to_imem(struct nvgpu_falcon *flcn,
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 	int status = -EINVAL;

-	if (flcn_ops->copy_to_imem)
+	if (flcn_ops->copy_to_imem) {
 		status = flcn_ops->copy_to_imem(flcn, dst, src, size, port,
 					sec, tag);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}

 	return status;
 }
@@ -269,15 +284,17 @@ static void nvgpu_flcn_print_mem(struct nvgpu_falcon *flcn, u32 src,
 	do {
 		byte_read_count = total_block_read ? sizeof(buff) : size;

-		if (!byte_read_count)
+		if (!byte_read_count) {
 			break;
+		}

-		if (mem_type == MEM_DMEM)
+		if (mem_type == MEM_DMEM) {
 			status = nvgpu_flcn_copy_from_dmem(flcn, src,
 				(u8 *)buff, byte_read_count, 0);
-		else
+		} else {
 			status = nvgpu_flcn_copy_from_imem(flcn, src,
 				(u8 *)buff, byte_read_count, 0);
+		}

 		if (status) {
 			nvgpu_err(flcn->g, "MEM print failed");
@@ -312,11 +329,12 @@ int nvgpu_flcn_bootstrap(struct nvgpu_falcon *flcn, u32 boot_vector)
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 	int status = -EINVAL;

-	if (flcn_ops->bootstrap)
+	if (flcn_ops->bootstrap) {
 		status = flcn_ops->bootstrap(flcn, boot_vector);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}

 	return status;
 }
@@ -326,11 +344,12 @@ u32 nvgpu_flcn_mailbox_read(struct nvgpu_falcon *flcn, u32 mailbox_index)
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 	u32 data = 0;

-	if (flcn_ops->mailbox_read)
+	if (flcn_ops->mailbox_read) {
 		data = flcn_ops->mailbox_read(flcn, mailbox_index);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}

 	return data;
 }
@@ -340,22 +359,24 @@ void nvgpu_flcn_mailbox_write(struct nvgpu_falcon *flcn, u32 mailbox_index,
 {
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;

-	if (flcn_ops->mailbox_write)
+	if (flcn_ops->mailbox_write) {
 		flcn_ops->mailbox_write(flcn, mailbox_index, data);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}
 }

 void nvgpu_flcn_dump_stats(struct nvgpu_falcon *flcn)
 {
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;

-	if (flcn_ops->dump_falcon_stats)
+	if (flcn_ops->dump_falcon_stats) {
 		flcn_ops->dump_falcon_stats(flcn);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}
 }

 int nvgpu_flcn_bl_bootstrap(struct nvgpu_falcon *flcn,
@@ -74,8 +74,9 @@ void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 	   hw. Use the power_on flag to skip tlb invalidation when gpu
 	   power is turned off */

-	if (!g->power_on)
+	if (!g->power_on) {
 		return;
+	}

 	addr_lo = u64_lo32(nvgpu_mem_get_addr(g, pdb) >> 12);

@@ -87,14 +88,16 @@ void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)

 	do {
 		data = gk20a_readl(g, fb_mmu_ctrl_r());
-		if (fb_mmu_ctrl_pri_fifo_space_v(data) != 0)
+		if (fb_mmu_ctrl_pri_fifo_space_v(data) != 0) {
 			break;
+		}
 		nvgpu_udelay(2);
 	} while (!nvgpu_timeout_expired_msg(&timeout,
 			"wait mmu fifo space"));

-	if (nvgpu_timeout_peek_expired(&timeout))
+	if (nvgpu_timeout_peek_expired(&timeout)) {
 		goto out;
+	}

 	nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);

@@ -112,8 +115,9 @@ void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 	do {
 		data = gk20a_readl(g, fb_mmu_ctrl_r());
 		if (fb_mmu_ctrl_pri_fifo_empty_v(data) !=
-			fb_mmu_ctrl_pri_fifo_empty_false_f())
+			fb_mmu_ctrl_pri_fifo_empty_false_f()) {
 			break;
+		}
 		nvgpu_udelay(2);
 	} while (!nvgpu_timeout_expired_msg(&timeout,
 			"wait mmu invalidate"));

@@ -148,8 +148,9 @@ static int gm20b_fb_vpr_info_fetch_wait(struct gk20a *g,

 		val = gk20a_readl(g, fb_mmu_vpr_info_r());
 		if (fb_mmu_vpr_info_fetch_v(val) ==
-			fb_mmu_vpr_info_fetch_false_v())
+			fb_mmu_vpr_info_fetch_false_v()) {
 			return 0;
+		}

 	} while (!nvgpu_timeout_expired(&timeout));

@@ -200,8 +200,9 @@ int gv100_fb_memory_unlock(struct gk20a *g)
 			falcon_falcon_sctl_r()));

 exit:
-	if (mem_unlock_fw)
+	if (mem_unlock_fw) {
 		nvgpu_release_firmware(g, mem_unlock_fw);
+	}

 	nvgpu_log_fn(g, "done, status - %d", err);

@@ -97,12 +97,13 @@ void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 	u64 compbit_store_iova;
 	u64 compbit_base_post_divide64;

-	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
+	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
 		compbit_store_iova = nvgpu_mem_get_phys_addr(g,
 				&gr->compbit_store.mem);
-	else
+	} else {
 		compbit_store_iova = nvgpu_mem_get_addr(g,
 				&gr->compbit_store.mem);
+	}
 	/* must be aligned to 64 KB */
 	compbit_store_iova = roundup(compbit_store_iova, (u64)SZ_64K);

@@ -115,12 +116,14 @@ void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 	compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
 		g->ltc_count) << fb_mmu_cbc_base_address_alignment_shift_v();

-	if (compbit_base_post_multiply64 < compbit_store_iova)
+	if (compbit_base_post_multiply64 < compbit_store_iova) {
 		compbit_base_post_divide++;
+	}

-	if (g->ops.ltc.cbc_fix_config)
+	if (g->ops.ltc.cbc_fix_config) {
 		compbit_base_post_divide =
 			g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);
+	}

 	gk20a_writel(g, fb_mmu_cbc_base_r(),
 		fb_mmu_cbc_base_address_f(compbit_base_post_divide));
@@ -250,8 +253,9 @@ static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g,
 	/* while the fault is being handled it is possible for overflow
 	 * to happen,
 	 */
-	if (reg_val & fb_mmu_fault_buffer_get_overflow_m())
+	if (reg_val & fb_mmu_fault_buffer_get_overflow_m()) {
 		reg_val |= fb_mmu_fault_buffer_get_overflow_clear_f();
+	}

 	g->ops.fb.write_mmu_fault_buffer_get(g, index, reg_val);

@@ -341,8 +345,10 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 		fault_status = g->ops.fb.read_mmu_fault_status(g);

 		do {
-			if (!(fault_status & fb_mmu_fault_status_busy_true_f()))
+			if (!(fault_status &
+				fb_mmu_fault_status_busy_true_f())) {
 				break;
+			}
 			/*
 			 * Make sure fault buffer is disabled.
 			 * This is to avoid accessing fault buffer by hw
@@ -435,19 +441,23 @@ void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 		fb_mmu_l2tlb_ecc_status_uncorrected_err_total_counter_overflow_m();

 	/* clear the interrupt */
-	if ((corrected_delta > 0) || corrected_overflow)
+	if ((corrected_delta > 0) || corrected_overflow) {
 		gk20a_writel(g, fb_mmu_l2tlb_ecc_corrected_err_count_r(), 0);
-	if ((uncorrected_delta > 0) || uncorrected_overflow)
+	}
+	if ((uncorrected_delta > 0) || uncorrected_overflow) {
 		gk20a_writel(g, fb_mmu_l2tlb_ecc_uncorrected_err_count_r(), 0);
+	}

 	gk20a_writel(g, fb_mmu_l2tlb_ecc_status_r(),
 				fb_mmu_l2tlb_ecc_status_reset_clear_f());

 	/* Handle overflow */
-	if (corrected_overflow)
+	if (corrected_overflow) {
 		corrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_corrected_err_count_total_s());
-	if (uncorrected_overflow)
+	}
+	if (uncorrected_overflow) {
 		uncorrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_uncorrected_err_count_total_s());
+	}


 	g->ecc.fb.mmu_l2tlb_ecc_corrected_err_count[0].counter +=
@@ -455,12 +465,17 @@ void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 	g->ecc.fb.mmu_l2tlb_ecc_uncorrected_err_count[0].counter +=
 							uncorrected_delta;

-	if (ecc_status & fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m())
+	if (ecc_status &
+		fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
-	if (ecc_status & fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m())
+	}
+	if (ecc_status &
+		fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
-	if (corrected_overflow || uncorrected_overflow)
+	}
+	if (corrected_overflow || uncorrected_overflow) {
 		nvgpu_info(g, "mmu l2tlb ecc counter overflow!");
+	}

 	nvgpu_log(g, gpu_dbg_intr,
 		"ecc error address: 0x%x", ecc_addr);
@@ -493,19 +508,23 @@ void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 		fb_mmu_hubtlb_ecc_status_uncorrected_err_total_counter_overflow_m();

 	/* clear the interrupt */
-	if ((corrected_delta > 0) || corrected_overflow)
+	if ((corrected_delta > 0) || corrected_overflow) {
 		gk20a_writel(g, fb_mmu_hubtlb_ecc_corrected_err_count_r(), 0);
-	if ((uncorrected_delta > 0) || uncorrected_overflow)
+	}
+	if ((uncorrected_delta > 0) || uncorrected_overflow) {
 		gk20a_writel(g, fb_mmu_hubtlb_ecc_uncorrected_err_count_r(), 0);
+	}

 	gk20a_writel(g, fb_mmu_hubtlb_ecc_status_r(),
 				fb_mmu_hubtlb_ecc_status_reset_clear_f());

 	/* Handle overflow */
-	if (corrected_overflow)
+	if (corrected_overflow) {
 		corrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_corrected_err_count_total_s());
-	if (uncorrected_overflow)
+	}
+	if (uncorrected_overflow) {
 		uncorrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_uncorrected_err_count_total_s());
+	}


 	g->ecc.fb.mmu_hubtlb_ecc_corrected_err_count[0].counter +=
@@ -513,12 +532,15 @@ void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 	g->ecc.fb.mmu_hubtlb_ecc_uncorrected_err_count[0].counter +=
 							uncorrected_delta;

-	if (ecc_status & fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m())
+	if (ecc_status & fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
-	if (ecc_status & fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m())
+	}
+	if (ecc_status & fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
-	if (corrected_overflow || uncorrected_overflow)
+	}
+	if (corrected_overflow || uncorrected_overflow) {
 		nvgpu_info(g, "mmu hubtlb ecc counter overflow!");
+	}

 	nvgpu_log(g, gpu_dbg_intr,
 		"ecc error address: 0x%x", ecc_addr);
@@ -551,19 +573,23 @@ void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
 		fb_mmu_fillunit_ecc_status_uncorrected_err_total_counter_overflow_m();

 	/* clear the interrupt */
-	if ((corrected_delta > 0) || corrected_overflow)
+	if ((corrected_delta > 0) || corrected_overflow) {
 		gk20a_writel(g, fb_mmu_fillunit_ecc_corrected_err_count_r(), 0);
-	if ((uncorrected_delta > 0) || uncorrected_overflow)
+	}
+	if ((uncorrected_delta > 0) || uncorrected_overflow) {
 		gk20a_writel(g, fb_mmu_fillunit_ecc_uncorrected_err_count_r(), 0);
+	}

 	gk20a_writel(g, fb_mmu_fillunit_ecc_status_r(),
 				fb_mmu_fillunit_ecc_status_reset_clear_f());

 	/* Handle overflow */
-	if (corrected_overflow)
+	if (corrected_overflow) {
 		corrected_delta += (0x1UL << fb_mmu_fillunit_ecc_corrected_err_count_total_s());
-	if (uncorrected_overflow)
+	}
+	if (uncorrected_overflow) {
 		uncorrected_delta += (0x1UL << fb_mmu_fillunit_ecc_uncorrected_err_count_total_s());
+	}


 	g->ecc.fb.mmu_fillunit_ecc_corrected_err_count[0].counter +=
@@ -571,17 +597,26 @@ void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
 	g->ecc.fb.mmu_fillunit_ecc_uncorrected_err_count[0].counter +=
 							uncorrected_delta;

-	if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m())
+	if (ecc_status &
+		fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "corrected ecc pte data error");
-	if (ecc_status & fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m())
+	}
+	if (ecc_status &
+		fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pte data error");
-	if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m())
+	}
+	if (ecc_status &
+		fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "corrected ecc pde0 data error");
-	if (ecc_status & fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m())
+	}
+	if (ecc_status &
+		fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pde0 data error");
+	}

-	if (corrected_overflow || uncorrected_overflow)
+	if (corrected_overflow || uncorrected_overflow) {
 		nvgpu_info(g, "mmu fillunit ecc counter overflow!");
+	}

 	nvgpu_log(g, gpu_dbg_intr,
 		"ecc error address: 0x%x", ecc_addr);
@@ -594,33 +629,37 @@ void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
 static void gv11b_fb_parse_mmfault(struct mmu_fault_info *mmfault)
 {
 	if (WARN_ON(mmfault->fault_type >=
-			ARRAY_SIZE(fault_type_descs_gv11b)))
+			ARRAY_SIZE(fault_type_descs_gv11b))) {
 		mmfault->fault_type_desc = invalid_str;
-	else
+	} else {
 		mmfault->fault_type_desc =
			fault_type_descs_gv11b[mmfault->fault_type];
+	}

 	if (WARN_ON(mmfault->client_type >=
-			ARRAY_SIZE(fault_client_type_descs_gv11b)))
+			ARRAY_SIZE(fault_client_type_descs_gv11b))) {
 		mmfault->client_type_desc = invalid_str;
-	else
+	} else {
 		mmfault->client_type_desc =
			fault_client_type_descs_gv11b[mmfault->client_type];
+	}

 	mmfault->client_id_desc = invalid_str;
 	if (mmfault->client_type ==
 			gmmu_fault_client_type_hub_v()) {

 		if (!(WARN_ON(mmfault->client_id >=
-				ARRAY_SIZE(hub_client_descs_gv11b))))
+				ARRAY_SIZE(hub_client_descs_gv11b)))) {
 			mmfault->client_id_desc =
				hub_client_descs_gv11b[mmfault->client_id];
+		}
 	} else if (mmfault->client_type ==
 			gmmu_fault_client_type_gpc_v()) {
 		if (!(WARN_ON(mmfault->client_id >=
-				ARRAY_SIZE(gpc_client_descs_gv11b))))
+				ARRAY_SIZE(gpc_client_descs_gv11b)))) {
 			mmfault->client_id_desc =
				gpc_client_descs_gv11b[mmfault->client_id];
+		}
 	}

 }
@@ -719,8 +758,9 @@ static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,

 	/* refch will be put back after fault is handled */
 	refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
-	if (refch)
+	if (refch) {
 		chid = refch->chid;
+	}

 	/* it is ok to continue even if refch is NULL */
 	mmfault->refch = refch;
@@ -803,8 +843,9 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 	u32 id = FIFO_INVAL_TSG_ID;
 	unsigned int rc_type = RC_TYPE_NO_RC;

-	if (!mmfault->valid)
+	if (!mmfault->valid) {
 		return;
+	}

 	gv11b_fb_print_fault_info(g, mmfault);

@@ -877,8 +918,9 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 			rc_type = RC_TYPE_MMU_FAULT;
 			if (gk20a_is_channel_marked_as_tsg(mmfault->refch)) {
 				id = mmfault->refch->tsgid;
-				if (id != FIFO_INVAL_TSG_ID)
+				if (id != FIFO_INVAL_TSG_ID) {
 					id_type = ID_TYPE_TSG;
+				}
 			} else {
 				nvgpu_err(g, "bare channels not supported");
 			}
@@ -898,19 +940,21 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 			mmfault->refch = NULL;
 		}

-		if (rc_type != RC_TYPE_NO_RC)
+		if (rc_type != RC_TYPE_NO_RC) {
 			g->ops.fifo.teardown_ch_tsg(g, act_eng_bitmask,
 				id, id_type, rc_type, mmfault);
+		}
 	} else {
 		if (mmfault->fault_type == gmmu_fault_type_pte_v()) {
 			nvgpu_log(g, gpu_dbg_intr, "invalid pte! try to fix");
 			err = gv11b_fb_fix_page_fault(g, mmfault);
-			if (err)
+			if (err) {
 				*invalidate_replay_val |=
 					fb_mmu_invalidate_replay_cancel_global_f();
-			else
+			} else {
 				*invalidate_replay_val |=
 					fb_mmu_invalidate_replay_start_ack_all_f();
+			}
 		} else {
 			/* cancel faults other than invalid pte */
 			*invalidate_replay_val |=
@@ -1026,8 +1070,9 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,

 	}
 	if (index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX &&
-			invalidate_replay_val != 0U)
+			invalidate_replay_val != 0U) {
 		gv11b_fb_replay_or_cancel_faults(g, invalidate_replay_val);
+	}
 }

 static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
@@ -1057,8 +1102,9 @@ static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,

 	/* refch will be put back after fault is handled */
 	refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
-	if (refch)
+	if (refch) {
 		chid = refch->chid;
+	}

 	/* It is still ok to continue if refch is NULL */
 	mmfault->refch = refch;
@@ -1180,15 +1226,17 @@ static void gv11b_fb_handle_bar2_fault(struct gk20a *g,
 {
 	if (fault_status & fb_mmu_fault_status_non_replayable_error_m()) {
 		if (gv11b_fb_is_fault_buf_enabled(g,
-				NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX))
+				NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
 			gv11b_fb_fault_buf_configure_hw(g, NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
+		}
 	}

 	if (fault_status & fb_mmu_fault_status_replayable_error_m()) {
 		if (gv11b_fb_is_fault_buf_enabled(g,
-				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX))
+				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX)) {
 			gv11b_fb_fault_buf_configure_hw(g,
 					NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
+		}
 	}
 	gv11b_ce_mthd_buffer_fault_in_bar2_fault(g);

@@ -1224,9 +1272,10 @@ void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
 		gv11b_fb_handle_mmu_fault_common(g, mmfault,
 				&invalidate_replay_val);

-		if (invalidate_replay_val)
+		if (invalidate_replay_val) {
 			gv11b_fb_replay_or_cancel_faults(g,
 					invalidate_replay_val);
+		}
 	}
 }

@@ -1254,8 +1303,9 @@ void gv11b_fb_handle_replayable_mmu_fault(struct gk20a *g)
 {
 	u32 fault_status = gk20a_readl(g, fb_mmu_fault_status_r());

-	if (!(fault_status & fb_mmu_fault_status_replayable_m()))
+	if (!(fault_status & fb_mmu_fault_status_replayable_m())) {
 		return;
+	}

 	if (gv11b_fb_is_fault_buf_enabled(g,
 			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
@@ -1349,16 +1399,19 @@ void gv11b_fb_hub_isr(struct gk20a *g)
 		nvgpu_info(g, "ecc uncorrected error notify");

 		status = gk20a_readl(g, fb_mmu_l2tlb_ecc_status_r());
-		if (status)
+		if (status) {
 			gv11b_handle_l2tlb_ecc_isr(g, status);
+		}

 		status = gk20a_readl(g, fb_mmu_hubtlb_ecc_status_r());
-		if (status)
+		if (status) {
 			gv11b_handle_hubtlb_ecc_isr(g, status);
+		}

 		status = gk20a_readl(g, fb_mmu_fillunit_ecc_status_r());
-		if (status)
+		if (status) {
 			gv11b_handle_fillunit_ecc_isr(g, status);
+		}
 	}
 	if (niso_intr &
 		(fb_niso_intr_mmu_other_fault_notify_m() |
@@ -1382,8 +1435,9 @@ bool gv11b_fb_mmu_fault_pending(struct gk20a *g)
 		fb_niso_intr_mmu_replayable_fault_notify_m() |
 		fb_niso_intr_mmu_replayable_fault_overflow_m() |
 		fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
-		fb_niso_intr_mmu_nonreplayable_fault_overflow_m()))
+		fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) {
 		return true;
+	}

 	return false;
 }
@@ -1420,8 +1474,9 @@ int gv11b_fb_mmu_invalidate_replay(struct gk20a *g,
 		nvgpu_udelay(5);
 	} while (!nvgpu_timeout_expired_msg(&timeout,
 			"invalidate replay failed on 0x%llx"));
-	if (err)
+	if (err) {
 		nvgpu_err(g, "invalidate replay timedout");
+	}

 	nvgpu_mutex_release(&g->mm.tlb_lock);

@@ -1460,8 +1515,9 @@ static int gv11b_fb_fix_page_fault(struct gk20a *g,
 	}

 	pte[0] |= gmmu_new_pte_valid_true_f();
-	if (pte[0] & gmmu_new_pte_read_only_true_f())
+	if (pte[0] & gmmu_new_pte_read_only_true_f()) {
 		pte[0] &= ~(gmmu_new_pte_read_only_true_f());
+	}
 	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
 		"new pte: %#08x %#08x", pte[1], pte[0]);
