mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: add include/nvgpu/mmu_fault.h
Move the mmu_fault_info struct from mm.h to mmu_fault.h.

Rename the #defines below and move them to mmu_fault.h:

NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY -> NVGPU_MMU_FAULT_NONREPLAY_INDX
NVGPU_MM_MMU_FAULT_TYPE_REPLAY              -> NVGPU_MMU_FAULT_REPLAY_INDX
FAULT_TYPE_NUM                              -> NVGPU_MMU_FAULT_TYPE_NUM
NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX      -> NVGPU_MMU_FAULT_NONREPLAY_REG_INDX
NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX         -> NVGPU_MMU_FAULT_REPLAY_REG_INDX
NVGPU_FB_MMU_FAULT_BUF_DISABLED             -> NVGPU_MMU_FAULT_BUF_DISABLED
NVGPU_FB_MMU_FAULT_BUF_ENABLED              -> NVGPU_MMU_FAULT_BUF_ENABLED

JIRA NVGPU-1313

Change-Id: I3d4d56f881a5c3856c005db6dc7d850be4bc041d
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2107772
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 906fd57c18
commit 1a85ecf1ed
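For orientation, here is a minimal sketch of what a call site looks like after this rename. The helper name example_enable_fault_bufs is hypothetical and not part of the commit; g->ops.fb.fault_buf_set_state_hw is the existing HAL callback visible in the hunks below:

#include <nvgpu/mmu_fault.h>

/*
 * Hypothetical example: enable both HW fault buffers using the renamed
 * defines. Before this change the same call sites used the
 * NVGPU_FB_MMU_FAULT_* names from gk20a.h.
 */
static void example_enable_fault_bufs(struct gk20a *g)
{
	g->ops.fb.fault_buf_set_state_hw(g,
			NVGPU_MMU_FAULT_NONREPLAY_REG_INDX,
			NVGPU_MMU_FAULT_BUF_ENABLED);
	g->ops.fb.fault_buf_set_state_hw(g,
			NVGPU_MMU_FAULT_REPLAY_REG_INDX,
			NVGPU_MMU_FAULT_BUF_ENABLED);
}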
@@ -67,17 +67,17 @@ void gv11b_mm_mmu_fault_disable_hw(struct gk20a *g)
 	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
 
 	if ((g->ops.fb.is_fault_buf_enabled(g,
-			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX))) {
+			NVGPU_MMU_FAULT_NONREPLAY_REG_INDX))) {
 		g->ops.fb.fault_buf_set_state_hw(g,
-				NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX,
-				NVGPU_FB_MMU_FAULT_BUF_DISABLED);
+				NVGPU_MMU_FAULT_NONREPLAY_REG_INDX,
+				NVGPU_MMU_FAULT_BUF_DISABLED);
 	}
 
 	if ((g->ops.fb.is_fault_buf_enabled(g,
-			NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX))) {
+			NVGPU_MMU_FAULT_REPLAY_REG_INDX))) {
 		g->ops.fb.fault_buf_set_state_hw(g,
-				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX,
-				NVGPU_FB_MMU_FAULT_BUF_DISABLED);
+				NVGPU_MMU_FAULT_REPLAY_REG_INDX,
+				NVGPU_MMU_FAULT_BUF_DISABLED);
 	}
 
 	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
@@ -92,13 +92,14 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
 
 	if (nvgpu_mem_is_valid(
-		&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY])) {
+		&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX])) {
 		nvgpu_dma_unmap_free(vm,
-			&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]);
+			&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX]);
 	}
-	if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY])) {
+	if (nvgpu_mem_is_valid(
+		&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX])) {
 		nvgpu_dma_unmap_free(vm,
-			&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]);
+			&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX]);
 	}
 
 	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
@@ -121,10 +122,10 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
 		(size_t)gmmu_fault_buf_size_v();
 
 	if (!nvgpu_mem_is_valid(
-		&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY])) {
+		&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX])) {
 
 		err = nvgpu_dma_alloc_map_sys(vm, fb_size,
-			&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]);
+			&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX]);
 		if (err != 0) {
 			nvgpu_err(g,
 			"Error in hw mmu fault buf [0] alloc in bar2 vm ");
@@ -134,9 +135,9 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
 	}
 
 	if (!nvgpu_mem_is_valid(
-		&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY])) {
+		&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX])) {
 		err = nvgpu_dma_alloc_map_sys(vm, fb_size,
-			&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]);
+			&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX]);
 		if (err != 0) {
 			nvgpu_err(g,
 			"Error in hw mmu fault buf [1] alloc in bar2 vm ");
@@ -149,13 +150,14 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
 void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g)
 {
 	if (nvgpu_mem_is_valid(
-		&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY])) {
+		&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_NONREPLAY_INDX])) {
 		g->ops.fb.fault_buf_configure_hw(g,
-			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
+			NVGPU_MMU_FAULT_NONREPLAY_REG_INDX);
 	}
-	if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY])) {
+	if (nvgpu_mem_is_valid(
+		&g->mm.hw_fault_buf[NVGPU_MMU_FAULT_REPLAY_INDX])) {
 		g->ops.fb.fault_buf_configure_hw(g,
-			NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
+			NVGPU_MMU_FAULT_REPLAY_REG_INDX);
 	}
 }
 
@@ -159,7 +159,7 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 
 	reg_val = g->ops.fb.read_mmu_fault_buffer_size(g, index);
-	if (state == NVGPU_FB_MMU_FAULT_BUF_ENABLED) {
+	if (state == NVGPU_MMU_FAULT_BUF_ENABLED) {
 		if (gv11b_fb_is_fault_buf_enabled(g, index)) {
 			nvgpu_log_info(g, "fault buffer is already enabled");
 		} else {
@@ -208,7 +208,7 @@ void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, u32 index)
 	nvgpu_log_fn(g, " ");
 
 	gv11b_fb_fault_buf_set_state_hw(g, index,
-		NVGPU_FB_MMU_FAULT_BUF_DISABLED);
+		NVGPU_MMU_FAULT_BUF_DISABLED);
 	addr_lo = u64_lo32(g->mm.hw_fault_buf[index].gpu_va >>
 			fb_mmu_fault_buffer_lo_addr_b());
 	addr_hi = u64_hi32(g->mm.hw_fault_buf[index].gpu_va);
@@ -221,7 +221,7 @@ void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, u32 index)
 		fb_mmu_fault_buffer_size_val_f(g->ops.channel.count(g)) |
 		fb_mmu_fault_buffer_size_overflow_intr_enable_f());
 
-	gv11b_fb_fault_buf_set_state_hw(g, index, NVGPU_FB_MMU_FAULT_BUF_ENABLED);
+	gv11b_fb_fault_buf_set_state_hw(g, index, NVGPU_MMU_FAULT_BUF_ENABLED);
 }
 
 void gv11b_fb_write_mmu_fault_buffer_lo_hi(struct gk20a *g, u32 index,
@@ -441,7 +441,7 @@ void gv11b_fb_handle_replay_fault_overflow(struct gk20a *g,
 		 u32 fault_status)
 {
 	u32 reg_val;
-	u32 index = NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX;
+	u32 index = NVGPU_MMU_FAULT_REPLAY_REG_INDX;
 
 	reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index);
 
@@ -476,7 +476,7 @@ void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g,
 		 u32 fault_status)
{
 	u32 reg_val;
-	u32 index = NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX;
+	u32 index = NVGPU_MMU_FAULT_NONREPLAY_REG_INDX;
 
 	reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index);
 
@@ -514,16 +514,17 @@ void gv11b_fb_handle_bar2_fault(struct gk20a *g,
 	if ((fault_status &
 	     fb_mmu_fault_status_non_replayable_error_m()) != 0U) {
 		if (gv11b_fb_is_fault_buf_enabled(g,
-				NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
-			gv11b_fb_fault_buf_configure_hw(g, NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
+				NVGPU_MMU_FAULT_NONREPLAY_REG_INDX)) {
+			gv11b_fb_fault_buf_configure_hw(g,
+				NVGPU_MMU_FAULT_NONREPLAY_REG_INDX);
 		}
 	}
 
 	if ((fault_status & fb_mmu_fault_status_replayable_error_m()) != 0U) {
 		if (gv11b_fb_is_fault_buf_enabled(g,
-				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX)) {
+				NVGPU_MMU_FAULT_REPLAY_REG_INDX)) {
 			gv11b_fb_fault_buf_configure_hw(g,
-				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
+				NVGPU_MMU_FAULT_REPLAY_REG_INDX);
 		}
 	}
 	g->ops.ce.mthd_buffer_fault_in_bar2_fault(g);
@@ -565,10 +566,10 @@ void gv11b_fb_handle_replayable_mmu_fault(struct gk20a *g)
 	}
 
 	if (gv11b_fb_is_fault_buf_enabled(g,
-			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
+			NVGPU_MMU_FAULT_NONREPLAY_REG_INDX)) {
 		gv11b_gmmu_handle_mmu_nonreplay_replay_fault(g,
 				fault_status,
-				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
+				NVGPU_MMU_FAULT_REPLAY_REG_INDX);
 	}
 }
 
@@ -586,14 +587,14 @@ void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr)
 		gv11b_gmmu_handle_other_fault_notify(g, fault_status);
 	}
 
-	if (gv11b_fb_is_fault_buf_enabled(g, NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
+	if (gv11b_fb_is_fault_buf_enabled(g, NVGPU_MMU_FAULT_NONREPLAY_REG_INDX)) {
 
 		if ((niso_intr &
 		     fb_niso_intr_mmu_nonreplayable_fault_notify_m()) != 0U) {
 
 			gv11b_gmmu_handle_mmu_nonreplay_replay_fault(g,
 					fault_status,
-					NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
+					NVGPU_MMU_FAULT_NONREPLAY_REG_INDX);
 
 			/*
 			 * When all the faults are processed,
@@ -610,14 +611,14 @@ void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr)
 
 	}
 
-	if (gv11b_fb_is_fault_buf_enabled(g, NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX)) {
+	if (gv11b_fb_is_fault_buf_enabled(g, NVGPU_MMU_FAULT_REPLAY_REG_INDX)) {
 
 		if ((niso_intr &
 		     fb_niso_intr_mmu_replayable_fault_notify_m()) != 0U) {
 
 			gv11b_gmmu_handle_mmu_nonreplay_replay_fault(g,
 					fault_status,
-					NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
+					NVGPU_MMU_FAULT_REPLAY_REG_INDX);
 		}
 		if ((niso_intr &
 		     fb_niso_intr_mmu_replayable_fault_overflow_m()) != 0U) {
@@ -48,9 +48,9 @@ void tu104_fb_handle_mmu_fault(struct gk20a *g)
 {
 	u32 info_fault = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
 	u32 nonreplay_fault = nvgpu_readl(g,
-		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX));
+		fb_mmu_int_vector_fault_r(NVGPU_MMU_FAULT_NONREPLAY_REG_INDX));
 	u32 replay_fault = nvgpu_readl(g,
-		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX));
+		fb_mmu_int_vector_fault_r(NVGPU_MMU_FAULT_REPLAY_REG_INDX));
 	u32 fault_status = g->ops.fb.read_mmu_fault_status(g);
 
 	nvgpu_log(g, gpu_dbg_intr, "mmu_fault_status = 0x%08x", fault_status);
@@ -65,7 +65,7 @@ void tu104_fb_handle_mmu_fault(struct gk20a *g)
 	}
 
 	if (gv11b_fb_is_fault_buf_enabled(g,
-			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
+			NVGPU_MMU_FAULT_NONREPLAY_REG_INDX)) {
 		if (intr_tu104_vector_intr_pending(g,
 			fb_mmu_int_vector_fault_notify_v(nonreplay_fault))) {
 			intr_tu104_intr_clear_leaf_vector(g,
@@ -73,7 +73,7 @@ void tu104_fb_handle_mmu_fault(struct gk20a *g)
 
 			gv11b_gmmu_handle_mmu_nonreplay_replay_fault(g,
 				fault_status,
-				NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
+				NVGPU_MMU_FAULT_NONREPLAY_REG_INDX);
 
 			/*
 			 * When all the faults are processed,
@@ -93,7 +93,7 @@ void tu104_fb_handle_mmu_fault(struct gk20a *g)
 	}
 
 	if (gv11b_fb_is_fault_buf_enabled(g,
-			NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX)) {
+			NVGPU_MMU_FAULT_REPLAY_REG_INDX)) {
 		if (intr_tu104_vector_intr_pending(g,
 			fb_mmu_int_vector_fault_notify_v(replay_fault))) {
 			intr_tu104_intr_clear_leaf_vector(g,
@@ -101,7 +101,7 @@ void tu104_fb_handle_mmu_fault(struct gk20a *g)
 
 			gv11b_gmmu_handle_mmu_nonreplay_replay_fault(g,
 				fault_status,
-				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
+				NVGPU_MMU_FAULT_REPLAY_REG_INDX);
 		}
 
 		if (intr_tu104_vector_intr_pending(g,
@@ -37,9 +37,9 @@ void tu104_fb_intr_enable(struct gk20a *g)
 	u32 info_fault = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
 	u32 nonreplay_fault = nvgpu_readl(g,
 		fb_mmu_int_vector_fault_r(
-			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX));
+			NVGPU_MMU_FAULT_NONREPLAY_REG_INDX));
 	u32 replay_fault = nvgpu_readl(g,
-		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX));
+		fb_mmu_int_vector_fault_r(NVGPU_MMU_FAULT_REPLAY_REG_INDX));
 	u32 ecc_error = nvgpu_readl(g, fb_mmu_int_vector_ecc_error_r());
 
 	intr_tu104_vector_en_set(g,
@@ -61,9 +61,9 @@ void tu104_fb_intr_disable(struct gk20a *g)
 	u32 info_fault = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
 	u32 nonreplay_fault = nvgpu_readl(g,
 		fb_mmu_int_vector_fault_r(
-			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX));
+			NVGPU_MMU_FAULT_NONREPLAY_REG_INDX));
 	u32 replay_fault = nvgpu_readl(g,
-		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX));
+		fb_mmu_int_vector_fault_r(NVGPU_MMU_FAULT_REPLAY_REG_INDX));
 	u32 ecc_error = nvgpu_readl(g, fb_mmu_int_vector_ecc_error_r());
 
 	intr_tu104_vector_en_clear(g,
@@ -85,9 +85,9 @@ void tu104_fb_intr_isr(struct gk20a *g)
 	u32 info_fault = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
 	u32 nonreplay_fault = nvgpu_readl(g,
 		fb_mmu_int_vector_fault_r(
-			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX));
+			NVGPU_MMU_FAULT_NONREPLAY_REG_INDX));
 	u32 replay_fault = nvgpu_readl(g,
-		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX));
+		fb_mmu_int_vector_fault_r(NVGPU_MMU_FAULT_REPLAY_REG_INDX));
 	u32 ecc_error = nvgpu_readl(g, fb_mmu_int_vector_ecc_error_r());
 
 	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
@@ -118,9 +118,9 @@ bool tu104_fb_intr_is_mmu_fault_pending(struct gk20a *g)
 {
 	u32 info_fault = nvgpu_readl(g, fb_mmu_int_vector_info_fault_r());
 	u32 nonreplay_fault = nvgpu_readl(g,
-		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX));
+		fb_mmu_int_vector_fault_r(NVGPU_MMU_FAULT_NONREPLAY_REG_INDX));
 	u32 replay_fault = nvgpu_readl(g,
-		fb_mmu_int_vector_fault_r(NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX));
+		fb_mmu_int_vector_fault_r(NVGPU_MMU_FAULT_REPLAY_REG_INDX));
 	u32 ecc_error = nvgpu_readl(g, fb_mmu_int_vector_ecc_error_r());
 
 	if (intr_tu104_vector_intr_pending(g,
@@ -38,6 +38,7 @@
 #include <nvgpu/nvgpu_err.h>
 #include <nvgpu/ltc.h>
 #include <nvgpu/rc.h>
+#include <nvgpu/mmu_fault.h>
 
 #include <hal/fb/fb_mmu_fault_gv11b.h>
 #include <hal/mm/gmmu/gmmu_mmu_fault_gv11b.h>
@@ -441,7 +442,7 @@ void gv11b_gmmu_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
 		return;
 	}
 	nvgpu_log(g, gpu_dbg_intr, "%s MMU FAULT",
-		index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX ?
+		index == NVGPU_MMU_FAULT_REPLAY_REG_INDX ?
 		"REPLAY" : "NON-REPLAY");
 
 	nvgpu_log(g, gpu_dbg_intr, "get ptr = %d", get_indx);
@@ -477,7 +478,7 @@ void gv11b_gmmu_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
 		rd32_val = nvgpu_mem_rd32(g, mem,
 				offset + gmmu_fault_buf_entry_valid_w());
 
-		if (index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX &&
+		if (index == NVGPU_MMU_FAULT_REPLAY_REG_INDX &&
 		    mmufault->fault_addr != 0ULL) {
 			/*
 			 * fault_addr "0" is not supposed to be fixed ever.
@@ -503,7 +504,7 @@ void gv11b_gmmu_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
 				&invalidate_replay_val);
 
 	}
-	if (index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX &&
+	if (index == NVGPU_MMU_FAULT_REPLAY_REG_INDX &&
 	    invalidate_replay_val != 0U) {
 		gv11b_fb_replay_or_cancel_faults(g, invalidate_replay_val);
 	}
@@ -515,7 +516,7 @@ void gv11b_gmmu_handle_other_fault_notify(struct gk20a *g,
 	struct mmu_fault_info *mmufault;
 	u32 invalidate_replay_val = 0U;
 
-	mmufault = &g->mm.fault_info[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY];
+	mmufault = &g->mm.fault_info[NVGPU_MMU_FAULT_NONREPLAY_INDX];
 
 	gv11b_mm_copy_from_fault_snap_reg(g, fault_status, mmufault);
 
@@ -207,12 +207,6 @@ enum nvgpu_event_id_type {
  * in nvgpu/enabled.h
  */
 
-/* index for FB fault buffer functions */
-#define NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX 0U
-#define NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX 1U
-#define NVGPU_FB_MMU_FAULT_BUF_DISABLED 0U
-#define NVGPU_FB_MMU_FAULT_BUF_ENABLED 1U
-
 /* Parameters for init_elcg_mode/init_blcg_mode */
 enum {
 	ELCG_RUN,	/* clk always run, i.e. disable elcg */
@@ -32,44 +32,13 @@
 #include <nvgpu/allocator.h>
 #include <nvgpu/list.h>
 #include <nvgpu/sizes.h>
+#include <nvgpu/mmu_fault.h>
 
 struct gk20a;
 struct vm_gk20a;
 struct nvgpu_mem;
 struct nvgpu_pd_cache;
 
-#define NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY 0
-#define NVGPU_MM_MMU_FAULT_TYPE_REPLAY 1
-
-#define FAULT_TYPE_NUM 2	/* replay and nonreplay faults */
-
-struct mmu_fault_info {
-	u64 inst_ptr;
-	u32 inst_aperture;
-	u64 fault_addr;
-	u32 fault_addr_aperture;
-	u32 timestamp_lo;
-	u32 timestamp_hi;
-	u32 mmu_engine_id;
-	u32 gpc_id;
-	u32 client_type;
-	u32 client_id;
-	u32 fault_type;
-	u32 access_type;
-	u32 protected_mode;
-	bool replayable_fault;
-	u32 replay_fault_en;
-	bool valid;
-	u32 faulted_pbdma;
-	u32 faulted_engine;
-	u32 faulted_subid;
-	u32 chid;
-	struct channel_gk20a *refch;
-	const char *client_type_desc;
-	const char *fault_type_desc;
-	const char *client_id_desc;
-};
-
 enum nvgpu_flush_op {
 	NVGPU_FLUSH_DEFAULT,
 	NVGPU_FLUSH_FB,
@@ -131,8 +100,8 @@ struct mm_gk20a {
 
 	struct nvgpu_mem bar2_desc;
 
-	struct nvgpu_mem hw_fault_buf[FAULT_TYPE_NUM];
-	struct mmu_fault_info fault_info[FAULT_TYPE_NUM];
+	struct nvgpu_mem hw_fault_buf[NVGPU_MMU_FAULT_TYPE_NUM];
+	struct mmu_fault_info fault_info[NVGPU_MMU_FAULT_TYPE_NUM];
 	struct nvgpu_mutex hub_isr_mutex;
 
 	/*
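Since hw_fault_buf[] and fault_info[] are now sized by NVGPU_MMU_FAULT_TYPE_NUM, per-buffer teardown could also be written as a bounded loop. A minimal sketch under that assumption (example_free_fault_bufs is hypothetical; the commit keeps the explicit per-index calls shown in the hunks above):

static void example_free_fault_bufs(struct gk20a *g, struct vm_gk20a *vm)
{
	u32 i;

	/*
	 * NVGPU_MMU_FAULT_TYPE_NUM covers the nonreplay (0U) and
	 * replay (1U) indices defined in nvgpu/mmu_fault.h.
	 */
	for (i = 0U; i < NVGPU_MMU_FAULT_TYPE_NUM; i++) {
		if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[i])) {
			nvgpu_dma_unmap_free(vm, &g->mm.hw_fault_buf[i]);
		}
	}
}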
drivers/gpu/nvgpu/include/nvgpu/mmu_fault.h (new file, 68 lines)
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVGPU_MMU_FAULT_H
+#define NVGPU_MMU_FAULT_H
+
+#include <nvgpu/types.h>
+
+#define NVGPU_MMU_FAULT_NONREPLAY_INDX 0U
+#define NVGPU_MMU_FAULT_REPLAY_INDX 1U
+
+/* replay and nonreplay faults */
+#define NVGPU_MMU_FAULT_TYPE_NUM 2U
+
+#define NVGPU_MMU_FAULT_NONREPLAY_REG_INDX 0U
+#define NVGPU_MMU_FAULT_REPLAY_REG_INDX 1U
+#define NVGPU_MMU_FAULT_BUF_DISABLED 0U
+#define NVGPU_MMU_FAULT_BUF_ENABLED 1U
+
+struct channel_gk20a;
+
+struct mmu_fault_info {
+	u64 inst_ptr;
+	u32 inst_aperture;
+	u64 fault_addr;
+	u32 fault_addr_aperture;
+	u32 timestamp_lo;
+	u32 timestamp_hi;
+	u32 mmu_engine_id;
+	u32 gpc_id;
+	u32 client_type;
+	u32 client_id;
+	u32 fault_type;
+	u32 access_type;
+	u32 protected_mode;
+	bool replayable_fault;
+	u32 replay_fault_en;
+	bool valid;
+	u32 faulted_pbdma;
+	u32 faulted_engine;
+	u32 faulted_subid;
+	u32 chid;
+	struct channel_gk20a *refch;
+	const char *client_type_desc;
+	const char *fault_type_desc;
+	const char *client_id_desc;
+};
+
+#endif /* NVGPU_MMU_FAULT_H */
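A usage sketch for the new header (hypothetical helper, not from the commit): a file that needs mmu_fault_info or the fault-buffer indices now includes nvgpu/mmu_fault.h directly instead of pulling them in through nvgpu/mm.h or gk20a.h.

#include <nvgpu/mmu_fault.h>

/*
 * Hypothetical example: map a decoded fault to its register index using
 * the replayable_fault flag carried in mmu_fault_info.
 */
static u32 example_fault_reg_index(const struct mmu_fault_info *mmufault)
{
	return mmufault->replayable_fault ?
		NVGPU_MMU_FAULT_REPLAY_REG_INDX :
		NVGPU_MMU_FAULT_NONREPLAY_REG_INDX;
}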
@@ -295,11 +295,11 @@ static int test_page_faults_disable_hw(struct unit_module *m, struct gk20a *g,
 {
 	g->ops.mm.mmu_fault_disable_hw(g);
 	if (g->ops.fb.is_fault_buf_enabled(g,
-			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
+			NVGPU_MMU_FAULT_NONREPLAY_REG_INDX)) {
 		unit_return_fail(m, "Non-replay buf still enabled\n");
 	}
 	if (g->ops.fb.is_fault_buf_enabled(g,
-			NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX)) {
+			NVGPU_MMU_FAULT_REPLAY_REG_INDX)) {
 		unit_return_fail(m, "Non-replay buf still enabled\n");
 	}
 