mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Use HAL for calls from MM to FB
mm_gv11b.c has several direct calls to fb_gv11b.h. Redirect them to go
via a HAL. Also make sure the HALs use parameters with correct
signedness, and prefix the parameter constants with NVGPU_FB_MMU_.

MMU buffer table indices were also defined in fb_gv11b.h, even though
the tables themselves are defined in include/nvgpu/mm.h. Move the
indices to include/nvgpu/mm.h and prefix them with NVGPU_MM_MMU_.

JIRA NVGPU-714

Change-Id: Ieeae7c5664b8f53f8313cfad0a771d14637caa08
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1776131
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit b07a304ba3 (parent 96d4842c0d), committed by mobile promotions
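To make the indirection concrete, here is a minimal, self-contained C sketch of the pattern this change applies: chip-agnostic MM code calls through the per-chip function pointers in g->ops.fb rather than calling gv11b_fb_* directly. The trimmed-down struct gk20a and the stub_fb_* implementations below are illustrative stand-ins, not the real nvgpu definitions.

/*
 * Minimal sketch of the fb HAL indirection, assuming a heavily
 * trimmed struct gk20a. Only the names taken from the diff
 * (g->ops.fb.*, NVGPU_FB_MMU_*) match the real code.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int u32;	/* stand-in for the kernel u32 type */

#define NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX	0U
#define NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX	1U

struct gk20a;

struct gpu_ops {
	struct {
		bool (*mmu_fault_pending)(struct gk20a *g);
		bool (*is_fault_buf_enabled)(struct gk20a *g, u32 index);
	} fb;
};

struct gk20a {
	struct gpu_ops ops;
};

/* Illustrative stand-ins for the gv11b implementations in fb_gv11b.c. */
static bool stub_fb_mmu_fault_pending(struct gk20a *g)
{
	(void)g;
	return false;
}

static bool stub_fb_is_fault_buf_enabled(struct gk20a *g, u32 index)
{
	(void)g;
	return index == NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX;
}

/*
 * Chip-agnostic MM code only sees the HAL, mirroring how
 * gv11b_mm_mmu_fault_pending() now returns
 * g->ops.fb.mmu_fault_pending(g) instead of calling
 * gv11b_fb_mmu_fault_pending() directly.
 */
static bool mm_mmu_fault_pending(struct gk20a *g)
{
	return g->ops.fb.mmu_fault_pending(g);
}

int main(void)
{
	/* hal_gv11b.c-style wiring of the per-chip fb ops. */
	struct gk20a g = {
		.ops.fb = {
			.mmu_fault_pending = stub_fb_mmu_fault_pending,
			.is_fault_buf_enabled = stub_fb_is_fault_buf_enabled,
		},
	};

	printf("fault pending: %d\n", mm_mmu_fault_pending(&g));
	printf("nonreplay buf enabled: %d\n",
	       g.ops.fb.is_fault_buf_enabled(&g,
			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX));
	return 0;
}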
@@ -162,6 +162,13 @@ struct nvgpu_gpfifo_userdata {
  * should go in struct gk20a or be implemented with the boolean flag API defined
  * in nvgpu/enabled.h
  */
+
+/* index for FB fault buffer functions */
+#define NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX		0U
+#define NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX		1U
+#define NVGPU_FB_MMU_FAULT_BUF_DISABLED			0U
+#define NVGPU_FB_MMU_FAULT_BUF_ENABLED			1U
+
 struct gpu_ops {
         struct {
                 int (*determine_L2_size_bytes)(struct gk20a *gk20a);
@@ -564,6 +571,11 @@ struct gpu_ops {
                 u32 (*read_mmu_fault_status)(struct gk20a *g);
                 int (*mmu_invalidate_replay)(struct gk20a *g,
                         u32 invalidate_replay_val);
+                bool (*mmu_fault_pending)(struct gk20a *g);
+                bool (*is_fault_buf_enabled)(struct gk20a *g, u32 index);
+                void (*fault_buf_set_state_hw)(struct gk20a *g,
+                        u32 index, u32 state);
+                void (*fault_buf_configure_hw)(struct gk20a *g, u32 index);
         } fb;
         struct {
                 void (*slcg_bus_load_gating_prod)(struct gk20a *g, bool prod);
@@ -501,6 +501,10 @@ static const struct gpu_ops gv100_ops = {
                 .read_mmu_fault_info = fb_gv11b_read_mmu_fault_info,
                 .read_mmu_fault_status = fb_gv11b_read_mmu_fault_status,
                 .mmu_invalidate_replay = gv11b_fb_mmu_invalidate_replay,
+                .mmu_fault_pending = gv11b_fb_mmu_fault_pending,
+                .is_fault_buf_enabled = gv11b_fb_is_fault_buf_enabled,
+                .fault_buf_set_state_hw = gv11b_fb_fault_buf_set_state_hw,
+                .fault_buf_configure_hw = gv11b_fb_fault_buf_configure_hw,
         },
         .clock_gating = {
                 .slcg_bus_load_gating_prod =
@@ -223,17 +223,16 @@ static const char *const gpc_client_descs_gv11b[] = {
         "t1 36", "t1 37", "t1 38", "t1 39",
 };
 
-u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g,
-                unsigned int index)
+bool gv11b_fb_is_fault_buf_enabled(struct gk20a *g, u32 index)
 {
         u32 reg_val;
 
         reg_val = g->ops.fb.read_mmu_fault_buffer_size(g, index);
-        return fb_mmu_fault_buffer_size_enable_v(reg_val);
+        return fb_mmu_fault_buffer_size_enable_v(reg_val) != 0U;
 }
 
 static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g,
-                unsigned int index, u32 next)
+                u32 index, u32 next)
 {
         u32 reg_val;
 
@@ -257,8 +256,7 @@ static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g,
         nvgpu_mb();
 }
 
-static u32 gv11b_fb_fault_buffer_get_index(struct gk20a *g,
-                unsigned int index)
+static u32 gv11b_fb_fault_buffer_get_index(struct gk20a *g, u32 index)
 {
         u32 reg_val;
 
@@ -266,8 +264,7 @@ static u32 gv11b_fb_fault_buffer_get_index(struct gk20a *g,
         return fb_mmu_fault_buffer_get_ptr_v(reg_val);
 }
 
-static u32 gv11b_fb_fault_buffer_put_index(struct gk20a *g,
-                unsigned int index)
+static u32 gv11b_fb_fault_buffer_put_index(struct gk20a *g, u32 index)
 {
         u32 reg_val;
 
@@ -275,8 +272,7 @@ static u32 gv11b_fb_fault_buffer_put_index(struct gk20a *g,
         return fb_mmu_fault_buffer_put_ptr_v(reg_val);
 }
 
-static u32 gv11b_fb_fault_buffer_size_val(struct gk20a *g,
-                unsigned int index)
+static u32 gv11b_fb_fault_buffer_size_val(struct gk20a *g, u32 index)
 {
         u32 reg_val;
 
@@ -285,7 +281,7 @@ static u32 gv11b_fb_fault_buffer_size_val(struct gk20a *g,
 }
 
 static bool gv11b_fb_is_fault_buffer_empty(struct gk20a *g,
-                unsigned int index, u32 *get_idx)
+                u32 index, u32 *get_idx)
 {
         u32 put_idx;
 
@@ -295,8 +291,7 @@ static bool gv11b_fb_is_fault_buffer_empty(struct gk20a *g,
         return *get_idx == put_idx;
 }
 
-static bool gv11b_fb_is_fault_buffer_full(struct gk20a *g,
-                unsigned int index)
+static bool gv11b_fb_is_fault_buffer_full(struct gk20a *g, u32 index)
 {
         u32 get_idx, put_idx, entries;
 
@@ -311,7 +306,7 @@ static bool gv11b_fb_is_fault_buffer_full(struct gk20a *g,
 }
 
 void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
-                unsigned int index, unsigned int state)
+                u32 index, u32 state)
 {
         u32 fault_status;
         u32 reg_val;
@@ -319,7 +314,7 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
         nvgpu_log_fn(g, " ");
 
         reg_val = g->ops.fb.read_mmu_fault_buffer_size(g, index);
-        if (state) {
+        if (state == NVGPU_FB_MMU_FAULT_BUF_ENABLED) {
                 if (gv11b_fb_is_fault_buf_enabled(g, index)) {
                         nvgpu_log_info(g, "fault buffer is already enabled");
                 } else {
@@ -358,7 +353,7 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
         }
 }
 
-void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index)
+void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, u32 index)
 {
         u32 addr_lo;
         u32 addr_hi;
@@ -366,7 +361,7 @@ void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index)
         nvgpu_log_fn(g, " ");
 
         gv11b_fb_fault_buf_set_state_hw(g, index,
-                        FAULT_BUF_DISABLED);
+                        NVGPU_FB_MMU_FAULT_BUF_DISABLED);
         addr_lo = u64_lo32(g->mm.hw_fault_buf[index].gpu_va >>
                         ram_in_base_shift_v());
         addr_hi = u64_hi32(g->mm.hw_fault_buf[index].gpu_va);
@@ -379,7 +374,7 @@ void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index)
                 fb_mmu_fault_buffer_size_val_f(g->ops.fifo.get_num_fifos(g)) |
                 fb_mmu_fault_buffer_size_overflow_intr_enable_f());
 
-        gv11b_fb_fault_buf_set_state_hw(g, index, FAULT_BUF_ENABLED);
+        gv11b_fb_fault_buf_set_state_hw(g, index, NVGPU_FB_MMU_FAULT_BUF_ENABLED);
 }
 
 void gv11b_fb_enable_hub_intr(struct gk20a *g)
@@ -929,7 +924,7 @@ static int gv11b_fb_replay_or_cancel_faults(struct gk20a *g,
 }
 
 void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
-                u32 fault_status, unsigned int index)
+                u32 fault_status, u32 index)
 {
         u32 get_indx, offset, rd32_val, entries;
         struct nvgpu_mem *mem;
@@ -944,7 +939,8 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
                 return;
         }
         nvgpu_log(g, gpu_dbg_intr, "%s MMU FAULT" ,
-                index == REPLAY_REG_INDEX ? "REPLAY" : "NON-REPLAY");
+                index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX ?
+                        "REPLAY" : "NON-REPLAY");
 
         nvgpu_log(g, gpu_dbg_intr, "get ptr = %d", get_indx);
 
@@ -978,7 +974,8 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
                 rd32_val = nvgpu_mem_rd32(g, mem,
                         offset + gmmu_fault_buf_entry_valid_w());
 
-                if (index == REPLAY_REG_INDEX && mmfault->fault_addr != 0ULL) {
+                if (index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX &&
+                                mmfault->fault_addr != 0ULL) {
                         /* fault_addr "0" is not supposed to be fixed ever.
                          * For the first time when prev = 0, next = 0 and
                          * fault addr is also 0 then handle_mmu_fault_common will
@@ -998,7 +995,8 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
                         &invalidate_replay_val);
 
         }
-        if (index == REPLAY_REG_INDEX && invalidate_replay_val)
+        if (index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX &&
+                        invalidate_replay_val != 0U)
                 gv11b_fb_replay_or_cancel_faults(g, invalidate_replay_val);
 }
 
@@ -1080,7 +1078,7 @@ void gv11b_fb_handle_replay_fault_overflow(struct gk20a *g,
                  u32 fault_status)
 {
         u32 reg_val;
-        unsigned int index = REPLAY_REG_INDEX;
+        u32 index = NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX;
 
         reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index);
 
@@ -1115,7 +1113,7 @@ void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g,
                  u32 fault_status)
 {
         u32 reg_val;
-        unsigned int index = NONREPLAY_REG_INDEX;
+        u32 index = NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX;
 
         reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index);
 
@@ -1151,13 +1149,16 @@ static void gv11b_fb_handle_bar2_fault(struct gk20a *g,
                 struct mmu_fault_info *mmfault, u32 fault_status)
 {
         if (fault_status & fb_mmu_fault_status_non_replayable_error_m()) {
-                if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))
-                        gv11b_fb_fault_buf_configure_hw(g, NONREPLAY_REG_INDEX);
+                if (gv11b_fb_is_fault_buf_enabled(g,
+                                NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX))
+                        gv11b_fb_fault_buf_configure_hw(g, NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
         }
 
         if (fault_status & fb_mmu_fault_status_replayable_error_m()) {
-                if (gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))
-                        gv11b_fb_fault_buf_configure_hw(g, REPLAY_REG_INDEX);
+                if (gv11b_fb_is_fault_buf_enabled(g,
+                                NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX))
+                        gv11b_fb_fault_buf_configure_hw(g,
+                                NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
         }
         gv11b_ce_mthd_buffer_fault_in_bar2_fault(g);
 
@@ -1175,7 +1176,7 @@ void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
         struct mmu_fault_info *mmfault;
         u32 invalidate_replay_val = 0;
 
-        mmfault = &g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY];
+        mmfault = &g->mm.fault_info[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY];
 
         gv11b_mm_copy_from_fault_snap_reg(g, fault_status, mmfault);
 
@@ -1226,9 +1227,11 @@ void gv11b_fb_handle_replayable_mmu_fault(struct gk20a *g)
         if (!(fault_status & fb_mmu_fault_status_replayable_m()))
                 return;
 
-        if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX)) {
+        if (gv11b_fb_is_fault_buf_enabled(g,
+                        NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
                 gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
-                                fault_status, REPLAY_REG_INDEX);
+                                fault_status,
+                                NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
         }
 }
 
@@ -1246,13 +1249,14 @@ static void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr)
                 gv11b_fb_handle_other_fault_notify(g, fault_status);
         }
 
-        if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX)) {
+        if (gv11b_fb_is_fault_buf_enabled(g, NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
 
                 if (niso_intr &
                         fb_niso_intr_mmu_nonreplayable_fault_notify_m()) {
 
                         gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
-                                        fault_status, NONREPLAY_REG_INDEX);
+                                        fault_status,
+                                        NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
 
                         /*
                          * When all the faults are processed,
@@ -1269,13 +1273,14 @@ static void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr)
 
         }
 
-        if (gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX)) {
+        if (gv11b_fb_is_fault_buf_enabled(g, NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX)) {
 
                 if (niso_intr &
                         fb_niso_intr_mmu_replayable_fault_notify_m()) {
 
                         gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
-                                        fault_status, REPLAY_REG_INDEX);
+                                        fault_status,
+                                        NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
                 }
                 if (niso_intr &
                         fb_niso_intr_mmu_replayable_fault_overflow_m()) {
@@ -28,15 +28,6 @@
-#define NONREPLAY_REG_INDEX             0
-#define REPLAY_REG_INDEX                1
-
-#define FAULT_BUF_DISABLED              0
-#define FAULT_BUF_ENABLED               1
-
 #define FAULT_BUF_INVALID               0
 #define FAULT_BUF_VALID                 1
 
-#define FAULT_TYPE_OTHER_AND_NONREPLAY  0
-#define FAULT_TYPE_REPLAY               1
-
 struct gk20a;
 
 void gv11b_fb_init_hw(struct gk20a *g);
@@ -46,11 +37,10 @@ void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr);
 void gv11b_fb_reset(struct gk20a *g);
 void gv11b_fb_hub_isr(struct gk20a *g);
 
-u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g,
-                unsigned int index);
+bool gv11b_fb_is_fault_buf_enabled(struct gk20a *g, u32 index );
 void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
-                unsigned int index, unsigned int state);
-void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index);
+                u32 index, u32 state);
+void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, u32 index);
 void gv11b_fb_enable_hub_intr(struct gk20a *g);
 void gv11b_fb_disable_hub_intr(struct gk20a *g);
 bool gv11b_fb_mmu_fault_pending(struct gk20a *g);
@@ -58,7 +48,7 @@ void gv11b_fb_handle_dropped_mmu_fault(struct gk20a *g, u32 fault_status);
 void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
                 u32 fault_status);
 void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
-                u32 fault_status, unsigned int index);
+                u32 fault_status, u32 index);
 void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g,
                 u32 fault_status);
 void gv11b_fb_handle_replay_fault_overflow(struct gk20a *g,
@@ -465,6 +465,10 @@ static const struct gpu_ops gv11b_ops = {
                 .read_mmu_fault_info = fb_gv11b_read_mmu_fault_info,
                 .read_mmu_fault_status = fb_gv11b_read_mmu_fault_status,
                 .mmu_invalidate_replay = gv11b_fb_mmu_invalidate_replay,
+                .mmu_fault_pending = gv11b_fb_mmu_fault_pending,
+                .is_fault_buf_enabled = gv11b_fb_is_fault_buf_enabled,
+                .fault_buf_set_state_hw = gv11b_fb_fault_buf_set_state_hw,
+                .fault_buf_configure_hw = gv11b_fb_fault_buf_configure_hw,
         },
         .clock_gating = {
                 .slcg_bus_load_gating_prod =
@@ -35,10 +35,8 @@
 #include "gp10b/mc_gp10b.h"
 
 #include "mm_gv11b.h"
-#include "fb_gv11b.h"
 #include "subctx_gv11b.h"
 
-#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
 #include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
 
 #define NVGPU_L3_ALLOC_BIT      BIT(36)
@@ -66,7 +64,7 @@ void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
 
 bool gv11b_mm_mmu_fault_pending(struct gk20a *g)
 {
-        return gv11b_fb_mmu_fault_pending(g);
+        return g->ops.fb.mmu_fault_pending(g);
 }
 
 void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
@@ -79,23 +77,27 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 
         g->ops.fb.disable_hub_intr(g);
 
-        if ((gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))) {
-                gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX,
-                                FAULT_BUF_DISABLED);
+        if ((g->ops.fb.is_fault_buf_enabled(g,
+                        NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX))) {
+                g->ops.fb.fault_buf_set_state_hw(g,
+                                NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX,
+                                NVGPU_FB_MMU_FAULT_BUF_DISABLED);
         }
 
-        if ((gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))) {
-                gv11b_fb_fault_buf_set_state_hw(g, REPLAY_REG_INDEX,
-                                FAULT_BUF_DISABLED);
+        if ((g->ops.fb.is_fault_buf_enabled(g,
+                        NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX))) {
+                g->ops.fb.fault_buf_set_state_hw(g,
+                                NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX,
+                                NVGPU_FB_MMU_FAULT_BUF_DISABLED);
         }
 
         if (nvgpu_mem_is_valid(
-                        &g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]))
+                        &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]))
                 nvgpu_dma_unmap_free(vm,
-                        &g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
-        if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]))
+                        &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]);
+        if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]))
                 nvgpu_dma_unmap_free(vm,
-                        &g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
+                        &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]);
 
         nvgpu_mutex_release(&g->mm.hub_isr_mutex);
         nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
@@ -117,10 +119,10 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
                 gmmu_fault_buf_size_v();
 
         if (!nvgpu_mem_is_valid(
-                        &g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY])) {
+                        &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY])) {
 
                 err = nvgpu_dma_alloc_map_sys(vm, fb_size,
-                        &g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
+                        &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]);
                 if (err) {
                         nvgpu_err(g,
                         "Error in hw mmu fault buf [0] alloc in bar2 vm ");
@@ -130,9 +132,9 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
         }
 
         if (!nvgpu_mem_is_valid(
-                        &g->mm.hw_fault_buf[FAULT_TYPE_REPLAY])) {
+                        &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY])) {
                 err = nvgpu_dma_alloc_map_sys(vm, fb_size,
-                        &g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
+                        &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]);
                 if (err) {
                         nvgpu_err(g,
                         "Error in hw mmu fault buf [1] alloc in bar2 vm ");
@@ -145,10 +147,12 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
 static void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g)
 {
         if (nvgpu_mem_is_valid(
-                        &g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]))
-                gv11b_fb_fault_buf_configure_hw(g, NONREPLAY_REG_INDEX);
-        if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]))
-                gv11b_fb_fault_buf_configure_hw(g, REPLAY_REG_INDEX);
+                        &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]))
+                g->ops.fb.fault_buf_configure_hw(g,
+                        NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
+        if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]))
+                g->ops.fb.fault_buf_configure_hw(g,
+                        NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
 }
 
 static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
@@ -38,6 +38,9 @@ struct vm_gk20a;
 struct nvgpu_mem;
 struct nvgpu_pd_cache;
 
+#define NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY     0
+#define NVGPU_MM_MMU_FAULT_TYPE_REPLAY                  1
+
 #define FAULT_TYPE_NUM  2       /* replay and nonreplay faults */
 
 struct mmu_fault_info {
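The mm.h hunk above is the other half of the cleanup: the index constants now sit next to the tables they index. A companion sketch of that relationship, assuming heavily trimmed stand-ins (nvgpu_mem_stub and mm_stub are illustrative, not the real struct nvgpu_mem or the MM state in struct gk20a; only the NVGPU_MM_MMU_* and FAULT_TYPE_NUM names come from the diff):

#include <stdio.h>

#define NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY	0
#define NVGPU_MM_MMU_FAULT_TYPE_REPLAY			1
#define FAULT_TYPE_NUM	2	/* replay and nonreplay faults */

/* Illustrative stand-in for struct nvgpu_mem. */
struct nvgpu_mem_stub {
	int valid;
};

/* Trimmed stand-in for the MM state that owns hw_fault_buf[]. */
struct mm_stub {
	struct nvgpu_mem_stub hw_fault_buf[FAULT_TYPE_NUM];
};

int main(void)
{
	struct mm_stub mm = { 0 };

	/* The moved constants index the table defined alongside them. */
	mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY].valid = 1;

	for (int i = 0; i < FAULT_TYPE_NUM; i++)
		printf("hw_fault_buf[%d] valid=%d\n", i,
		       mm.hw_fault_buf[i].valid);
	return 0;
}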