gpu: nvgpu: Simplify FB hub intr enable

Hard-code the flags used for enabling and disabling FB hub interrupts, instead of assembling them from per-type HUB_INTR_TYPE_* masks, and drop the index/intr_type parameters from the enable_hub_intr and disable_hub_intr HAL ops.

JIRA NVGPU-714

Change-Id: I806ef443cb9e27e221d407d633ca91d8fb40d075
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1769853
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Terje Bergstrom
Date:      2018-07-03 14:00:40 -07:00
Committer: mobile promotions
Parent:    572fba2c52
Commit:    a801c897df

12 changed files with 75 additions and 170 deletions
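
In short, the change removes the per-call register index and HUB_INTR_TYPE_* mask plumbing: each chip's HAL now exposes a parameterless enable/disable pair that writes a hard-coded mask. A minimal before/after sketch of a call site, condensed from the hunks below (not a compilable excerpt on its own):

	/* Before: callers picked a register index and a per-type mask. */
	g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_ALL);
	g->ops.fb.enable_hub_intr(g, STALL_REG_INDEX, g->mm.hub_intr_types);

	/* After: the ops take only the device; the mask is fixed per chip.
	 * Code paths that can run on chips without the op (nvgpu_mm_suspend)
	 * now guard against a NULL hook. */
	if (g->ops.fb.disable_hub_intr != NULL) {
		g->ops.fb.disable_hub_intr(g);
	}
	g->ops.fb.enable_hub_intr(g);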

drivers/gpu/nvgpu/common/mm/mm.c

@@ -113,6 +113,10 @@ int nvgpu_mm_suspend(struct gk20a *g)
 	g->ops.mm.cbc_clean(g);
 	g->ops.mm.l2_flush(g, false);
 
+	if (g->ops.fb.disable_hub_intr != NULL) {
+		g->ops.fb.disable_hub_intr(g);
+	}
+
 	nvgpu_log_info(g, "MM suspend done!");
 
 	return 0;

drivers/gpu/nvgpu/gk20a/gk20a.h

@@ -542,10 +542,8 @@ struct gpu_ops {
 		int (*mem_unlock)(struct gk20a *g);
 		int (*init_nvlink)(struct gk20a *g);
 		int (*enable_nvlink)(struct gk20a *g);
-		void (*enable_hub_intr)(struct gk20a *g, unsigned int index,
-					unsigned int intr_type);
-		void (*disable_hub_intr)(struct gk20a *g, unsigned int index,
-					unsigned int intr_type);
+		void (*enable_hub_intr)(struct gk20a *g);
+		void (*disable_hub_intr)(struct gk20a *g);
 		int (*init_fbpa)(struct gk20a *g);
 		void (*fbpa_isr)(struct gk20a *g);
 		void (*write_mmu_fault_buffer_lo_hi)(struct gk20a *g, u32 index,

drivers/gpu/nvgpu/gv100/fb_gv100.c

@@ -70,6 +70,34 @@ void gv100_fb_reset(struct gk20a *g)
 	gk20a_writel(g, fb_mmu_priv_level_mask_r(), val);
 }
 
+void gv100_fb_enable_hub_intr(struct gk20a *g)
+{
+	u32 mask = 0;
+
+	mask = fb_niso_intr_en_set_mmu_other_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_overflow_m();
+
+	gk20a_writel(g, fb_niso_intr_en_set_r(0),
+		mask);
+}
+
+void gv100_fb_disable_hub_intr(struct gk20a *g)
+{
+	u32 mask = 0;
+
+	mask = fb_niso_intr_en_set_mmu_other_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_overflow_m();
+
+	gk20a_writel(g, fb_niso_intr_en_clr_r(0),
+		mask);
+}
+
 int gv100_fb_memory_unlock(struct gk20a *g)
 {
 	struct nvgpu_firmware *mem_unlock_fw = NULL;

drivers/gpu/nvgpu/gv100/fb_gv100.h

@@ -28,6 +28,8 @@
 struct gk20a;
 
 void gv100_fb_reset(struct gk20a *g);
+void gv100_fb_enable_hub_intr(struct gk20a *g);
+void gv100_fb_disable_hub_intr(struct gk20a *g);
 int gv100_fb_memory_unlock(struct gk20a *g);
 int gv100_fb_init_nvlink(struct gk20a *g);
 int gv100_fb_enable_nvlink(struct gk20a *g);

drivers/gpu/nvgpu/gv100/hal_gv100.c

@@ -459,7 +459,7 @@ static const struct gpu_ops gv100_ops = {
 	},
 	.fb = {
 		.reset = gv100_fb_reset,
-		.init_hw = gk20a_fb_init_hw,
+		.init_hw = gv11b_fb_init_hw,
 		.init_fs_state = NULL,
 		.set_mmu_page_size = gm20b_fb_set_mmu_page_size,
 		.set_use_full_comp_tag_line =
@@ -481,8 +481,8 @@ static const struct gpu_ops gv100_ops = {
 		.mem_unlock = gv100_fb_memory_unlock,
 		.init_nvlink = gv100_fb_init_nvlink,
 		.enable_nvlink = gv100_fb_enable_nvlink,
-		.enable_hub_intr = gv11b_fb_enable_hub_intr,
-		.disable_hub_intr = gv11b_fb_disable_hub_intr,
+		.enable_hub_intr = gv100_fb_enable_hub_intr,
+		.disable_hub_intr = gv100_fb_disable_hub_intr,
 		.write_mmu_fault_buffer_lo_hi =
 			fb_gv11b_write_mmu_fault_buffer_lo_hi,
 		.write_mmu_fault_buffer_get =

drivers/gpu/nvgpu/gv100/mc_gv100.c

@@ -41,7 +41,6 @@ void mc_gv100_intr_enable(struct gk20a *g)
 		0xffffffffU);
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
 		0xffffffffU);
-	g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_ALL);
 
 	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
 		mc_intr_pfifo_pending_f() |
@@ -56,9 +55,6 @@ void mc_gv100_intr_enable(struct gk20a *g)
 		mc_intr_pfifo_pending_f()
 		| eng_intr_mask;
 
-	/* TODO: Enable PRI faults for HUB ECC err intr */
-	g->ops.fb.enable_hub_intr(g, STALL_REG_INDEX, g->mm.hub_intr_types);
-
 	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
 		g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);

drivers/gpu/nvgpu/gv11b/fb_gv11b.c

@@ -32,6 +32,7 @@
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
+#include "gk20a/fb_gk20a.h"
 
 #include "gp10b/fb_gp10b.h"
 
@@ -58,6 +59,13 @@ static void gv11b_init_nvlink_soc_credits(struct gk20a *g)
 	}
 }
 
+void gv11b_fb_init_hw(struct gk20a *g)
+{
+	gk20a_fb_init_hw(g);
+
+	g->ops.fb.enable_hub_intr(g);
+}
+
 void gv11b_fb_init_fs_state(struct gk20a *g)
 {
 	nvgpu_log(g, gpu_dbg_fn, "initialize gv11b fb");
@@ -374,118 +382,34 @@ void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index)
 	gv11b_fb_fault_buf_set_state_hw(g, index, FAULT_BUF_ENABLED);
 }
 
-static void gv11b_fb_intr_en_set(struct gk20a *g,
-		unsigned int index, u32 mask)
-{
-	u32 reg_val;
-
-	reg_val = gk20a_readl(g, fb_niso_intr_en_set_r(index));
-	reg_val |= mask;
-	gk20a_writel(g, fb_niso_intr_en_set_r(index), reg_val);
-}
-
-static void gv11b_fb_intr_en_clr(struct gk20a *g,
-		unsigned int index, u32 mask)
-{
-	u32 reg_val;
-
-	reg_val = gk20a_readl(g, fb_niso_intr_en_clr_r(index));
-	reg_val |= mask;
-	gk20a_writel(g, fb_niso_intr_en_clr_r(index), reg_val);
-}
-
-static u32 gv11b_fb_get_hub_intr_clr_mask(struct gk20a *g,
-		unsigned int intr_type)
+void gv11b_fb_enable_hub_intr(struct gk20a *g)
 {
 	u32 mask = 0;
 
-	if (intr_type & HUB_INTR_TYPE_OTHER) {
-		mask |=
-		 fb_niso_intr_en_clr_mmu_other_fault_notify_m();
-	}
+	mask = fb_niso_intr_en_set_mmu_other_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_overflow_m() |
+		fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m();
 
-	if (intr_type & HUB_INTR_TYPE_NONREPLAY) {
-		mask |=
-		 fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_m() |
-		 fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_m();
-	}
-
-	if (intr_type & HUB_INTR_TYPE_REPLAY) {
-		mask |=
-		 fb_niso_intr_en_clr_mmu_replayable_fault_notify_m() |
-		 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_m();
-	}
-
-	if (intr_type & HUB_INTR_TYPE_ECC_UNCORRECTED) {
-		mask |=
-		 fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_m();
-	}
-
-	if (intr_type & HUB_INTR_TYPE_ACCESS_COUNTER) {
-		mask |=
-		 fb_niso_intr_en_clr_hub_access_counter_notify_m() |
-		 fb_niso_intr_en_clr_hub_access_counter_error_m();
-	}
-
-	return mask;
+	gk20a_writel(g, fb_niso_intr_en_set_r(0),
+		mask);
 }
 
-static u32 gv11b_fb_get_hub_intr_en_mask(struct gk20a *g,
-		unsigned int intr_type)
+void gv11b_fb_disable_hub_intr(struct gk20a *g)
 {
 	u32 mask = 0;
 
-	if (intr_type & HUB_INTR_TYPE_OTHER) {
-		mask |=
-		 fb_niso_intr_en_set_mmu_other_fault_notify_m();
-	}
+	mask = fb_niso_intr_en_set_mmu_other_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_overflow_m() |
+		fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m();
 
-	if (intr_type & HUB_INTR_TYPE_NONREPLAY) {
-		mask |=
-		 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
-		 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m();
-	}
-
-	if (intr_type & HUB_INTR_TYPE_REPLAY) {
-		mask |=
-		 fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
-		 fb_niso_intr_en_set_mmu_replayable_fault_overflow_m();
-	}
-
-	if (intr_type & HUB_INTR_TYPE_ECC_UNCORRECTED) {
-		mask |=
-		 fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m();
-	}
-
-	if (intr_type & HUB_INTR_TYPE_ACCESS_COUNTER) {
-		mask |=
-		 fb_niso_intr_en_set_hub_access_counter_notify_m() |
-		 fb_niso_intr_en_set_hub_access_counter_error_m();
-	}
-
-	return mask;
-}
-
-void gv11b_fb_enable_hub_intr(struct gk20a *g,
-		unsigned int index, unsigned int intr_type)
-{
-	u32 mask = 0;
-
-	mask = gv11b_fb_get_hub_intr_en_mask(g, intr_type);
-
-	if (mask)
-		gv11b_fb_intr_en_set(g, index, mask);
-}
-
-void gv11b_fb_disable_hub_intr(struct gk20a *g,
-		unsigned int index, unsigned int intr_type)
-{
-	u32 mask = 0;
-
-	mask = gv11b_fb_get_hub_intr_clr_mask(g, intr_type);
-
-	if (mask)
-		gv11b_fb_intr_en_clr(g, index, mask);
+	gk20a_writel(g, fb_niso_intr_en_clr_r(0),
+		mask);
 }
 
 void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
@@ -1226,10 +1150,6 @@ void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g,
 static void gv11b_fb_handle_bar2_fault(struct gk20a *g,
 		struct mmu_fault_info *mmfault, u32 fault_status)
 {
-	g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX,
-		HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);
-
 	if (fault_status & fb_mmu_fault_status_non_replayable_error_m()) {
 		if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))
 			gv11b_fb_fault_buf_configure_hw(g, NONREPLAY_REG_INDEX);
@@ -1247,8 +1167,6 @@ static void gv11b_fb_handle_bar2_fault(struct gk20a *g,
 		gk20a_channel_put(mmfault->refch);
 		mmfault->refch = NULL;
 	}
-
-	g->ops.fb.enable_hub_intr(g, STALL_REG_INDEX,
-		HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);
 }
 
 void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
@@ -1395,10 +1313,6 @@ void gv11b_fb_hub_isr(struct gk20a *g)
 		nvgpu_info(g, "ecc uncorrected error notify");
 
-		/* disable interrupts during handling */
-		g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX,
-			HUB_INTR_TYPE_ECC_UNCORRECTED);
-
 		status = gk20a_readl(g, fb_mmu_l2tlb_ecc_status_r());
 		if (status)
 			gv11b_handle_l2tlb_ecc_isr(g, status);
@@ -1410,11 +1324,6 @@ void gv11b_fb_hub_isr(struct gk20a *g)
 		status = gk20a_readl(g, fb_mmu_fillunit_ecc_status_r());
 		if (status)
 			gv11b_handle_fillunit_ecc_isr(g, status);
-
-		/* re-enable interrupts after handling */
-		g->ops.fb.enable_hub_intr(g, STALL_REG_INDEX,
-			HUB_INTR_TYPE_ECC_UNCORRECTED);
 	}
 
 	if (niso_intr &
 		(fb_niso_intr_mmu_other_fault_notify_m() |

drivers/gpu/nvgpu/gv11b/fb_gv11b.h

@@ -25,9 +25,6 @@
 #ifndef _NVGPU_GV11B_FB
 #define _NVGPU_GV11B_FB
 
-#define STALL_REG_INDEX			0
-#define NONSTALL_REG_INDEX		1
-
 #define NONREPLAY_REG_INDEX		0
 #define REPLAY_REG_INDEX		1
@@ -37,22 +34,13 @@
 #define FAULT_BUF_INVALID		0
 #define FAULT_BUF_VALID			1
 
-#define HUB_INTR_TYPE_OTHER		1U	/* bit 0 */
-#define HUB_INTR_TYPE_NONREPLAY		2U	/* bit 1 */
-#define HUB_INTR_TYPE_REPLAY		4U	/* bit 2 */
-#define HUB_INTR_TYPE_ECC_UNCORRECTED	8U	/* bit 3 */
-#define HUB_INTR_TYPE_ACCESS_COUNTER	16U	/* bit 4 */
-#define HUB_INTR_TYPE_ALL	(HUB_INTR_TYPE_OTHER | \
-				HUB_INTR_TYPE_NONREPLAY | \
-				HUB_INTR_TYPE_REPLAY | \
-				HUB_INTR_TYPE_ECC_UNCORRECTED | \
-				HUB_INTR_TYPE_ACCESS_COUNTER)
-
 #define FAULT_TYPE_OTHER_AND_NONREPLAY	0
 #define FAULT_TYPE_REPLAY		1
 
 struct gk20a;
 
+void gv11b_fb_init_hw(struct gk20a *g);
+
 void gv11b_fb_init_fs_state(struct gk20a *g);
 void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr);
 void gv11b_fb_reset(struct gk20a *g);
@@ -63,10 +51,8 @@ u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g,
 void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 		unsigned int index, unsigned int state);
 void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index);
-void gv11b_fb_enable_hub_intr(struct gk20a *g,
-		unsigned int index, unsigned int intr_type);
-void gv11b_fb_disable_hub_intr(struct gk20a *g,
-		unsigned int index, unsigned int intr_type);
+void gv11b_fb_enable_hub_intr(struct gk20a *g);
+void gv11b_fb_disable_hub_intr(struct gk20a *g);
 bool gv11b_fb_mmu_fault_pending(struct gk20a *g);
 void gv11b_fb_handle_dropped_mmu_fault(struct gk20a *g, u32 fault_status);
 void gv11b_fb_handle_other_fault_notify(struct gk20a *g,

drivers/gpu/nvgpu/gv11b/hal_gv11b.c

@@ -423,7 +423,7 @@ static const struct gpu_ops gv11b_ops = {
 	},
 	.fb = {
 		.reset = gv11b_fb_reset,
-		.init_hw = gk20a_fb_init_hw,
+		.init_hw = gv11b_fb_init_hw,
 		.init_fs_state = gv11b_fb_init_fs_state,
 		.init_cbc = gv11b_fb_init_cbc,
 		.set_mmu_page_size = gm20b_fb_set_mmu_page_size,

drivers/gpu/nvgpu/gv11b/mc_gv11b.c

@@ -41,7 +41,6 @@ void mc_gv11b_intr_enable(struct gk20a *g)
 		0xffffffffU);
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
 		0xffffffffU);
-	g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_ALL);
 
 	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
 		mc_intr_pfifo_pending_f() |
@@ -55,9 +54,6 @@ void mc_gv11b_intr_enable(struct gk20a *g)
 		mc_intr_pfifo_pending_f()
 		| eng_intr_mask;
 
-	/* TODO: Enable PRI faults for HUB ECC err intr */
-	g->ops.fb.enable_hub_intr(g, STALL_REG_INDEX, g->mm.hub_intr_types);
-
 	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
 		g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);

drivers/gpu/nvgpu/gv11b/mm_gv11b.c

@@ -77,11 +77,7 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
 
-	g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_OTHER |
-			HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);
-
-	g->mm.hub_intr_types &= (~(HUB_INTR_TYPE_NONREPLAY |
-			HUB_INTR_TYPE_REPLAY));
+	g->ops.fb.disable_hub_intr(g);
 
 	if ((gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))) {
 		gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX,
@@ -105,15 +101,12 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 	nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
 }
 
-static int gv11b_mm_mmu_fault_info_buf_init(struct gk20a *g,
-		u32 *hub_intr_types)
+static int gv11b_mm_mmu_fault_info_buf_init(struct gk20a *g)
 {
-	*hub_intr_types |= HUB_INTR_TYPE_OTHER;
 	return 0;
 }
 
-static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g,
-		u32 *hub_intr_types)
+static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
 {
 	struct vm_gk20a *vm = g->mm.bar2.vm;
 	int err = 0;
@@ -136,8 +129,6 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g,
 		}
 	}
 
-	*hub_intr_types |= HUB_INTR_TYPE_NONREPLAY;
-
 	if (!nvgpu_mem_is_valid(
 		&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY])) {
 		err = nvgpu_dma_alloc_map_sys(vm, fb_size,
@@ -149,8 +140,6 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g,
 			return;
 		}
 	}
-
-	*hub_intr_types |= HUB_INTR_TYPE_REPLAY;
 }
 
 static void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g)
@@ -170,12 +159,10 @@ static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
 	nvgpu_mutex_init(&g->mm.hub_isr_mutex);
 
-	g->mm.hub_intr_types = HUB_INTR_TYPE_ECC_UNCORRECTED;
-
-	err = gv11b_mm_mmu_fault_info_buf_init(g, &g->mm.hub_intr_types);
+	err = gv11b_mm_mmu_fault_info_buf_init(g);
 
 	if (!err)
-		gv11b_mm_mmu_hw_fault_buf_init(g, &g->mm.hub_intr_types);
+		gv11b_mm_mmu_hw_fault_buf_init(g);
 
 	return err;
 }

drivers/gpu/nvgpu/include/nvgpu/mm.h

@@ -131,7 +131,6 @@ struct mm_gk20a {
 	struct nvgpu_mem hw_fault_buf[FAULT_TYPE_NUM];
 	struct mmu_fault_info fault_info[FAULT_TYPE_NUM];
 	struct nvgpu_mutex hub_isr_mutex;
-	u32 hub_intr_types;
 
 	/*
 	 * Separate function to cleanup the CE since it requires a channel to