gpu: nvgpu: Move gk20a_gr_nonstall_isr function to hal

Move the gk20a_gr_nonstall_isr function to a HAL operation under hal.gr.intr.

Use the nvgpu_gr_gpc_offset and nvgpu_gr_tpc_offset calls in the
gm20b_gr_intr_handle_tex_exception function.

Update the gk20a_gr_nonstall_isr call site to use g->ops.gr.intr.nonstall_isr.

JIRA NVGPU-3016

Change-Id: I9ff39cf1a99bf5b3d215cda6bc68fab1ecae51e3
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2088133
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Authored by Vinod G on 2019-04-02 13:26:57 -07:00; committed by mobile promotions
parent 9abe4608b4, commit 4b433b528e
10 changed files with 26 additions and 23 deletions
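For readers unfamiliar with the nvgpu HAL layout, the sketch below illustrates the ops-table pattern this change follows: a chip-specific handler is registered in the per-chip gpu_ops table at HAL init, and common code dispatches through g->ops.gr.intr.nonstall_isr. The struct layout and the gm20b_like_nonstall_isr name are simplified stand-ins for illustration, not the actual nvgpu definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for struct gk20a and struct gpu_ops; the real
 * nvgpu types carry far more state than shown here. */
struct gk20a;

struct gpu_ops {
	struct {
		struct {
			/* per-chip non-stall interrupt handler, mirroring
			 * the hal.gr.intr.nonstall_isr hook added below */
			uint32_t (*nonstall_isr)(struct gk20a *g);
		} intr;
	} gr;
};

struct gk20a {
	struct gpu_ops ops;
};

/* Chip-specific implementation; the real gm20b_gr_intr_nonstall_isr reads
 * gr_intr_nonstall_r() and clears the trap-pending bit. */
static uint32_t gm20b_like_nonstall_isr(struct gk20a *g)
{
	(void)g;
	return 0x3U; /* e.g. "wake semaphores" | "post events" */
}

int main(void)
{
	struct gk20a g;
	uint32_t ops;

	/* HAL init selects the chip-specific handler ... */
	g.ops.gr.intr.nonstall_isr = gm20b_like_nonstall_isr;

	/* ... and common code calls it through the ops table, as in
	 * ops |= g->ops.gr.intr.nonstall_isr(g); in the MC ISR below. */
	ops = g.ops.gr.intr.nonstall_isr(&g);
	printf("nonstall ops mask: 0x%x\n", ops);
	return 0;
}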


@@ -2293,23 +2293,6 @@ int gk20a_gr_isr(struct gk20a *g)
 	return 0;
 }
 
-u32 gk20a_gr_nonstall_isr(struct gk20a *g)
-{
-	u32 ops = 0;
-	u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r());
-
-	nvgpu_log(g, gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr);
-
-	if ((gr_intr & gr_intr_nonstall_trap_pending_f()) != 0U) {
-		/* Clear the interrupt */
-		gk20a_writel(g, gr_intr_nonstall_r(),
-			gr_intr_nonstall_trap_pending_f());
-		ops |= (GK20A_NONSTALL_OPS_WAKEUP_SEMAPHORE |
-				GK20A_NONSTALL_OPS_POST_EVENTS);
-	}
-	return ops;
-}
-
 int gr_gk20a_fecs_get_reglist_img_size(struct gk20a *g, u32 *size)
 {
 	BUG_ON(size == NULL);


@@ -299,7 +299,6 @@ int gk20a_init_gr_channel(struct channel_gk20a *ch_gk20a);
 int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags);
 
 int gk20a_gr_isr(struct gk20a *g);
-u32 gk20a_gr_nonstall_isr(struct gk20a *g);
 
 /* pmu */
 int gr_gk20a_fecs_get_reglist_img_size(struct gk20a *g, u32 *size);


@@ -488,6 +488,7 @@ static const struct gpu_ops gm20b_ops = {
 			.enable_gpc_exceptions =
 				gm20b_gr_intr_enable_gpc_exceptions,
 			.enable_exceptions = gm20b_gr_intr_enable_exceptions,
+			.nonstall_isr = gm20b_gr_intr_nonstall_isr,
 		},
 		.falcon = {
 			.fecs_base_addr = gm20b_gr_falcon_fecs_base_addr,


@@ -574,6 +574,7 @@ static const struct gpu_ops gp10b_ops = {
 			.enable_gpc_exceptions =
 				gm20b_gr_intr_enable_gpc_exceptions,
 			.enable_exceptions = gm20b_gr_intr_enable_exceptions,
+			.nonstall_isr = gm20b_gr_intr_nonstall_isr,
 		},
 		.falcon = {
 			.fecs_base_addr = gm20b_gr_falcon_fecs_base_addr,


@@ -718,6 +718,7 @@ static const struct gpu_ops gv100_ops = {
 			.enable_gpc_exceptions =
 				gv11b_gr_intr_enable_gpc_exceptions,
 			.enable_exceptions = gv11b_gr_intr_enable_exceptions,
+			.nonstall_isr = gm20b_gr_intr_nonstall_isr,
 		},
 		.falcon = {
 			.fecs_base_addr = gm20b_gr_falcon_fecs_base_addr,


@@ -677,6 +677,7 @@ static const struct gpu_ops gv11b_ops = {
 			.enable_gpc_exceptions =
 				gv11b_gr_intr_enable_gpc_exceptions,
 			.enable_exceptions = gv11b_gr_intr_enable_exceptions,
+			.nonstall_isr = gm20b_gr_intr_nonstall_isr,
 		},
 		.falcon = {
 			.fecs_base_addr = gm20b_gr_falcon_fecs_base_addr,


@@ -23,6 +23,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/io.h>
+#include <nvgpu/gr/gr.h>
 #include <nvgpu/gr/config.h>
 #include <nvgpu/gr/gr_intr.h>
@@ -59,10 +60,7 @@ u32 gm20b_gr_intr_get_tpc_exception(struct gk20a *g, u32 offset,
 
 void gm20b_gr_intr_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc)
 {
-	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
-	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
-					GPU_LIT_TPC_IN_GPC_STRIDE);
-	u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc;
+	u32 offset = nvgpu_gr_gpc_offset(g, gpc) + nvgpu_gr_tpc_offset(g, tpc);
 	u32 esr;
 
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
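Side note on the helpers used above: judging from the removed lines, nvgpu_gr_gpc_offset() and nvgpu_gr_tpc_offset() appear to wrap the same stride arithmetic that was previously done inline with the GPU_LIT_* litter values. A minimal sketch under that assumption; the sketch_* names are illustrative, not nvgpu API.

#include <stdint.h>

/* gpc_stride and tpc_in_gpc_stride stand in for the values the driver
 * queries with nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE) and
 * nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE). */
static inline uint32_t sketch_gpc_offset(uint32_t gpc_stride, uint32_t gpc)
{
	/* per-GPC offset into the PGRAPH register space */
	return gpc_stride * gpc;
}

static inline uint32_t sketch_tpc_offset(uint32_t tpc_in_gpc_stride, uint32_t tpc)
{
	/* per-TPC offset within one GPC */
	return tpc_in_gpc_stride * tpc;
}

/* The sum sketch_gpc_offset(...) + sketch_tpc_offset(...) reproduces the
 * removed expression gpc_stride * gpc + tpc_in_gpc_stride * tpc, which is
 * then added to register addresses to target a specific GPC/TPC. */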
@@ -124,3 +122,20 @@ void gm20b_gr_intr_enable_gpc_exceptions(struct gk20a *g,
 
 	nvgpu_writel(g, gr_gpcs_gpccs_gpc_exception_en_r(), tpc_mask);
 }
+
+u32 gm20b_gr_intr_nonstall_isr(struct gk20a *g)
+{
+	u32 ops = 0;
+	u32 gr_intr = nvgpu_readl(g, gr_intr_nonstall_r());
+
+	nvgpu_log(g, gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr);
+
+	if ((gr_intr & gr_intr_nonstall_trap_pending_f()) != 0U) {
+		/* Clear the interrupt */
+		nvgpu_writel(g, gr_intr_nonstall_r(),
+			gr_intr_nonstall_trap_pending_f());
+		ops |= (GK20A_NONSTALL_OPS_WAKEUP_SEMAPHORE |
+				GK20A_NONSTALL_OPS_POST_EVENTS);
+	}
+	return ops;
+}


@@ -39,5 +39,6 @@ void gm20b_gr_intr_enable_exceptions(struct gk20a *g,
 		bool enable);
 void gm20b_gr_intr_enable_gpc_exceptions(struct gk20a *g,
 		struct nvgpu_gr_config *gr_config);
+u32 gm20b_gr_intr_nonstall_isr(struct gk20a *g);
 
 #endif /* NVGPU_GR_INTR_GM20B_H */


@@ -111,7 +111,7 @@ u32 gm20b_mc_isr_nonstall(struct gk20a *g)
 		engine_enum = engine_info->engine_enum;
 		/* GR Engine */
 		if (engine_enum == NVGPU_ENGINE_GR_GK20A) {
-			ops |= gk20a_gr_nonstall_isr(g);
+			ops |= g->ops.gr.intr.nonstall_isr(g);
 		}
 		/* CE Engine */
 		if (((engine_enum == NVGPU_ENGINE_GRCE_GK20A) ||
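One more note on the call site above: the handler's return value is a bitmask (the gk20a/gm20b implementations OR in GK20A_NONSTALL_OPS_WAKEUP_SEMAPHORE and GK20A_NONSTALL_OPS_POST_EVENTS), so gm20b_mc_isr_nonstall can accumulate flags from several engines with |= and act on each bit once. A small standalone sketch of that accumulation pattern; the SKETCH_* flag values and fake_engine_nonstall_isr are placeholders, not the real nvgpu definitions.

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit values; the real GK20A_NONSTALL_OPS_* flags come from
 * the nvgpu headers and are not shown in this diff. */
#define SKETCH_OPS_WAKEUP_SEMAPHORE	(1U << 0)
#define SKETCH_OPS_POST_EVENTS		(1U << 1)

static uint32_t fake_engine_nonstall_isr(void)
{
	/* Mirrors the gm20b handler: report both actions when the
	 * non-stall trap bit was pending. */
	return SKETCH_OPS_WAKEUP_SEMAPHORE | SKETCH_OPS_POST_EVENTS;
}

int main(void)
{
	uint32_t ops = 0U;

	/* As in gm20b_mc_isr_nonstall: accumulate per-engine results. */
	ops |= fake_engine_nonstall_isr();

	if ((ops & SKETCH_OPS_WAKEUP_SEMAPHORE) != 0U) {
		printf("would wake semaphore waiters\n");
	}
	if ((ops & SKETCH_OPS_POST_EVENTS) != 0U) {
		printf("would post non-stall events\n");
	}
	return 0;
}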


@@ -799,6 +799,7 @@ struct gpu_ops {
 				bool enable);
 			void (*enable_gpc_exceptions)(struct gk20a *g,
 				struct nvgpu_gr_config *gr_config);
+			u32 (*nonstall_isr)(struct gk20a *g);
 		} intr;
 		u32 (*get_ctxsw_checksum_mismatch_mailbox_val)(void);