gpu: nvgpu: add pbdma intr_enable HAL ops.

A new HAL op, intr_enable(), is added to the hal.fifo.pbdma unit.
Implementations are provided for the gm20b and gv11b architectures,
and the common fifo code now enables and disables PBDMA interrupts
through this op.

Jira NVGPU-2950

Change-Id: Ifd9c3bfad4264449c52f411e8cad8674c3756048
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2073536
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Debarshi Dutta <ddutta@nvidia.com>
Date:      2019-03-15 14:34:04 +05:30
Committer: mobile promotions
Commit:    52cbc88a00 (parent: ce5c43d24a)
14 changed files with 113 additions and 52 deletions
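
For orientation before the hunks: the op's signature, as added to struct
gpu_ops below, and a minimal sketch of the dispatch pattern it introduces.
This is an illustration, not driver source; the struct and wrapper names
other than intr_enable are stand-ins, and the NULL guard is an assumption
motivated by the vgpu tables below, which wire .intr_enable = NULL.

/*
 * Minimal sketch of the new per-chip PBDMA interrupt hook. Only the
 * op's signature is taken from this commit; "pbdma_ops_sketch" and
 * the dispatch wrapper are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>

struct gk20a;	/* opaque device struct, as in nvgpu */

struct pbdma_ops_sketch {
	void (*intr_enable)(struct gk20a *g, bool enable);
};

static void pbdma_intr_enable_dispatch(struct gk20a *g,
		const struct pbdma_ops_sketch *ops, bool enable)
{
	/*
	 * The vgpu tables in this commit leave the op NULL, so a
	 * defensive caller checks before dispatching.
	 */
	if (ops != NULL && ops->intr_enable != NULL) {
		ops->intr_enable(g, enable);
	}
}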

@@ -448,6 +448,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 		.is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,
 	},
 	.pbdma = {
+		.intr_enable = NULL,
 		.pbdma_acquire_val = gm20b_pbdma_acquire_val,
 		.get_pbdma_signature = gp10b_pbdma_get_signature,
 		.dump_pbdma_status = NULL,

@@ -528,6 +528,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
 	},
 	.pbdma = {
+		.intr_enable = NULL,
 		.pbdma_acquire_val = gm20b_pbdma_acquire_val,
 		.get_pbdma_signature = gp10b_pbdma_get_signature,
 		.dump_pbdma_status = NULL,

@@ -609,6 +609,7 @@ static const struct gpu_ops gm20b_ops = {
 		.is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,
 	},
 	.pbdma = {
+		.intr_enable = gm20b_pbdma_intr_enable,
 		.pbdma_acquire_val = gm20b_pbdma_acquire_val,
 		.get_pbdma_signature = gm20b_pbdma_get_signature,
 		.dump_pbdma_status = gm20b_pbdma_dump_status,

@@ -689,6 +689,7 @@ static const struct gpu_ops gp10b_ops = {
 		.is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,
 	},
 	.pbdma = {
+		.intr_enable = gm20b_pbdma_intr_enable,
 		.pbdma_acquire_val = gm20b_pbdma_acquire_val,
 		.get_pbdma_signature = gp10b_pbdma_get_signature,
 		.dump_pbdma_status = gm20b_pbdma_dump_status,

@@ -872,6 +872,7 @@ static const struct gpu_ops gv100_ops = {
 		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
 	},
 	.pbdma = {
+		.intr_enable = gv11b_pbdma_intr_enable,
 		.pbdma_acquire_val = gm20b_pbdma_acquire_val,
 		.get_pbdma_signature = gp10b_pbdma_get_signature,
 		.dump_pbdma_status = gm20b_pbdma_dump_status,

@@ -827,6 +827,7 @@ static const struct gpu_ops gv11b_ops = {
 		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
 	},
 	.pbdma = {
+		.intr_enable = gv11b_pbdma_intr_enable,
 		.pbdma_acquire_val = gm20b_pbdma_acquire_val,
 		.get_pbdma_signature = gp10b_pbdma_get_signature,
 		.dump_pbdma_status = gm20b_pbdma_dump_status,

@@ -66,41 +66,19 @@ static u32 gk20a_fifo_intr_0_en_mask(struct gk20a *g)
 void gk20a_fifo_intr_0_enable(struct gk20a *g, bool enable)
 {
-	unsigned int i;
-	u32 intr_stall, mask;
-	u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
+	u32 mask;
 
 	if (!enable) {
-		g->ops.fifo.ctxsw_timeout_enable(g, false);
 		nvgpu_writel(g, fifo_intr_en_0_r(), 0U);
+		g->ops.fifo.ctxsw_timeout_enable(g, false);
+		g->ops.pbdma.intr_enable(g, false);
 		return;
 	}
 
 	/* Enable interrupts */
 	g->ops.fifo.ctxsw_timeout_enable(g, true);
 
-	/* clear and enable pbdma interrupt */
-	for (i = 0; i < host_num_pbdma; i++) {
-		nvgpu_writel(g, pbdma_intr_0_r(i), U32_MAX);
-		nvgpu_writel(g, pbdma_intr_1_r(i), U32_MAX);
-		intr_stall = nvgpu_readl(g, pbdma_intr_stall_r(i));
-		intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f();
-		nvgpu_writel(g, pbdma_intr_stall_r(i), intr_stall);
-		nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i,
-			intr_stall);
-		nvgpu_writel(g, pbdma_intr_en_0_r(i), intr_stall);
-		intr_stall = nvgpu_readl(g, pbdma_intr_stall_1_r(i));
-		/*
-		 * For bug 2082123
-		 * Mask the unused HCE_RE_ILLEGAL_OP bit from the interrupt.
-		 */
-		intr_stall &= ~pbdma_intr_stall_1_hce_illegal_op_enabled_f();
-		nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i,
-			intr_stall);
-		nvgpu_writel(g, pbdma_intr_en_1_r(i), intr_stall);
-	}
+	g->ops.pbdma.intr_enable(g, true);
 
 	/* reset runlist interrupts */
 	nvgpu_writel(g, fifo_intr_runlist_r(), ~U32(0U));

@@ -98,40 +98,19 @@ static u32 gv11b_fifo_intr_0_en_mask(struct gk20a *g)
 void gv11b_fifo_intr_0_enable(struct gk20a *g, bool enable)
 {
-	unsigned int i;
-	u32 intr_stall, mask;
-	u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
+	u32 mask;
 
 	if (!enable) {
-		g->ops.fifo.ctxsw_timeout_enable(g, false);
 		nvgpu_writel(g, fifo_intr_en_0_r(), 0);
+		g->ops.fifo.ctxsw_timeout_enable(g, false);
+		g->ops.pbdma.intr_enable(g, false);
 		return;
 	}
 
 	/* Enable interrupts */
 	g->ops.fifo.ctxsw_timeout_enable(g, true);
 
-	/* clear and enable pbdma interrupt */
-	for (i = 0; i < host_num_pbdma; i++) {
-		nvgpu_writel(g, pbdma_intr_0_r(i), U32_MAX);
-		nvgpu_writel(g, pbdma_intr_1_r(i), U32_MAX);
-		intr_stall = nvgpu_readl(g, pbdma_intr_stall_r(i));
-		nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", i,
-			intr_stall);
-		nvgpu_writel(g, pbdma_intr_en_0_r(i), intr_stall);
-		intr_stall = nvgpu_readl(g, pbdma_intr_stall_1_r(i));
-		/*
-		 * For bug 2082123
-		 * Mask the unused HCE_RE_ILLEGAL_OP bit from the interrupt.
-		 */
-		intr_stall &= ~pbdma_intr_stall_1_hce_illegal_op_enabled_f();
-		nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", i,
-			intr_stall);
-		nvgpu_writel(g, pbdma_intr_en_1_r(i), intr_stall);
-	}
+	g->ops.pbdma.intr_enable(g, true);
 
 	/* clear runlist interrupts */
 	nvgpu_writel(g, fifo_intr_runlist_r(), ~U32(0U));
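
Both fifo hunks above make the same move: the open-coded per-PBDMA
clear-and-enable loop leaves the common fifo interrupt code and goes
behind the new op. On the disable path, the ordering read off the added
lines is sketched below; the identifiers are the real nvgpu ones used in
the hunks, but the wrapper function is hypothetical and assumes the
nvgpu-internal headers that define these accessors.

/*
 * Disable ordering after this commit, per the two fifo hunks above.
 * The wrapper function itself is hypothetical.
 */
static void fifo_intr_0_disable_sketch(struct gk20a *g)
{
	/* mask all top-level fifo interrupts */
	nvgpu_writel(g, fifo_intr_en_0_r(), 0U);
	/* stop ctxsw timeout detection */
	g->ops.fifo.ctxsw_timeout_enable(g, false);
	/* new: disable and clear all PBDMA interrupts via the HAL op */
	g->ops.pbdma.intr_enable(g, false);
}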

@@ -72,6 +72,64 @@ static bool gm20b_pbdma_is_sw_method_subch(struct gk20a *g, u32 pbdma_id,
 	return false;
 }
 
+static void gm20b_pbdma_disable_all_intr(struct gk20a *g, u32 pbdma_id)
+{
+	nvgpu_writel(g, pbdma_intr_en_0_r(pbdma_id), 0U);
+	nvgpu_writel(g, pbdma_intr_en_1_r(pbdma_id), 0U);
+}
+
+void gm20b_pbdma_clear_all_intr(struct gk20a *g, u32 pbdma_id)
+{
+	nvgpu_writel(g, pbdma_intr_0_r(pbdma_id), U32_MAX);
+	nvgpu_writel(g, pbdma_intr_1_r(pbdma_id), U32_MAX);
+}
+
+void gm20b_pbdma_disable_and_clear_all_intr(struct gk20a *g)
+{
+	u32 pbdma_id = 0;
+	u32 num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
+
+	for (pbdma_id = 0; pbdma_id < num_pbdma; pbdma_id++) {
+		gm20b_pbdma_disable_all_intr(g, pbdma_id);
+		gm20b_pbdma_clear_all_intr(g, pbdma_id);
+	}
+}
+
+void gm20b_pbdma_intr_enable(struct gk20a *g, bool enable)
+{
+	u32 pbdma_id = 0;
+	u32 intr_stall;
+	u32 num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
+
+	if (!enable) {
+		gm20b_pbdma_disable_and_clear_all_intr(g);
+		return;
+	}
+
+	/* clear and enable pbdma interrupts */
+	for (pbdma_id = 0; pbdma_id < num_pbdma; pbdma_id++) {
+		gm20b_pbdma_clear_all_intr(g, pbdma_id);
+		intr_stall = nvgpu_readl(g, pbdma_intr_stall_r(pbdma_id));
+		intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f();
+		nvgpu_writel(g, pbdma_intr_stall_r(pbdma_id), intr_stall);
+		nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", pbdma_id,
+			intr_stall);
+		nvgpu_writel(g, pbdma_intr_en_0_r(pbdma_id), intr_stall);
+		intr_stall = nvgpu_readl(g, pbdma_intr_stall_1_r(pbdma_id));
+		/*
+		 * For bug 2082123
+		 * Mask the unused HCE_RE_ILLEGAL_OP bit from the interrupt.
+		 */
+		intr_stall &= ~pbdma_intr_stall_1_hce_illegal_op_enabled_f();
+		nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", pbdma_id,
+			intr_stall);
+		nvgpu_writel(g, pbdma_intr_en_1_r(pbdma_id), intr_stall);
+	}
+}
+
 unsigned int gm20b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id,
 	u32 pbdma_intr_0, u32 *handled, u32 *error_notifier)
 {

@@ -28,6 +28,8 @@
 struct gk20a;
 struct gk20a_debug_output;
 
+void gm20b_pbdma_intr_enable(struct gk20a *g, bool enable);
+
 unsigned int gm20b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id,
 	u32 pbdma_intr_0, u32 *handled, u32 *error_notifier);
 unsigned int gm20b_pbdma_handle_intr_1(struct gk20a *g, u32 pbdma_id,
@@ -44,6 +46,8 @@ u32 gm20b_pbdma_device_fatal_0_intr_descs(void);
 u32 gm20b_pbdma_channel_fatal_0_intr_descs(void);
 u32 gm20b_pbdma_restartable_0_intr_descs(void);
 
+void gm20b_pbdma_clear_all_intr(struct gk20a *g, u32 pbdma_id);
+void gm20b_pbdma_disable_and_clear_all_intr(struct gk20a *g);
 unsigned int gm20b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id,
 	u32 *error_notifier);

@@ -91,6 +91,39 @@ static void report_pbdma_error(struct gk20a *g, u32 pbdma_id,
 	return;
 }
 
+void gv11b_pbdma_intr_enable(struct gk20a *g, bool enable)
+{
+	u32 pbdma_id = 0;
+	u32 intr_stall;
+	u32 num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
+
+	if (!enable) {
+		gm20b_pbdma_disable_and_clear_all_intr(g);
+		return;
+	}
+
+	/* clear and enable pbdma interrupt */
+	for (pbdma_id = 0; pbdma_id < num_pbdma; pbdma_id++) {
+		gm20b_pbdma_clear_all_intr(g, pbdma_id);
+		intr_stall = nvgpu_readl(g, pbdma_intr_stall_r(pbdma_id));
+		nvgpu_log_info(g, "pbdma id:%u, intr_en_0 0x%08x", pbdma_id,
+			intr_stall);
+		nvgpu_writel(g, pbdma_intr_en_0_r(pbdma_id), intr_stall);
+		intr_stall = nvgpu_readl(g, pbdma_intr_stall_1_r(pbdma_id));
+		/*
+		 * For bug 2082123
+		 * Mask the unused HCE_RE_ILLEGAL_OP bit from the interrupt.
+		 */
+		intr_stall &= ~pbdma_intr_stall_1_hce_illegal_op_enabled_f();
+		nvgpu_log_info(g, "pbdma id:%u, intr_en_1 0x%08x", pbdma_id,
+			intr_stall);
+		nvgpu_writel(g, pbdma_intr_en_1_r(pbdma_id), intr_stall);
+	}
+}
+
 unsigned int gv11b_pbdma_handle_intr_0(struct gk20a *g,
 	u32 pbdma_id, u32 pbdma_intr_0,
 	u32 *handled, u32 *error_notifier)
@@ -204,4 +237,4 @@ u32 gv11b_pbdma_channel_fatal_0_intr_descs(void)
 		pbdma_intr_0_signature_pending_f();
 
 	return channel_fatal_0_intr_descs;
-}
\ No newline at end of file
+}
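
The gm20b and gv11b implementations above differ only in which bits they
strip from the stall values before programming the enable registers:
gm20b masks LBREQ out of intr_en_0 (and writes the masked value back to
the stall register), and both mask HCE_RE_ILLEGAL_OP out of intr_en_1 for
bug 2082123. Below is a hypothetical helper, not part of this commit,
that makes the shared pattern explicit; it assumes the nvgpu register
accessors used in the hunks above.

/*
 * Hypothetical common helper: both intr_enable implementations above
 * reduce to clearing pending interrupts, then programming the enable
 * registers from the stall masks with per-chip bits removed. gm20b's
 * write-back of the masked value to pbdma_intr_stall_r() is omitted.
 */
static void pbdma_intr_enable_common(struct gk20a *g, u32 pbdma_id,
		u32 en0_mask_out, u32 en1_mask_out)
{
	u32 intr_stall;

	gm20b_pbdma_clear_all_intr(g, pbdma_id);

	intr_stall = nvgpu_readl(g, pbdma_intr_stall_r(pbdma_id));
	nvgpu_writel(g, pbdma_intr_en_0_r(pbdma_id),
		intr_stall & ~en0_mask_out);

	intr_stall = nvgpu_readl(g, pbdma_intr_stall_1_r(pbdma_id));
	nvgpu_writel(g, pbdma_intr_en_1_r(pbdma_id),
		intr_stall & ~en1_mask_out);
}

/*
 * gm20b would then call:
 *	pbdma_intr_enable_common(g, pbdma_id,
 *		pbdma_intr_stall_lbreq_enabled_f(),
 *		pbdma_intr_stall_1_hce_illegal_op_enabled_f());
 * and gv11b:
 *	pbdma_intr_enable_common(g, pbdma_id, 0U,
 *		pbdma_intr_stall_1_hce_illegal_op_enabled_f());
 */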

@@ -27,6 +27,7 @@
 struct gk20a;
 
+void gv11b_pbdma_intr_enable(struct gk20a *g, bool enable);
 unsigned int gv11b_pbdma_handle_intr_0(struct gk20a *g,
 	u32 pbdma_id, u32 pbdma_intr_0,
 	u32 *handled, u32 *error_notifier);

@@ -1000,6 +1000,7 @@ struct gpu_ops {
 	} engine;
 	struct {
+		void (*intr_enable)(struct gk20a *g, bool enable);
 		unsigned int (*handle_pbdma_intr_0)(struct gk20a *g,
 			u32 pbdma_id, u32 pbdma_intr_0,
 			u32 *handled, u32 *error_notifier);

@@ -910,6 +910,7 @@ static const struct gpu_ops tu104_ops = {
 		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
 	},
 	.pbdma = {
+		.intr_enable = gv11b_pbdma_intr_enable,
 		.pbdma_acquire_val = gm20b_pbdma_acquire_val,
 		.get_pbdma_signature = gp10b_pbdma_get_signature,
 		.dump_pbdma_status = gm20b_pbdma_dump_status,