gpu: nvgpu: separate sec2 isr into common and hal

SEC2 ISR handling requires message processing in the common message unit. That
unit needs HAL interfaces to check whether the message interrupt was received,
to set the message interrupt, and to handle the remaining interrupts.

JIRA NVGPU-2025

Change-Id: I3b5ad8968ea9298cc769113417931c4678009cf1
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2085753
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Sagar Kamble
Date:      2019-03-29 22:37:58 +05:30
Committer: mobile promotions
Commit:    f2ad7e0916 (parent 974ad342fa)

5 changed files with 95 additions and 48 deletions
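For orientation, here is a minimal sketch (not part of this commit) of how a chip-specific HAL is expected to plug into the new common ISR flow. The .sec2 op names come from the gpu_ops change below; the newchip_* identifiers are hypothetical placeholders:

/*
 * Hypothetical per-chip wiring: the chip only supplies register-level
 * accessors, while the ISR sequencing (message processing, clearing and
 * re-raising swgen0) lives in the common unit's sec2_isr().
 */
static const struct gpu_ops newchip_ops = {
	.sec2 = {
		.is_interrupted    = newchip_sec2_is_interrupted,    /* any SEC2 irq pending? */
		.get_intr          = newchip_sec2_get_intr,          /* read masked irqstat */
		.msg_intr_received = newchip_sec2_msg_intr_received, /* swgen0 (message) set? */
		.set_msg_intr      = newchip_sec2_set_msg_intr,      /* re-raise swgen0 */
		.clr_intr          = newchip_sec2_clr_intr,          /* write irqsclr */
		.process_intr      = newchip_sec2_process_intr,      /* halt/exterr handling */
	},
};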


@@ -199,6 +199,54 @@ exit:
 	return status;
 }
 
+static void sec2_isr(struct gk20a *g, struct nvgpu_sec2 *sec2)
+{
+	bool recheck = false;
+	u32 intr;
+
+	if (!g->ops.sec2.is_interrupted(sec2)) {
+		return;
+	}
+
+	nvgpu_mutex_acquire(&sec2->isr_mutex);
+	if (!sec2->isr_enabled) {
+		goto exit;
+	}
+
+	intr = g->ops.sec2.get_intr(g);
+	if (intr == 0U) {
+		goto exit;
+	}
+
+	/*
+	 * Handle the swgen0 interrupt to process messages received from SEC2.
+	 * Any other interrupt that requires some software action should be
+	 * handled here as well. The g->ops.sec2.process_intr() call below
+	 * handles the remaining hardware interrupts that are not expected to
+	 * be handled in software.
+	 */
+	if (g->ops.sec2.msg_intr_received(g)) {
+		if (nvgpu_sec2_process_message(sec2) != 0) {
+			g->ops.sec2.clr_intr(g, intr);
+			goto exit;
+		}
+		recheck = true;
+	}
+
+	g->ops.sec2.process_intr(g, sec2);
+	g->ops.sec2.clr_intr(g, intr);
+
+	if (recheck) {
+		if (!nvgpu_sec2_queue_is_empty(sec2->queues,
+					       SEC2_NV_MSGQ_LOG_ID)) {
+			g->ops.sec2.set_msg_intr(g);
+		}
+	}
+
+exit:
+	nvgpu_mutex_release(&sec2->isr_mutex);
+}
+
 int nvgpu_sec2_wait_message_cond(struct nvgpu_sec2 *sec2, u32 timeout_ms,
 	void *var, u8 val)
 {
@@ -213,9 +261,7 @@ int nvgpu_sec2_wait_message_cond(struct nvgpu_sec2 *sec2, u32 timeout_ms,
 			return 0;
 		}
 
-		if (g->ops.sec2.is_interrupted(sec2)) {
-			g->ops.sec2.isr(g);
-		}
+		sec2_isr(g, sec2);
 
 		nvgpu_usleep_range(delay, delay * 2U);
 		delay = min_t(u32, delay << 1U, POLL_DELAY_MAX_US);


@@ -1840,7 +1840,11 @@ struct gpu_ops {
 		void (*secured_sec2_start)(struct gk20a *g);
 		void (*enable_irq)(struct nvgpu_sec2 *sec2, bool enable);
 		bool (*is_interrupted)(struct nvgpu_sec2 *sec2);
-		void (*isr)(struct gk20a *g);
+		u32 (*get_intr)(struct gk20a *g);
+		bool (*msg_intr_received)(struct gk20a *g);
+		void (*set_msg_intr)(struct gk20a *g);
+		void (*clr_intr)(struct gk20a *g, u32 intr);
+		void (*process_intr)(struct gk20a *g, struct nvgpu_sec2 *sec2);
 		void (*msgq_tail)(struct gk20a *g, struct nvgpu_sec2 *sec2,
 			u32 *tail, bool set);
 		u32 (*falcon_base_addr)(void);


@@ -1412,7 +1412,11 @@ static const struct gpu_ops tu104_ops = {
 		.secured_sec2_start = tu104_start_sec2_secure,
 		.enable_irq = tu104_sec2_enable_irq,
 		.is_interrupted = tu104_sec2_is_interrupted,
-		.isr = tu104_sec2_isr,
+		.get_intr = tu104_sec2_get_intr,
+		.msg_intr_received = tu104_sec2_msg_intr_received,
+		.set_msg_intr = tu104_sec2_set_msg_intr,
+		.clr_intr = tu104_sec2_clr_intr,
+		.process_intr = tu104_sec2_process_intr,
 		.msgq_tail = tu104_sec2_msgq_tail,
 		.falcon_base_addr = tu104_sec2_falcon_base_addr,
 		.sec2_reset = tu104_sec2_reset,


@@ -22,7 +22,6 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/pmu.h>
-#include <nvgpu/falcon.h>
 #include <nvgpu/mm.h>
 #include <nvgpu/io.h>
 #include <nvgpu/timers.h>
@@ -310,7 +309,7 @@ void tu104_sec2_enable_irq(struct nvgpu_sec2 *sec2, bool enable)
 	u32 intr_mask;
 	u32 intr_dest;
 
-	nvgpu_falcon_set_irq(&g->sec2.flcn, false, 0x0, 0x0);
+	g->ops.falcon.set_irq(&sec2->flcn, false, 0x0, 0x0);
 
 	if (enable) {
 		/* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */
@@ -343,7 +342,7 @@ void tu104_sec2_enable_irq(struct nvgpu_sec2 *sec2, bool enable)
 			psec_falcon_irqmset_swgen0_f(1) |
 			psec_falcon_irqmset_swgen1_f(1);
 
-		nvgpu_falcon_set_irq(&g->sec2.flcn, true, intr_mask, intr_dest);
+		g->ops.falcon.set_irq(&sec2->flcn, true, intr_mask, intr_dest);
 	}
 }
@@ -364,34 +363,43 @@ bool tu104_sec2_is_interrupted(struct nvgpu_sec2 *sec2)
 	return false;
 }
 
-void tu104_sec2_isr(struct gk20a *g)
+u32 tu104_sec2_get_intr(struct gk20a *g)
 {
-	struct nvgpu_sec2 *sec2 = &g->sec2;
-	struct nvgpu_engine_mem_queue *queue;
-	u32 intr, mask;
-	bool recheck = false;
-
-	nvgpu_mutex_acquire(&sec2->isr_mutex);
-	if (!sec2->isr_enabled) {
-		nvgpu_mutex_release(&sec2->isr_mutex);
-		return;
-	}
+	u32 mask;
 
 	mask = gk20a_readl(g, psec_falcon_irqmask_r()) &
 		gk20a_readl(g, psec_falcon_irqdest_r());
 
-	intr = gk20a_readl(g, psec_falcon_irqstat_r());
-
-	intr = gk20a_readl(g, psec_falcon_irqstat_r()) & mask;
-	if (intr == 0U) {
-		gk20a_writel(g, psec_falcon_irqsclr_r(), intr);
-		nvgpu_mutex_release(&sec2->isr_mutex);
-		return;
-	}
+	return gk20a_readl(g, psec_falcon_irqstat_r()) & mask;
+}
+
+bool tu104_sec2_msg_intr_received(struct gk20a *g)
+{
+	u32 intr = tu104_sec2_get_intr(g);
+
+	return (intr & psec_falcon_irqstat_swgen0_true_f()) != 0U;
+}
+
+void tu104_sec2_set_msg_intr(struct gk20a *g)
+{
+	gk20a_writel(g, psec_falcon_irqsset_r(),
+		psec_falcon_irqsset_swgen0_set_f());
+}
+
+void tu104_sec2_clr_intr(struct gk20a *g, u32 intr)
+{
+	gk20a_writel(g, psec_falcon_irqsclr_r(), intr);
+}
+
+void tu104_sec2_process_intr(struct gk20a *g, struct nvgpu_sec2 *sec2)
+{
+	u32 intr;
+
+	intr = tu104_sec2_get_intr(g);
 
 	if ((intr & psec_falcon_irqstat_halt_true_f()) != 0U) {
 		nvgpu_err(g, "sec2 halt intr not implemented");
-		nvgpu_falcon_dump_stats(&g->sec2.flcn);
+		g->ops.falcon.dump_falcon_stats(&sec2->flcn);
 	}
 	if ((intr & psec_falcon_irqstat_exterr_true_f()) != 0U) {
 		nvgpu_err(g,
@@ -402,27 +410,7 @@ void tu104_sec2_isr(struct gk20a *g)
 			~psec_falcon_exterrstat_valid_m());
 	}
 
-	if ((intr & psec_falcon_irqstat_swgen0_true_f()) != 0U) {
-		if (nvgpu_sec2_process_message(sec2) != 0) {
-			gk20a_writel(g, psec_falcon_irqsclr_r(), intr);
-			goto exit;
-		}
-		recheck = true;
-	}
-
-	gk20a_writel(g, psec_falcon_irqsclr_r(), intr);
-
-	if (recheck) {
-		queue = sec2->queues[SEC2_NV_MSGQ_LOG_ID];
-		if (!nvgpu_engine_mem_queue_is_empty(queue)) {
-			gk20a_writel(g, psec_falcon_irqsset_r(),
-				psec_falcon_irqsset_swgen0_set_f());
-		}
-	}
-
-exit:
 	nvgpu_sec2_dbg(g, "Done");
-	nvgpu_mutex_release(&sec2->isr_mutex);
 }
 
 void tu104_start_sec2_secure(struct gk20a *g)


@@ -23,6 +23,7 @@
 #ifndef NVGPU_SEC2_TU104_H
 #define NVGPU_SEC2_TU104_H
 
+struct gk20a;
 struct nvgpu_sec2;
 
 int tu104_sec2_reset(struct gk20a *g);
@@ -39,8 +40,12 @@ int tu104_sec2_queue_tail(struct gk20a *g, u32 queue_id, u32 queue_index,
 void tu104_sec2_msgq_tail(struct gk20a *g, struct nvgpu_sec2 *sec2,
 	u32 *tail, bool set);
 
-void tu104_sec2_isr(struct gk20a *g);
 bool tu104_sec2_is_interrupted(struct nvgpu_sec2 *sec2);
+u32 tu104_sec2_get_intr(struct gk20a *g);
+bool tu104_sec2_msg_intr_received(struct gk20a *g);
+void tu104_sec2_set_msg_intr(struct gk20a *g);
+void tu104_sec2_clr_intr(struct gk20a *g, u32 intr);
+void tu104_sec2_process_intr(struct gk20a *g, struct nvgpu_sec2 *sec2);
 void tu104_sec2_enable_irq(struct nvgpu_sec2 *sec2, bool enable);
 void tu104_start_sec2_secure(struct gk20a *g);
 u32 tu104_sec2_falcon_base_addr(void);