Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-23 09:57:08 +03:00)
gpu: nvgpu: store ch ptr in gr isr data
Store a channel pointer that is either NULL or a referenced channel to
avoid confusion about channel ownership. A pure channel ID is dangerous.

Jira NVGPU-1460
Change-Id: I6f7b4f80cf39abc290ce9153ec6bf5b62918da97
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1955401
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 7df3d58750
commit 4e6d9afab8
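The change follows a simple pattern: the GR ISR data now carries a channel pointer that is either NULL or a referenced channel, and the interrupt handlers derive a channel ID from it only where one is needed for logging. Below is a rough, self-contained sketch of that pattern; the names channel, gr_isr_data, and log_channel_id are illustrative stand-ins, not the actual nvgpu definitions.

/*
 * Minimal standalone sketch (assumed simplifications of the nvgpu types).
 */
#include <stdio.h>
#include <stddef.h>

#define FIFO_INVAL_CHANNEL_ID 0xffffffffU

struct channel {
        unsigned int chid;
};

/* The ISR data carries a NULL-or-referenced channel pointer instead of a
 * raw channel ID, so ownership is unambiguous. */
struct gr_isr_data {
        struct channel *ch;
};

/* Handlers derive a channel ID only when they need one for logging,
 * falling back to the invalid-ID sentinel when no channel is attached. */
static void log_channel_id(const struct gr_isr_data *isr_data)
{
        unsigned int chid = isr_data->ch != NULL ?
                isr_data->ch->chid : FIFO_INVAL_CHANNEL_ID;

        printf("gr interrupt for channel %u\n", chid);
}

int main(void)
{
        struct channel ch = { .chid = 7 };
        struct gr_isr_data with_ch = { .ch = &ch };
        struct gr_isr_data without_ch = { .ch = NULL };

        log_channel_id(&with_ch);       /* prints channel 7 */
        log_channel_id(&without_ch);    /* prints channel 4294967295 */
        return 0;
}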
@@ -5028,13 +5028,15 @@ int gk20a_gr_reset(struct gk20a *g)
 static void gk20a_gr_set_error_notifier(struct gk20a *g,
                 struct gr_gk20a_isr_data *isr_data, u32 error_notifier)
 {
-        struct fifo_gk20a *f = &g->fifo;
         struct channel_gk20a *ch;
         struct tsg_gk20a *tsg;
         struct channel_gk20a *ch_tsg;
 
-        if (isr_data->chid != FIFO_INVAL_CHANNEL_ID) {
-                ch = &f->channel[isr_data->chid];
+        ch = isr_data->ch;
+
+        if (ch == NULL) {
+                return;
+        }
 
         if (gk20a_is_channel_marked_as_tsg(ch)) {
                 tsg = &g->fifo.tsg[ch->tsgid];
@@ -5051,7 +5053,6 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g,
         } else {
                 g->ops.fifo.set_error_notifier(ch, error_notifier);
         }
-        }
 }
 
 static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
@@ -5110,6 +5111,8 @@ int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch,
 {
         u32 gr_fecs_intr = gk20a_readl(g, gr_fecs_host_int_status_r());
         int ret = 0;
+        u32 chid = isr_data->ch != NULL ?
+                isr_data->ch->chid : FIFO_INVAL_CHANNEL_ID;
 
         if (gr_fecs_intr == 0U) {
                 return 0;
@@ -5128,12 +5131,12 @@ int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch,
                         gr_fecs_host_int_status_watchdog_active_f()) != 0U) {
                 /* currently, recovery is not initiated */
                 nvgpu_err(g, "fecs watchdog triggered for channel %u, "
-                        "cannot ctxsw anymore !!", isr_data->chid);
+                        "cannot ctxsw anymore !!", chid);
                 g->ops.gr.dump_gr_falcon_stats(g);
         } else {
                 nvgpu_err(g,
                         "fecs error interrupt 0x%08x for channel %u",
-                        gr_fecs_intr, isr_data->chid);
+                        gr_fecs_intr, chid);
         }
 
         gk20a_writel(g, gr_fecs_host_int_clear_r(), gr_fecs_intr);
@@ -5144,6 +5147,8 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
                 struct gr_gk20a_isr_data *isr_data)
 {
         u32 gr_class_error;
+        u32 chid = isr_data->ch != NULL ?
+                isr_data->ch->chid : FIFO_INVAL_CHANNEL_ID;
 
         nvgpu_log_fn(g, " ");
 
@@ -5162,7 +5167,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
                 gk20a_readl(g, gr_trapped_data_mme_r())),
                 gr_trapped_addr_datahigh_v(isr_data->addr),
                 gr_trapped_addr_priv_v(isr_data->addr),
-                gr_class_error, isr_data->chid);
+                gr_class_error, chid);
 
         nvgpu_err(g, "trapped data low 0x%08x",
                 gk20a_readl(g, gr_trapped_data_lo_r()));
@@ -5177,6 +5182,9 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
 static int gk20a_gr_handle_firmware_method(struct gk20a *g,
                 struct gr_gk20a_isr_data *isr_data)
 {
+        u32 chid = isr_data->ch != NULL ?
+                isr_data->ch->chid : FIFO_INVAL_CHANNEL_ID;
+
         nvgpu_log_fn(g, " ");
 
         gk20a_gr_set_error_notifier(g, isr_data,
@@ -5184,15 +5192,14 @@ static int gk20a_gr_handle_firmware_method(struct gk20a *g,
         nvgpu_err(g,
                 "firmware method 0x%08x, offset 0x%08x for channel %u",
                 isr_data->class_num, isr_data->offset,
-                isr_data->chid);
+                chid);
         return -EINVAL;
 }
 
 int gk20a_gr_handle_semaphore_pending(struct gk20a *g,
                 struct gr_gk20a_isr_data *isr_data)
 {
-        struct fifo_gk20a *f = &g->fifo;
-        struct channel_gk20a *ch = &f->channel[isr_data->chid];
+        struct channel_gk20a *ch = isr_data->ch;
         struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
 
         g->ops.fifo.post_event_id(tsg,
@@ -5226,8 +5233,7 @@ static inline bool is_valid_cyclestats_bar0_offset_gk20a(struct gk20a *g,
 int gk20a_gr_handle_notify_pending(struct gk20a *g,
                 struct gr_gk20a_isr_data *isr_data)
 {
-        struct fifo_gk20a *f = &g->fifo;
-        struct channel_gk20a *ch = &f->channel[isr_data->chid];
+        struct channel_gk20a *ch = isr_data->ch;
 
 #if defined(CONFIG_GK20A_CYCLE_STATS)
         void *virtual_address;
@@ -5774,6 +5780,7 @@ int gk20a_gr_isr(struct gk20a *g)
         struct tsg_gk20a *tsg = NULL;
         u32 gr_engine_id;
         u32 global_esr = 0;
+        u32 chid;
 
         nvgpu_log_fn(g, " ");
         nvgpu_log(g, gpu_dbg_intr, "pgraph intr %08x", gr_intr);
@@ -5806,10 +5813,10 @@ int gk20a_gr_isr(struct gk20a *g)
                 isr_data.class_num = gr_fe_object_table_nvclass_v(obj_table);
 
                 ch = gk20a_gr_get_channel_from_ctx(g, isr_data.curr_ctx, &tsgid);
-                if (ch != NULL) {
-                        isr_data.chid = ch->chid;
-                } else {
-                        isr_data.chid = FIFO_INVAL_CHANNEL_ID;
+                isr_data.ch = ch;
+                chid = ch != NULL ? ch->chid : FIFO_INVAL_CHANNEL_ID;
+
+                if (ch == NULL) {
                         nvgpu_err(g, "ch id is INVALID 0xffffffff");
                 }
 
@@ -5822,7 +5829,7 @@ int gk20a_gr_isr(struct gk20a *g)
                 "data 0x%08x 0x%08x,"
                 "ctx 0x%08x, offset 0x%08x, "
                 "subchannel 0x%08x, class 0x%08x",
-                isr_data.chid, isr_data.addr,
+                chid, isr_data.addr,
                 isr_data.data_hi, isr_data.data_lo,
                 isr_data.curr_ctx, isr_data.offset,
                 isr_data.sub_chan, isr_data.class_num);
@@ -5999,10 +6006,9 @@ int gk20a_gr_isr(struct gk20a *g)
                 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
                           "GPC exception pending");
 
-                fault_ch = gk20a_fifo_channel_from_chid(g,
-                        isr_data.chid);
+                fault_ch = isr_data.ch;
 
-                /*isr_data.chid can be ~0 and fault_ch can be NULL */
+                /* fault_ch can be NULL */
                 /* check if any gpc has an exception */
                 if (gk20a_gr_handle_gpc_exception(g, &post_event,
                         fault_ch, &global_esr) != 0) {

@@ -234,7 +234,7 @@ struct gr_gk20a_isr_data {
         u32 data_lo;
         u32 data_hi;
         u32 curr_ctx;
-        u32 chid;
+        struct channel_gk20a *ch;
         u32 offset;
         u32 sub_chan;
         u32 class_num;