Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Add NVGPU_FEATURE_CHANNEL_TSG_CONTROL compiler flag
This flag is added to compile out the features below from the safety build:

- set_preemption_mode
- channel_enable
- channel_disable
- channel_preempt
- channel_force_reset
- tsg_enable
- tsg_disable
- tsg_preempt
- tsg_event_id_ctrl
- post_event_id

JIRA NVGPU-3516

Change-Id: I935841db766f192f62598240c0e245a2959555be
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2126829
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions

parent 9e3f0b22e9
commit 1e7405a5dc
@@ -31,6 +31,7 @@ ccflags-y += -DNVGPU_VPR
 ccflags-y += -DNVGPU_REPLAYABLE_FAULT
 ccflags-y += -DNVGPU_GRAPHICS
 ccflags-y += -DNVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
+ccflags-y += -DNVGPU_FEATURE_CHANNEL_TSG_CONTROL
 
 obj-$(CONFIG_GK20A) := nvgpu.o
 
@@ -80,6 +80,10 @@ NVGPU_COMMON_CFLAGS += -DNVGPU_ENGINE
 NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING := 1
 NVGPU_COMMON_CFLAGS += -DNVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
 
+# Enable Channel/TSG Control for safety build until devctl whitelisting is done
+NVGPU_FEATURE_CHANNEL_TSG_CONTROL := 1
+NVGPU_COMMON_CFLAGS += -DNVGPU_FEATURE_CHANNEL_TSG_CONTROL
+
 #
 # Flags enabled for only the regular build profile.
 #
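The two hunks above wire the same define into both build systems: the Kbuild file via ccflags-y, and the standalone makefile via the NVGPU_COMMON_CFLAGS list. Either way, the compiler simply sees -DNVGPU_FEATURE_CHANNEL_TSG_CONTROL. A minimal standalone check program (hypothetical, not part of the nvgpu tree) shows the effect:

/* flag_check.c -- build and run with:
 *   cc -DNVGPU_FEATURE_CHANNEL_TSG_CONTROL flag_check.c && ./a.out
 * then rebuild without the -D option to take the other branch.
 */
#include <stdio.h>

int main(void)
{
#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
	puts("channel/TSG control compiled in");
#else
	puts("channel/TSG control compiled out");
#endif
	return 0;
}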
@@ -1754,12 +1754,14 @@ static void nvgpu_channel_wdt_handler(struct nvgpu_channel *ch)
 			gk20a_gr_debug_dump(g);
 		}
 
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 		if (g->ops.tsg.force_reset(ch,
 			NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT,
 			ch->wdt.debug_dump) != 0) {
 			nvgpu_err(g, "failed tsg force reset for chid: %d",
 				ch->chid);
 		}
+#endif
 	}
 }
 
@@ -2597,6 +2599,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
 			nvgpu_warn(g, "failed to broadcast");
 		}
 
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 		if (post_events) {
 			struct nvgpu_tsg *tsg =
 				nvgpu_tsg_from_ch(c);
@@ -2605,6 +2608,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
 				NVGPU_EVENT_ID_BLOCKING_SYNC);
 			}
 		}
+#endif
 		/*
 		 * Only non-deterministic channels get the
 		 * channel_update callback. We don't allow
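Note the shape of this hunk: the condition-variable broadcast stays unconditional, and only the TSG event-id posting is fenced off. A minimal sketch of that split, using illustrative names rather than real nvgpu symbols:

#include <stdio.h>

static void broadcast_wakeup(void)
{
	puts("wake semaphore waiters");
}

#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
static void post_event_id(int event_id)
{
	printf("post event %d\n", event_id);
}
#endif

static void semaphore_wakeup(int post_events)
{
	broadcast_wakeup();	/* always runs, in both build profiles */
#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
	if (post_events != 0) {
		post_event_id(1);
	}
#else
	(void)post_events;	/* keep -Wunused-parameter quiet */
#endif
}

int main(void)
{
	semaphore_wakeup(1);
	return 0;
}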
@@ -290,6 +290,7 @@ static void nvgpu_tsg_destroy(struct gk20a *g, struct nvgpu_tsg *tsg)
 	nvgpu_mutex_destroy(&tsg->event_id_list_lock);
 }
 
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 /* force reset tsg that the channel is bound to */
 int nvgpu_tsg_force_reset_ch(struct nvgpu_channel *ch,
 			u32 err_code, bool verbose)
@@ -308,6 +309,7 @@ int nvgpu_tsg_force_reset_ch(struct nvgpu_channel *ch,
 
 	return 0;
 }
+#endif
 
 void nvgpu_tsg_cleanup_sw(struct gk20a *g)
 {
@@ -132,6 +132,7 @@ static int gr_intr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
 	return ret;
 }
 
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 static void gr_intr_post_bpt_events(struct gk20a *g, struct nvgpu_tsg *tsg,
 				u32 global_esr)
 {
@@ -145,6 +146,7 @@ static void gr_intr_post_bpt_events(struct gk20a *g, struct nvgpu_tsg *tsg,
 		g->ops.tsg.post_event_id(tsg, NVGPU_EVENT_ID_BPT_PAUSE);
 	}
 }
+#endif
 
 static int gr_intr_handle_illegal_method(struct gk20a *g,
 				struct nvgpu_gr_isr_data *isr_data)
@@ -645,8 +647,10 @@ void nvgpu_gr_intr_handle_semaphore_pending(struct gk20a *g,
 	if (tsg != NULL) {
 		int err;
 
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 		g->ops.tsg.post_event_id(tsg,
 			NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
+#endif
 
 		err = nvgpu_cond_broadcast(&ch->semaphore_wq);
 		if (err != 0) {
@@ -840,10 +844,12 @@ int nvgpu_gr_intr_stall_isr(struct gk20a *g)
 	/* Enable fifo access */
 	g->ops.gr.init.fifo_access(g, true);
 
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 	/* Posting of BPT events should be the last thing in this function */
 	if ((global_esr != 0U) && (tsg != NULL) && (need_reset == false)) {
 		gr_intr_post_bpt_events(g, tsg, global_esr);
 	}
+#endif
 
 	if (ch != NULL) {
 		nvgpu_channel_put(ch);
@@ -222,6 +222,7 @@ void nvgpu_gr_setup_free_subctx(struct nvgpu_channel *c)
 	}
 }
 
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 int nvgpu_gr_setup_set_preemption_mode(struct nvgpu_channel *ch,
 				u32 graphics_preempt_mode,
 				u32 compute_preempt_mode)
@@ -315,3 +316,4 @@ enable_ch:
 	}
 	return err;
 }
+#endif
@@ -471,7 +471,9 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 	nvgpu_gr_ctx_set_cilp_preempt_pending(gr_ctx, true);
 	g->gr->cilp_preempt_pending_chid = fault_ch->chid;
 
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 	g->ops.tsg.post_event_id(tsg, NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED);
+#endif
 
 	return 0;
 }
@@ -111,7 +111,9 @@ int gp10b_gr_intr_handle_fecs_error(struct gk20a *g,
 	struct nvgpu_channel *ch;
 	u32 chid = NVGPU_INVALID_CHANNEL_ID;
 	int ret = 0;
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 	struct nvgpu_tsg *tsg;
+#endif
 	struct nvgpu_fecs_host_intr_status fecs_host_intr;
 	u32 gr_fecs_intr = g->ops.gr.falcon.fecs_host_intr_status(g,
 			&fecs_host_intr);
@@ -159,10 +161,11 @@ int gp10b_gr_intr_handle_fecs_error(struct gk20a *g,
 			g->ops.debugger.post_events(ch);
 #endif
 
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 			tsg = &g->fifo.tsg[ch->tsgid];
-
 			g->ops.tsg.post_event_id(tsg,
 				NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE);
+#endif
 
 			nvgpu_channel_put(ch);
 		}
@@ -746,8 +746,10 @@ static const struct gpu_ops gm20b_ops = {
 			nvgpu_tsg_unbind_channel_check_ctx_reload,
 		.unbind_channel_check_eng_faulted = NULL,
 		.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 		.force_reset = nvgpu_tsg_force_reset_ch,
 		.post_event_id = nvgpu_tsg_post_event_id,
+#endif
 #ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
 		.set_timeslice = nvgpu_tsg_set_timeslice,
 #endif
@@ -359,7 +359,9 @@ static const struct gpu_ops gp10b_ops = {
 #endif /* NVGPU_GRAPHICS */
 		.alloc_obj_ctx = nvgpu_gr_setup_alloc_obj_ctx,
 		.free_gr_ctx = nvgpu_gr_setup_free_gr_ctx,
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 		.set_preemption_mode = nvgpu_gr_setup_set_preemption_mode,
+#endif
 	},
 #ifdef NVGPU_GRAPHICS
 	.zbc = {
@@ -827,8 +829,10 @@ static const struct gpu_ops gp10b_ops = {
 			nvgpu_tsg_unbind_channel_check_ctx_reload,
 		.unbind_channel_check_eng_faulted = NULL,
 		.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 		.force_reset = nvgpu_tsg_force_reset_ch,
 		.post_event_id = nvgpu_tsg_post_event_id,
+#endif
 #ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
 		.set_timeslice = nvgpu_tsg_set_timeslice,
 #endif
@@ -439,7 +439,9 @@ static const struct gpu_ops gv11b_ops = {
 		.alloc_obj_ctx = nvgpu_gr_setup_alloc_obj_ctx,
 		.free_gr_ctx = nvgpu_gr_setup_free_gr_ctx,
 		.free_subctx = nvgpu_gr_setup_free_subctx,
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 		.set_preemption_mode = nvgpu_gr_setup_set_preemption_mode,
+#endif
 	},
 #ifdef NVGPU_GRAPHICS
 	.zbc = {
@@ -963,8 +965,10 @@ static const struct gpu_ops gv11b_ops = {
 		.unbind_channel_check_eng_faulted =
 			gv11b_tsg_unbind_channel_check_eng_faulted,
 		.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 		.force_reset = nvgpu_tsg_force_reset_ch,
 		.post_event_id = nvgpu_tsg_post_event_id,
+#endif
 #ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
 		.set_timeslice = nvgpu_tsg_set_timeslice,
 #endif
@@ -475,7 +475,9 @@ static const struct gpu_ops tu104_ops = {
 		.alloc_obj_ctx = nvgpu_gr_setup_alloc_obj_ctx,
 		.free_gr_ctx = nvgpu_gr_setup_free_gr_ctx,
 		.free_subctx = nvgpu_gr_setup_free_subctx,
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 		.set_preemption_mode = nvgpu_gr_setup_set_preemption_mode,
+#endif
 	},
 #ifdef NVGPU_GRAPHICS
 	.zbc = {
@@ -1007,8 +1009,10 @@ static const struct gpu_ops tu104_ops = {
 		.unbind_channel_check_eng_faulted =
 			gv11b_tsg_unbind_channel_check_eng_faulted,
 		.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 		.force_reset = nvgpu_tsg_force_reset_ch,
 		.post_event_id = nvgpu_tsg_post_event_id,
+#endif
#ifdef NVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
 		.set_timeslice = nvgpu_tsg_set_timeslice,
 #endif
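All four HAL tables above (gm20b, gp10b, gv11b, tu104) rely on the same property of designated initializers: when the flag is off, the guarded members are absent from the struct entirely, so there is no stray NULL pointer to call by mistake; every call site has to be guarded as well, as in the watchdog hunk earlier. A toy version of the pattern, with made-up names:

#include <stdio.h>

struct tsg_ops {
	int (*check_timeout)(int tsgid);
#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
	int (*force_reset)(int chid);
#endif
};

static int check_timeout_impl(int tsgid)
{
	printf("check timeout on tsg %d\n", tsgid);
	return 0;
}

#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
static int force_reset_impl(int chid)
{
	printf("force reset channel %d\n", chid);
	return 0;
}
#endif

static const struct tsg_ops ops = {
	.check_timeout = check_timeout_impl,
#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
	.force_reset = force_reset_impl,	/* member exists only with the flag */
#endif
};

int main(void)
{
#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
	return ops.force_reset(0);
#else
	return ops.check_timeout(0);
#endif
}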
@@ -641,9 +641,11 @@ struct gpu_ops {
 			struct vm_gk20a *vm,
 			struct nvgpu_gr_ctx *gr_ctx);
 		void (*free_subctx)(struct nvgpu_channel *c);
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 		int (*set_preemption_mode)(struct nvgpu_channel *ch,
 				u32 graphics_preempt_mode,
 				u32 compute_preempt_mode);
+#endif
 	} setup;
 #ifdef NVGPU_GRAPHICS
 	struct {
@@ -1217,10 +1219,12 @@ struct gpu_ops {
 			struct nvgpu_channel_hw_state *state);
 		bool (*check_ctxsw_timeout)(struct nvgpu_tsg *tsg,
 				bool *verbose, u32 *ms);
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 		int (*force_reset)(struct nvgpu_channel *ch,
 				u32 err_code, bool verbose);
 		void (*post_event_id)(struct nvgpu_tsg *tsg,
 				enum nvgpu_event_id_type event_id);
+#endif
 		int (*set_timeslice)(struct nvgpu_tsg *tsg, u32 timeslice_us);
 		u32 (*default_timeslice_us)(struct gk20a *g);
 		int (*set_interleave)(struct nvgpu_tsg *tsg, u32 new_level);
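Because the #ifdef removes struct members rather than just their initializers, the layout of struct gpu_ops differs between the two build profiles, so every object linked into the driver must be compiled with the same feature flags. A tiny illustration with a hypothetical struct (not the real gpu_ops):

#include <stdio.h>

struct ops {
	int (*check_ctxsw_timeout)(void);
#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
	int (*force_reset)(void);
	void (*post_event_id)(int event_id);
#endif
	int (*set_timeslice)(void);
};

int main(void)
{
	/* Prints a different size depending on whether the translation unit
	 * was built with -DNVGPU_FEATURE_CHANNEL_TSG_CONTROL, which is why
	 * mixing flag settings across objects would corrupt the ops table. */
	printf("sizeof(struct ops) = %zu\n", sizeof(struct ops));
	return 0;
}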
@@ -40,8 +40,10 @@ void nvgpu_gr_setup_free_gr_ctx(struct gk20a *g,
 	struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx);
 void nvgpu_gr_setup_free_subctx(struct nvgpu_channel *c);
 
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 int nvgpu_gr_setup_set_preemption_mode(struct nvgpu_channel *ch,
 				u32 graphics_preempt_mode,
 				u32 compute_preempt_mode);
+#endif
 
 #endif /* NVGPU_GR_SETUP_H */
@@ -115,14 +115,16 @@ int nvgpu_tsg_unbind_channel_check_hw_state(struct nvgpu_tsg *tsg,
 void nvgpu_tsg_unbind_channel_check_ctx_reload(struct nvgpu_tsg *tsg,
 		struct nvgpu_channel *ch,
 		struct nvgpu_channel_hw_state *hw_state);
+#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
 int nvgpu_tsg_force_reset_ch(struct nvgpu_channel *ch,
 		u32 err_code, bool verbose);
+#endif
+void nvgpu_tsg_post_event_id(struct nvgpu_tsg *tsg,
+		enum nvgpu_event_id_type event_id);
 void nvgpu_tsg_set_ctx_mmu_error(struct gk20a *g,
 		struct nvgpu_tsg *tsg);
 bool nvgpu_tsg_mark_error(struct gk20a *g, struct nvgpu_tsg *tsg);
 
-void nvgpu_tsg_post_event_id(struct nvgpu_tsg *tsg,
-		enum nvgpu_event_id_type event_id);
 bool nvgpu_tsg_check_ctxsw_timeout(struct nvgpu_tsg *tsg,
 		bool *debug_dump, u32 *ms);
 int nvgpu_tsg_set_runlist_interleave(struct nvgpu_tsg *tsg, u32 level);
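Guarding the declarations in the headers, not just the definitions, means any call site missed during the cleanup fails at compile time rather than at link time. A small self-contained sketch of that effect:

#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
static int feature_call(void)
{
	return 1;
}
#endif

int main(void)
{
#ifdef NVGPU_FEATURE_CHANNEL_TSG_CONTROL
	return feature_call();
#else
	/* With the flag off, even naming feature_call() here would trip an
	 * implicit-declaration diagnostic, so stale callers surface early. */
	return 0;
#endif
}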