gpu: nvgpu: Remove NVGPU_DBG_GPU_IOCTL_WRITE_SINGLE_SM_ERROR_STATE API

Remove the API as it has no use cases.

Bug 200445906

Change-Id: Ia2803bd05d78853963011a67091b34ba5bdb3732
Signed-off-by: Anup Mahindre <amahindre@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1817629
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Anup Mahindre
Date:      2018-09-05 10:02:35 +05:30
Committer: mobile promotions
Parent:    a77bce7193
Commit:    7e591dced9

14 changed files with 0 additions and 274 deletions
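
For context: the ioctl being removed was the write-side counterpart of
NVGPU_DBG_GPU_IOCTL_READ_SINGLE_SM_ERROR_STATE. A debugger filled a
struct nvgpu_dbg_gpu_sm_error_state_record in user space and pointed the
args struct (deleted from the uapi header in the last hunk below) at it.
A minimal sketch of how a hypothetical user-space client would have
issued the call follows; how dbg_fd is obtained and the zeroed example
payload are illustrative assumptions, not part of this change.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>	/* pre-removal uapi with the write_single args */

/* Hypothetical caller; dbg_fd is an open nvgpu debugger-session fd. */
static int write_sm_error_state(int dbg_fd, __u32 sm_id)
{
	struct nvgpu_dbg_gpu_sm_error_state_record record;
	struct nvgpu_dbg_gpu_write_single_sm_error_state_args args;

	/* Example payload: clear all recorded ESR state for this SM. */
	memset(&record, 0, sizeof(record));
	memset(&args, 0, sizeof(args));
	args.sm_id = sm_id;
	args.sm_error_state_record_mem = (__u64)(uintptr_t)&record;
	args.sm_error_state_record_size = sizeof(record);

	/* After this commit the request number is retired: the ioctl
	 * dispatcher no longer has a case for it and the call fails. */
	return ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_WRITE_SINGLE_SM_ERROR_STATE,
			&args);
}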

View File

@@ -1306,22 +1306,6 @@ static void gm20b_gr_read_sm_error_state(struct gk20a *g,
}

static void gm20b_gr_write_sm_error_state(struct gk20a *g,
		u32 offset,
		struct nvgpu_tsg_sm_error_state *sm_error_states)
{
	gk20a_writel(g, gr_gpc0_tpc0_sm_hww_global_esr_r() + offset,
			sm_error_states->hww_global_esr);
	gk20a_writel(g, gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset,
			sm_error_states->hww_warp_esr);
	gk20a_writel(g, gr_gpc0_tpc0_sm_hww_warp_esr_pc_r() + offset,
			u64_lo32(sm_error_states->hww_warp_esr_pc));
	gk20a_writel(g, gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r() + offset,
			sm_error_states->hww_global_esr_report_mask);
	gk20a_writel(g, gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r() + offset,
			sm_error_states->hww_warp_esr_report_mask);
}

int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
		struct channel_gk20a *fault_ch)
{
@@ -1356,70 +1340,6 @@ record_fail:
	return sm_id;
}

int gm20b_gr_update_sm_error_state(struct gk20a *g,
		struct channel_gk20a *ch, u32 sm_id,
		struct nvgpu_tsg_sm_error_state *sm_error_state)
{
	u32 gpc, tpc, offset;
	struct tsg_gk20a *tsg;
	struct nvgpu_gr_ctx *ch_ctx;
	struct nvgpu_tsg_sm_error_state *tsg_sm_error_states;
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
					GPU_LIT_TPC_IN_GPC_STRIDE);
	int err = 0;

	tsg = tsg_gk20a_from_ch(ch);
	if (!tsg) {
		return -EINVAL;
	}

	ch_ctx = &tsg->gr_ctx;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	tsg_sm_error_states = tsg->sm_error_states + sm_id;
	gk20a_tsg_update_sm_error_state_locked(tsg, sm_id, sm_error_state);

	err = gr_gk20a_disable_ctxsw(g);
	if (err) {
		nvgpu_err(g, "unable to stop gr ctxsw");
		goto fail;
	}

	gpc = g->gr.sm_to_cluster[sm_id].gpc_index;
	tpc = g->gr.sm_to_cluster[sm_id].tpc_index;

	offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc;

	if (gk20a_is_channel_ctx_resident(ch)) {
		gm20b_gr_write_sm_error_state(g, offset, tsg_sm_error_states);
	} else {
		err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx, false);
		if (err) {
			goto enable_ctxsw;
		}

		gr_gk20a_ctx_patch_write(g, ch_ctx,
				gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r() + offset,
				tsg_sm_error_states->hww_global_esr_report_mask,
				true);
		gr_gk20a_ctx_patch_write(g, ch_ctx,
				gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r() + offset,
				tsg_sm_error_states->hww_warp_esr_report_mask,
				true);

		gr_gk20a_ctx_patch_write_end(g, ch_ctx, false);
	}

enable_ctxsw:
	err = gr_gk20a_enable_ctxsw(g);

fail:
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return err;
}

int gm20b_gr_clear_sm_error_state(struct gk20a *g,
		struct channel_gk20a *ch, u32 sm_id)
{

View File

@@ -117,9 +117,6 @@ void gr_gm20b_get_access_map(struct gk20a *g,
			     u32 **whitelist, int *num_entries);
int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc,
		u32 tpc, u32 sm, struct channel_gk20a *fault_ch);
int gm20b_gr_update_sm_error_state(struct gk20a *g,
		struct channel_gk20a *ch, u32 sm_id,
		struct nvgpu_tsg_sm_error_state *sm_error_state);
int gm20b_gr_clear_sm_error_state(struct gk20a *g,
		struct channel_gk20a *ch, u32 sm_id);
int gr_gm20b_get_preemption_mode_flags(struct gk20a *g,

View File

@@ -275,7 +275,6 @@ static const struct gpu_ops gm20b_ops = {
		.update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode,
		.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
		.record_sm_error_state = gm20b_gr_record_sm_error_state,
		.update_sm_error_state = gm20b_gr_update_sm_error_state,
		.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
		.suspend_contexts = gr_gk20a_suspend_contexts,
		.resume_contexts = gr_gk20a_resume_contexts,

View File

@@ -338,7 +338,6 @@ static const struct gpu_ops gp106_ops = {
		.update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode,
		.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
		.record_sm_error_state = gm20b_gr_record_sm_error_state,
		.update_sm_error_state = gm20b_gr_update_sm_error_state,
		.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
		.suspend_contexts = gr_gp10b_suspend_contexts,
		.resume_contexts = gr_gk20a_resume_contexts,

View File

@@ -295,7 +295,6 @@ static const struct gpu_ops gp10b_ops = {
		.update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode,
		.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
		.record_sm_error_state = gm20b_gr_record_sm_error_state,
		.update_sm_error_state = gm20b_gr_update_sm_error_state,
		.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
		.suspend_contexts = gr_gp10b_suspend_contexts,
		.resume_contexts = gr_gk20a_resume_contexts,

View File

@@ -377,7 +377,6 @@ static const struct gpu_ops gv100_ops = {
		.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
		.init_hwpm_pmm_register = gr_gv100_init_hwpm_pmm_register,
		.record_sm_error_state = gv11b_gr_record_sm_error_state,
		.update_sm_error_state = gv11b_gr_update_sm_error_state,
		.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
		.suspend_contexts = gr_gp10b_suspend_contexts,
		.resume_contexts = gr_gk20a_resume_contexts,

View File

@@ -3228,96 +3228,6 @@ void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state)
	}
}

static void gv11b_gr_write_sm_error_state(struct gk20a *g,
		u32 offset,
		struct nvgpu_tsg_sm_error_state *sm_error_states)
{
	nvgpu_writel(g,
		gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset,
		sm_error_states->hww_global_esr);
	nvgpu_writel(g,
		gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset,
		sm_error_states->hww_warp_esr);
	nvgpu_writel(g,
		gr_gpc0_tpc0_sm0_hww_warp_esr_pc_r() + offset,
		u64_lo32(sm_error_states->hww_warp_esr_pc));
	nvgpu_writel(g,
		gr_gpc0_tpc0_sm0_hww_warp_esr_pc_hi_r() + offset,
		u64_hi32(sm_error_states->hww_warp_esr_pc));
	nvgpu_writel(g,
		gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_r() + offset,
		sm_error_states->hww_global_esr_report_mask);
	nvgpu_writel(g,
		gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_r() + offset,
		sm_error_states->hww_warp_esr_report_mask);
}

int gv11b_gr_update_sm_error_state(struct gk20a *g,
		struct channel_gk20a *ch, u32 sm_id,
		struct nvgpu_tsg_sm_error_state *sm_error_state)
{
	struct tsg_gk20a *tsg;
	u32 gpc, tpc, sm, offset;
	struct nvgpu_gr_ctx *ch_ctx;
	int err = 0;
	struct nvgpu_tsg_sm_error_state *tsg_sm_error_states;

	tsg = tsg_gk20a_from_ch(ch);
	if (tsg == NULL) {
		return -EINVAL;
	}

	ch_ctx = &tsg->gr_ctx;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	tsg_sm_error_states = tsg->sm_error_states + sm_id;
	gk20a_tsg_update_sm_error_state_locked(tsg, sm_id, sm_error_state);

	err = gr_gk20a_disable_ctxsw(g);
	if (err) {
		nvgpu_err(g, "unable to stop gr ctxsw");
		goto fail;
	}

	gpc = g->gr.sm_to_cluster[sm_id].gpc_index;
	tpc = g->gr.sm_to_cluster[sm_id].tpc_index;
	sm = g->gr.sm_to_cluster[sm_id].sm_index;

	offset = gk20a_gr_gpc_offset(g, gpc) +
			gk20a_gr_tpc_offset(g, tpc) +
			gv11b_gr_sm_offset(g, sm);

	if (gk20a_is_channel_ctx_resident(ch)) {
		gv11b_gr_write_sm_error_state(g, offset, tsg_sm_error_states);
	} else {
		err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx, false);
		if (err) {
			goto enable_ctxsw;
		}

		gr_gk20a_ctx_patch_write(g, ch_ctx,
				gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r() +
					offset,
				tsg_sm_error_states->hww_global_esr_report_mask,
				true);
		gr_gk20a_ctx_patch_write(g, ch_ctx,
				gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r() +
					offset,
				tsg_sm_error_states->hww_warp_esr_report_mask,
				true);

		gr_gk20a_ctx_patch_write_end(g, ch_ctx, false);
	}

enable_ctxsw:
	err = gr_gk20a_enable_ctxsw(g);

fail:
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return err;
}

int gv11b_gr_set_sm_debug_mode(struct gk20a *g,
		struct channel_gk20a *ch, u64 sms, bool enable)
{

View File

@@ -166,9 +166,6 @@ void gv11b_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc,
		u32 *esr_sm_sel);
int gv11b_gr_sm_trigger_suspend(struct gk20a *g);
void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state);
int gv11b_gr_update_sm_error_state(struct gk20a *g,
		struct channel_gk20a *ch, u32 sm_id,
		struct nvgpu_tsg_sm_error_state *sm_error_state);
int gv11b_gr_set_sm_debug_mode(struct gk20a *g,
		struct channel_gk20a *ch, u64 sms, bool enable);
int gv11b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,

View File

@@ -329,7 +329,6 @@ static const struct gpu_ops gv11b_ops = {
		.update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode,
		.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
		.record_sm_error_state = gv11b_gr_record_sm_error_state,
		.update_sm_error_state = gv11b_gr_update_sm_error_state,
		.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
		.suspend_contexts = gr_gp10b_suspend_contexts,
		.resume_contexts = gr_gk20a_resume_contexts,

View File

@@ -397,9 +397,6 @@ struct gpu_ops {
		u32 (*get_lrf_tex_ltc_dram_override)(struct gk20a *g);
		int (*record_sm_error_state)(struct gk20a *g, u32 gpc, u32 tpc,
				u32 sm, struct channel_gk20a *fault_ch);
		int (*update_sm_error_state)(struct gk20a *g,
				struct channel_gk20a *ch, u32 sm_id,
				struct nvgpu_tsg_sm_error_state *sm_error_state);
		int (*clear_sm_error_state)(struct gk20a *g,
				struct channel_gk20a *ch, u32 sm_id);
		int (*suspend_contexts)(struct gk20a *g,

View File

@@ -260,78 +260,6 @@ static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
	return err;
}

static int nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_write_single_sm_error_state_args *args)
{
	struct gk20a *g = dbg_s->g;
	struct gr_gk20a *gr = &g->gr;
	u32 sm_id;
	struct channel_gk20a *ch;
	struct nvgpu_dbg_gpu_sm_error_state_record sm_error_state_record;
	struct nvgpu_tsg_sm_error_state sm_error_state;
	int err = 0;

	/* Not currently supported in the virtual case */
	if (g->is_virtual) {
		return -ENOSYS;
	}

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (ch == NULL) {
		return -EINVAL;
	}

	sm_id = args->sm_id;
	if (sm_id >= gr->no_of_sm) {
		return -EINVAL;
	}

	nvgpu_speculation_barrier();

	if (args->sm_error_state_record_size > 0) {
		size_t read_size = sizeof(sm_error_state_record);

		if (read_size > args->sm_error_state_record_size)
			read_size = args->sm_error_state_record_size;

		nvgpu_mutex_acquire(&g->dbg_sessions_lock);
		err = copy_from_user(&sm_error_state_record,
				(void __user *)(uintptr_t)
					args->sm_error_state_record_mem,
				read_size);
		nvgpu_mutex_release(&g->dbg_sessions_lock);
		if (err != 0) {
			return -ENOMEM;
		}
	}

	err = gk20a_busy(g);
	if (err != 0) {
		return err;
	}

	sm_error_state.hww_global_esr =
		sm_error_state_record.hww_global_esr;
	sm_error_state.hww_warp_esr =
		sm_error_state_record.hww_warp_esr;
	sm_error_state.hww_warp_esr_pc =
		sm_error_state_record.hww_warp_esr_pc;
	sm_error_state.hww_global_esr_report_mask =
		sm_error_state_record.hww_global_esr_report_mask;
	sm_error_state.hww_warp_esr_report_mask =
		sm_error_state_record.hww_warp_esr_report_mask;

	err = gr_gk20a_elpg_protected_call(g,
			g->ops.gr.update_sm_error_state(g, ch,
					sm_id, &sm_error_state));

	gk20a_idle(g);

	return err;
}

static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_read_single_sm_error_state_args *args)
@@ -2066,11 +1994,6 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
			(struct nvgpu_dbg_gpu_clear_single_sm_error_state_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_WRITE_SINGLE_SM_ERROR_STATE:
		err = nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(dbg_s,
			(struct nvgpu_dbg_gpu_write_single_sm_error_state_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_UNBIND_CHANNEL:
		err = dbg_unbind_channel_gk20a(dbg_s,
			(struct nvgpu_dbg_gpu_unbind_channel_args *)buf);

View File

@@ -168,7 +168,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
		.update_smpc_ctxsw_mode = vgpu_gr_update_smpc_ctxsw_mode,
		.update_hwpm_ctxsw_mode = vgpu_gr_update_hwpm_ctxsw_mode,
		.record_sm_error_state = gm20b_gr_record_sm_error_state,
		.update_sm_error_state = NULL,
		.clear_sm_error_state = vgpu_gr_clear_sm_error_state,
		.suspend_contexts = vgpu_gr_suspend_contexts,
		.resume_contexts = vgpu_gr_resume_contexts,

View File

@@ -185,7 +185,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
		.update_smpc_ctxsw_mode = vgpu_gr_update_smpc_ctxsw_mode,
		.update_hwpm_ctxsw_mode = vgpu_gr_update_hwpm_ctxsw_mode,
		.record_sm_error_state = gv11b_gr_record_sm_error_state,
		.update_sm_error_state = NULL,
		.clear_sm_error_state = vgpu_gr_clear_sm_error_state,
		.suspend_contexts = vgpu_gr_suspend_contexts,
		.resume_contexts = vgpu_gr_resume_contexts,

View File

@@ -1329,17 +1329,6 @@ struct nvgpu_dbg_gpu_clear_single_sm_error_state_args {
#define NVGPU_DBG_GPU_IOCTL_CLEAR_SINGLE_SM_ERROR_STATE \
	_IOW(NVGPU_DBG_GPU_IOCTL_MAGIC, 15, struct nvgpu_dbg_gpu_clear_single_sm_error_state_args)

struct nvgpu_dbg_gpu_write_single_sm_error_state_args {
	__u32 sm_id;
	__u32 padding;
	__u64 sm_error_state_record_mem;
	__u64 sm_error_state_record_size;
};

#define NVGPU_DBG_GPU_IOCTL_WRITE_SINGLE_SM_ERROR_STATE \
	_IOW(NVGPU_DBG_GPU_IOCTL_MAGIC, 16, struct nvgpu_dbg_gpu_write_single_sm_error_state_args)

/*
 * Unbinding/detaching a debugger session from a nvgpu channel
 *