gpu: nvgpu: Deprecate NVGPU_GPU_IOCTL_INVAL_ICACHE

Deprecate NVGPU_GPU_IOCTL_INVAL_ICACHE as it is unused and has
a broken implementation.

Bug 200439908

Change-Id: Iab6f08cf3dd4853ba6c95cbc8443331bf505e514
Signed-off-by: Anup Mahindre <amahindre@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1800797
GVS: Gerrit_Virtual_Submit
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Anup Mahindre
2018-08-16 10:20:15 +05:30
committed by mobile promotions
parent b15624b39b
commit f5f1875b2a
12 changed files with 0 additions and 86 deletions

View File

@@ -437,7 +437,6 @@ struct gpu_ops {
void (*set_preemption_buffer_va)(struct gk20a *g,
struct nvgpu_mem *mem, u64 gpu_va);
void (*load_tpc_mask)(struct gk20a *g);
int (*inval_icache)(struct gk20a *g, struct channel_gk20a *ch);
int (*trigger_suspend)(struct gk20a *g);
int (*wait_for_pause)(struct gk20a *g, struct nvgpu_warpstate *w_state);
int (*resume_from_pause)(struct gk20a *g);

View File

@@ -8549,51 +8549,6 @@ clean_up:
return err;
}
/*
 * Invalidate the GCC and SM instruction caches for the GR context owned
 * by @ch. Removed by this change: per the commit message the ioctl that
 * called it is unused and this implementation is broken.
 *
 * NOTE(review): the regop reads target unicast GPC0 registers
 * (gr_pri_gpc0_*) while the modify masks come from the broadcast macros
 * (gr_pri_gpcs_*); the final cache-control update also bypasses the ctx
 * regop path and hits the register directly via gk20a_readl/writel —
 * presumably part of why the implementation is considered broken.
 *
 * Returns 0 on success, or the error from gr_gk20a_exec_ctx_ops().
 */
int gr_gk20a_inval_icache(struct gk20a *g, struct channel_gk20a *ch)
{
int err = 0;
u32 cache_ctrl, regval;
struct nvgpu_dbg_reg_op ops;
/* Build a context regop that reads the GPC0 GCC debug register. */
ops.op = REGOP(READ_32);
ops.type = REGOP(TYPE_GR_CTX);
ops.status = REGOP(STATUS_SUCCESS);
ops.value_hi = 0;
ops.and_n_mask_lo = 0;
ops.and_n_mask_hi = 0;
ops.offset = gr_pri_gpc0_gcc_dbg_r();
err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 0, 1);
if (err) {
nvgpu_err(g, "Failed to read register");
return err;
}
regval = ops.value_lo;
/* Set the GCC invalidate field and write the value back as a ctx op. */
ops.op = REGOP(WRITE_32);
ops.value_lo = set_field(regval, gr_pri_gpcs_gcc_dbg_invalidate_m(), 1);
err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 1, 0);
if (err) {
nvgpu_err(g, "Failed to write register");
return err;
}
/* Read the SM cache-control register through the ctx regop path... */
ops.op = REGOP(READ_32);
ops.offset = gr_pri_gpc0_tpc0_sm_cache_control_r();
err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 0, 1);
if (err) {
nvgpu_err(g, "Failed to read register");
return err;
}
/*
 * ...but the regop result is then discarded: the register is re-read
 * and the invalidate bit written back directly, not via ctx ops.
 */
cache_ctrl = gk20a_readl(g, gr_pri_gpc0_tpc0_sm_cache_control_r());
cache_ctrl = set_field(cache_ctrl, gr_pri_gpcs_tpcs_sm_cache_control_invalidate_cache_m(), 1);
gk20a_writel(g, gr_pri_gpc0_tpc0_sm_cache_control_r(), cache_ctrl);
return 0;
}
int gr_gk20a_trigger_suspend(struct gk20a *g)
{
int err = 0;

View File

@@ -775,7 +775,6 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
int *ctx_resident_ch_fd);
void gk20a_gr_enable_gpc_exceptions(struct gk20a *g);
void gk20a_gr_enable_exceptions(struct gk20a *g);
int gr_gk20a_inval_icache(struct gk20a *g, struct channel_gk20a *ch);
int gr_gk20a_trigger_suspend(struct gk20a *g);
int gr_gk20a_wait_for_pause(struct gk20a *g, struct nvgpu_warpstate *w_state);
int gr_gk20a_resume_from_pause(struct gk20a *g);

View File

@@ -292,7 +292,6 @@ static const struct gpu_ops gm20b_ops = {
.write_zcull_ptr = gr_gk20a_write_zcull_ptr,
.write_pm_ptr = gr_gk20a_write_pm_ptr,
.load_tpc_mask = gr_gm20b_load_tpc_mask,
.inval_icache = gr_gk20a_inval_icache,
.trigger_suspend = gr_gk20a_trigger_suspend,
.wait_for_pause = gr_gk20a_wait_for_pause,
.resume_from_pause = gr_gk20a_resume_from_pause,

View File

@@ -352,7 +352,6 @@ static const struct gpu_ops gp106_ops = {
.write_zcull_ptr = gr_gk20a_write_zcull_ptr,
.write_pm_ptr = gr_gk20a_write_pm_ptr,
.load_tpc_mask = gr_gm20b_load_tpc_mask,
.inval_icache = gr_gk20a_inval_icache,
.trigger_suspend = gr_gk20a_trigger_suspend,
.wait_for_pause = gr_gk20a_wait_for_pause,
.resume_from_pause = gr_gk20a_resume_from_pause,

View File

@@ -311,7 +311,6 @@ static const struct gpu_ops gp10b_ops = {
.write_zcull_ptr = gr_gk20a_write_zcull_ptr,
.write_pm_ptr = gr_gk20a_write_pm_ptr,
.load_tpc_mask = gr_gm20b_load_tpc_mask,
.inval_icache = gr_gk20a_inval_icache,
.trigger_suspend = gr_gk20a_trigger_suspend,
.wait_for_pause = gr_gk20a_wait_for_pause,
.resume_from_pause = gr_gk20a_resume_from_pause,

View File

@@ -389,7 +389,6 @@ static const struct gpu_ops gv100_ops = {
.write_zcull_ptr = gr_gv11b_write_zcull_ptr,
.write_pm_ptr = gr_gv11b_write_pm_ptr,
.load_tpc_mask = gr_gv11b_load_tpc_mask,
.inval_icache = gr_gk20a_inval_icache,
.trigger_suspend = gv11b_gr_sm_trigger_suspend,
.wait_for_pause = gr_gk20a_wait_for_pause,
.resume_from_pause = gv11b_gr_resume_from_pause,

View File

@@ -344,7 +344,6 @@ static const struct gpu_ops gv11b_ops = {
.write_zcull_ptr = gr_gv11b_write_zcull_ptr,
.write_pm_ptr = gr_gv11b_write_pm_ptr,
.load_tpc_mask = gr_gv11b_load_tpc_mask,
.inval_icache = gr_gk20a_inval_icache,
.trigger_suspend = gv11b_gr_sm_trigger_suspend,
.wait_for_pause = gr_gk20a_wait_for_pause,
.resume_from_pause = gv11b_gr_resume_from_pause,

View File

@@ -578,27 +578,6 @@ static int nvgpu_gpu_ioctl_l2_fb_ops(struct gk20a *g,
return err;
}
/*
 * Handler for NVGPU_GPU_IOCTL_INVAL_ICACHE: invalidate the i-cache for
 * kepler & maxwell on the channel identified by args->channel_fd.
 * Removed by this change — the ioctl is unused and its backing
 * implementation (g->ops.gr.inval_icache) is broken per the commit
 * message.
 *
 * Returns -EINVAL if the fd does not resolve to a channel, otherwise
 * the result of the inval_icache hal op.
 */
static int nvgpu_gpu_ioctl_inval_icache(
struct gk20a *g,
struct nvgpu_gpu_inval_icache_args *args)
{
struct channel_gk20a *ch;
int err;
ch = gk20a_get_channel_from_file(args->channel_fd);
if (!ch)
return -EINVAL;
/* Take the global lock, since we'll be doing global regops */
nvgpu_mutex_acquire(&g->dbg_sessions_lock);
err = g->ops.gr.inval_icache(g, ch);
nvgpu_mutex_release(&g->dbg_sessions_lock);
/* Drop the reference taken by gk20a_get_channel_from_file(). */
gk20a_channel_put(ch);
return err;
}
static int nvgpu_gpu_ioctl_set_mmu_debug_mode(
struct gk20a *g,
struct nvgpu_gpu_mmu_debug_mode_args *args)
@@ -1824,10 +1803,6 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
err = nvgpu_gpu_ioctl_l2_fb_ops(g,
(struct nvgpu_gpu_l2_fb_args *)buf);
break;
case NVGPU_GPU_IOCTL_INVAL_ICACHE:
err = gr_gk20a_elpg_protected_call(g,
nvgpu_gpu_ioctl_inval_icache(g, (struct nvgpu_gpu_inval_icache_args *)buf));
break;
case NVGPU_GPU_IOCTL_SET_MMUDEBUG_MODE:
err = nvgpu_gpu_ioctl_set_mmu_debug_mode(g,

View File

@@ -185,7 +185,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.write_zcull_ptr = gr_gk20a_write_zcull_ptr,
.write_pm_ptr = gr_gk20a_write_pm_ptr,
.load_tpc_mask = gr_gm20b_load_tpc_mask,
.inval_icache = gr_gk20a_inval_icache,
.trigger_suspend = gr_gk20a_trigger_suspend,
.wait_for_pause = gr_gk20a_wait_for_pause,
.resume_from_pause = gr_gk20a_resume_from_pause,

View File

@@ -203,7 +203,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.write_zcull_ptr = gr_gv11b_write_zcull_ptr,
.write_pm_ptr = gr_gv11b_write_pm_ptr,
.load_tpc_mask = gr_gv11b_load_tpc_mask,
.inval_icache = gr_gk20a_inval_icache,
.trigger_suspend = gv11b_gr_sm_trigger_suspend,
.wait_for_pause = gr_gk20a_wait_for_pause,
.resume_from_pause = gv11b_gr_resume_from_pause,

View File

@@ -404,11 +404,6 @@ struct nvgpu_gpu_l2_fb_args {
__u32 reserved;
} __packed;
/*
 * UAPI argument struct for NVGPU_GPU_IOCTL_INVAL_ICACHE (removed by
 * this change along with the ioctl itself).
 * channel_fd identifies the target channel; reserved is padding.
 */
struct nvgpu_gpu_inval_icache_args {
int channel_fd;
__u32 reserved;
} __packed;
struct nvgpu_gpu_mmu_debug_mode_args {
__u32 state;
__u32 reserved;
@@ -922,8 +917,6 @@ struct nvgpu_gpu_read_single_sm_error_state_args {
_IOWR(NVGPU_GPU_IOCTL_MAGIC, 11, struct nvgpu_gpu_open_channel_args)
#define NVGPU_GPU_IOCTL_FLUSH_L2 \
_IOWR(NVGPU_GPU_IOCTL_MAGIC, 12, struct nvgpu_gpu_l2_fb_args)
#define NVGPU_GPU_IOCTL_INVAL_ICACHE \
_IOWR(NVGPU_GPU_IOCTL_MAGIC, 13, struct nvgpu_gpu_inval_icache_args)
#define NVGPU_GPU_IOCTL_SET_MMUDEBUG_MODE \
_IOWR(NVGPU_GPU_IOCTL_MAGIC, 14, struct nvgpu_gpu_mmu_debug_mode_args)
#define NVGPU_GPU_IOCTL_SET_SM_DEBUG_MODE \