gpu: nvgpu: Remove ELPG_FLUSH

ELPG_FLUSH is not accessible in later GPUs, so we stopped using it
and instead do explicit CBC and L2 flushes. Delete the unused
function op and backing code.

Change-Id: Ic3eb97f2d32ea8fdbe5ec57bd9254268caaf9935
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1323236
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Terje Bergstrom
2017-03-17 09:55:43 -07:00
committed by mobile promotions
parent 74fe1caa2b
commit 866fee0247
6 changed files with 0 additions and 101 deletions

View File

@@ -148,7 +148,6 @@ struct gpu_ops {
void (*init_cbc)(struct gk20a *g, struct gr_gk20a *gr);
void (*sync_debugfs)(struct gk20a *g);
void (*init_fs_state)(struct gk20a *g);
void (*elpg_flush)(struct gk20a *g);
void (*isr)(struct gk20a *g);
u32 (*cbc_fix_config)(struct gk20a *g, int base);
void (*flush)(struct gk20a *g);

View File

@@ -189,44 +189,6 @@ static void gk20a_ltc_isr(struct gk20a *g)
gk20a_writel(g, ltc_ltc0_ltss_intr_r(), intr);
}
/* Flushes the compression bit cache as well as "data".
 * Note: the name here is a bit of a misnomer. ELPG uses this
 * internally... but ELPG doesn't have to be on to do it manually.
 *
 * Kicks a G_ELPG flush in the broadcast LTC register and busy-polls the
 * LTC0 unicast register until the flush-pending bit clears. Caller is
 * expected to hold whatever lock serializes LTC access ("_locked").
 * NOTE(review): only LTC0 is polled here; the gm20b variant polls every
 * LTC — presumably single-LTC is sufficient on gk20a, confirm.
 */
static void gk20a_mm_g_elpg_flush_locked(struct gk20a *g)
{
u32 data;
/* Retry budget: up to 100 polls, 5 us apart (~500 us worst case). */
s32 retry = 100;
gk20a_dbg_fn("");
trace_gk20a_mm_g_elpg_flush_locked(dev_name(g->dev));
/* Make sure all previous writes are committed to the L2. There's no
guarantee that writes are to DRAM. This will be a sysmembar internal
to the L2. */
/* Write the flush-pending bit to the broadcast (all-LTC) register. */
gk20a_writel(g, ltc_ltcs_ltss_g_elpg_r(),
ltc_ltcs_ltss_g_elpg_flush_pending_f());
do {
/* Read back the unicast LTC0 status to see if the flush is done. */
data = gk20a_readl(g, ltc_ltc0_ltss_g_elpg_r());
if (ltc_ltc0_ltss_g_elpg_flush_v(data) ==
ltc_ltc0_ltss_g_elpg_flush_pending_v()) {
gk20a_dbg_info("g_elpg_flush 0x%x", data);
retry--;
udelay(5);
} else
break;
/* On pre-silicon (simulation/FPGA) keep polling forever; timing
there is not representative, so a timeout would be spurious. */
} while (retry >= 0 || !tegra_platform_is_silicon());
/* Warn (but do not fail) if real hardware never cleared the bit. */
if (tegra_platform_is_silicon() && retry < 0)
gk20a_warn(dev_from_gk20a(g),
"g_elpg_flush too many retries");
trace_gk20a_mm_g_elpg_flush_locked_done(dev_name(g->dev));
}
static int gk20a_determine_L2_size_bytes(struct gk20a *g)
{
u32 lts_per_ltc;
@@ -279,7 +241,6 @@ void gk20a_init_ltc(struct gpu_ops *gops)
#ifdef CONFIG_DEBUG_FS
gops->ltc.sync_debugfs = gk20a_ltc_sync_debugfs;
#endif
gops->ltc.elpg_flush = gk20a_mm_g_elpg_flush_locked;
gops->ltc.init_fs_state = gk20a_ltc_init_fs_state;
gops->ltc.isr = gk20a_ltc_isr;
}

View File

@@ -221,54 +221,6 @@ void gm20b_ltc_isr(struct gk20a *g)
}
}
/* gm20b variant of the G_ELPG flush: triggers the flush via the broadcast
 * LTC register, then polls *each* LTC's unicast status register until all
 * of them report the flush-pending bit clear. Caller must hold the lock
 * that serializes LTC access ("_locked").
 *
 * NOTE(review): `bool done[g->ltc_count]` is a variable-length array on
 * the kernel stack; ltc_count is presumably small (a few LTCs) so this is
 * bounded, but a fixed-size array or bitmap would be safer — confirm.
 */
void gm20b_ltc_g_elpg_flush_locked(struct gk20a *g)
{
u32 data;
/* Per-LTC completion flags, indexed by LTC number. */
bool done[g->ltc_count];
/* Retry budget: up to 100 poll rounds, 5 us apart (~500 us worst case). */
s32 retry = 100;
unsigned int i;
unsigned int num_done = 0;
/* Stride between consecutive per-LTC unicast register apertures,
derived from the LTC1/LTC0 register offsets. */
u32 ltc_d = ltc_ltc1_ltss_g_elpg_r() - ltc_ltc0_ltss_g_elpg_r();
gk20a_dbg_fn("");
trace_gk20a_mm_g_elpg_flush_locked(dev_name(g->dev));
for (i = 0; i < g->ltc_count; i++)
done[i] = 0;
/* Kick the flush on all LTCs at once via the broadcast register. */
gk20a_writel(g, ltc_ltcs_ltss_g_elpg_r(),
ltc_ltcs_ltss_g_elpg_flush_pending_f());
do {
/* Poll every LTC that hasn't completed yet. */
for (i = 0; i < g->ltc_count; i++) {
if (done[i])
continue;
data = gk20a_readl(g,
ltc_ltc0_ltss_g_elpg_r() + ltc_d * i);
if (ltc_ltc0_ltss_g_elpg_flush_v(data)) {
/* Still pending on this LTC. */
gk20a_dbg_info("g_elpg_flush 0x%x", data);
} else {
done[i] = 1;
num_done++;
}
}
if (num_done < g->ltc_count) {
retry--;
udelay(5);
} else
break;
/* On pre-silicon platforms poll indefinitely; simulated timing
makes a retry timeout meaningless there. */
} while (retry >= 0 || !tegra_platform_is_silicon());
/* Warn (but do not fail) if real hardware exhausted the retries. */
if (retry < 0 && tegra_platform_is_silicon())
gk20a_warn(dev_from_gk20a(g),
"g_elpg_flush too many retries");
trace_gk20a_mm_g_elpg_flush_locked_done(dev_name(g->dev));
}
u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
{
u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r());
@@ -412,7 +364,6 @@ void gm20b_init_ltc(struct gpu_ops *gops)
gops->ltc.init_fs_state = gm20b_ltc_init_fs_state;
gops->ltc.init_comptags = gm20b_ltc_init_comptags;
gops->ltc.cbc_ctrl = gm20b_ltc_cbc_ctrl;
gops->ltc.elpg_flush = gm20b_ltc_g_elpg_flush_locked;
gops->ltc.isr = gm20b_ltc_isr;
gops->ltc.cbc_fix_config = gm20b_ltc_cbc_fix_config;
gops->ltc.flush = gm20b_flush_ltc;

View File

@@ -21,7 +21,6 @@ void gm20b_init_ltc(struct gpu_ops *gops);
void gm20b_ltc_init_fs_state(struct gk20a *g);
int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
u32 min, u32 max);
void gm20b_ltc_g_elpg_flush_locked(struct gk20a *g);
void gm20b_ltc_isr(struct gk20a *g);
u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base);
void gm20b_flush_ltc(struct gk20a *g);

View File

@@ -216,7 +216,6 @@ void gp10b_init_ltc(struct gpu_ops *gops)
gops->ltc.init_fs_state = gp10b_ltc_init_fs_state;
gops->ltc.init_comptags = gp10b_ltc_init_comptags;
gops->ltc.cbc_ctrl = gm20b_ltc_cbc_ctrl;
gops->ltc.elpg_flush = gm20b_ltc_g_elpg_flush_locked;
gops->ltc.isr = gp10b_ltc_isr;
gops->ltc.cbc_fix_config = gm20b_ltc_cbc_fix_config;
gops->ltc.flush = gm20b_flush_ltc;

View File

@@ -120,16 +120,6 @@ DEFINE_EVENT(gk20a, gr_gk20a_handle_sw_method,
TP_ARGS(name)
);
/* Tracepoints bracketing the G_ELPG flush: one fired on entry, one on
 * completion. Both reuse the `gk20a` event class and carry only the
 * device name string. */
DEFINE_EVENT(gk20a, gk20a_mm_g_elpg_flush_locked,
TP_PROTO(const char *name),
TP_ARGS(name)
);
DEFINE_EVENT(gk20a, gk20a_mm_g_elpg_flush_locked_done,
TP_PROTO(const char *name),
TP_ARGS(name)
);
DECLARE_EVENT_CLASS(gk20a_channel,
TP_PROTO(int channel),
TP_ARGS(channel),