Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: Remove extraneous FB flush calls
gk20a_mm_fb_flush() invoked both G_ELPG_FLUSH and FB_FLUSH. Remove the
invocation of G_ELPG_FLUSH, and replace calls to gk20a_mm_fb_flush() with
gk20a_mm_l2_flush() where appropriate.

Bug 1421824

Change-Id: I02af4bdc3b7bd26d0f6a8d610f70349269775a36
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/408210
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Committed by: Dan Willemsen
Parent: 846f0c4f41
Commit: bcf8c6411c
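The distinction behind the change: an FB flush only pushes L2 contents toward
the framebuffer/DRAM, while CPU access to a GPU-cacheable buffer (such as the
gr_ctx image touched below) needs the GPU L2 flushed and invalidated. A
minimal sketch of that caller-side pattern, assuming only the two flush
signatures as they appear in this diff; the stub declarations and the helper
around them are illustrative, not driver code:

#include <linux/types.h>

struct gk20a;	/* opaque device handle, as in the driver */

/* The two flush entry points, with signatures as used in this commit. */
int gk20a_mm_fb_flush(struct gk20a *g);	/* commit L2 toward FB/DRAM */
int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate);

/* Hypothetical helper: patch one word of a GPU-cacheable buffer from the CPU. */
static void cpu_patch_gpu_cacheable(struct gk20a *g, u32 *cpu_va,
				    u32 word, u32 val)
{
	/*
	 * Dirty lines for this buffer may sit in the GPU L2, and the GPU
	 * may later re-read stale lines. Flush and invalidate the L2 so
	 * the CPU sees current data and the GPU re-fetches the update;
	 * an FB flush alone would not address this.
	 */
	gk20a_mm_l2_flush(g, true);

	cpu_va[word] = val;
}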
@@ -655,8 +655,6 @@ static int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 
 	gk20a_dbg_fn("");
 
-	gk20a_mm_fb_flush(c->g);
-
 	inst_ptr = c->inst_block.cpuva;
 	if (!inst_ptr)
 		return -ENOMEM;
@@ -1573,7 +1571,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 	ctx_header_words = roundup(ctx_header_bytes, sizeof(u32));
 	ctx_header_words >>= 2;
 
-	gk20a_mm_fb_flush(g);
+	gk20a_mm_l2_flush(g, true);
 
 	for (i = 0; i < ctx_header_words; i++) {
 		data = gk20a_mem_rd32(ctx_ptr, i);
@@ -1634,11 +1632,9 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 	void *ctx_ptr = NULL;
 	u32 data;
 
-	/*XXX caller responsible for making sure the channel is quiesced? */
-
 	/* Channel gr_ctx buffer is gpu cacheable.
 	   Flush and invalidate before cpu update. */
-	gk20a_mm_fb_flush(g);
+	gk20a_mm_l2_flush(g, true);
 
 	ctx_ptr = vmap(ch_ctx->gr_ctx.pages,
 			PAGE_ALIGN(ch_ctx->gr_ctx.size) >> PAGE_SHIFT,
@@ -1678,7 +1674,7 @@ static int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 
 	/* Channel gr_ctx buffer is gpu cacheable.
 	   Flush and invalidate before cpu update. */
-	gk20a_mm_fb_flush(g);
+	gk20a_mm_l2_flush(g, true);
 
 	ctx_ptr = vmap(ch_ctx->gr_ctx.pages,
 			PAGE_ALIGN(ch_ctx->gr_ctx.size) >> PAGE_SHIFT,
@@ -6686,7 +6682,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
			goto cleanup;
		}
 
-		gk20a_mm_fb_flush(g);
+		gk20a_mm_l2_flush(g, true);
 
		/* write to appropriate place in context image,
		 * first have to figure out where that really is */
@@ -2801,8 +2801,6 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 
 	mutex_lock(&mm->l2_op_lock);
 
-	g->ops.ltc.elpg_flush(g);
-
 	/* Make sure all previous writes are committed to the L2. There's no
 	   guarantee that writes are to DRAM. This will be a sysmembar internal
 	   to the L2. */
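With the G_ELPG_FLUSH kick removed, gk20a_mm_fb_flush() is left with just the
FB flush itself, the sysmembar-style L2 commit the surviving comment
describes. A condensed sketch of that kick-and-poll shape; the register
helpers are paraphrased from the nvgpu generated hw headers and declared here
only to keep the sketch self-contained:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

struct gk20a;

/* Paraphrased register accessors and hw-header helpers (illustrative). */
u32 gk20a_readl(struct gk20a *g, u32 r);
void gk20a_writel(struct gk20a *g, u32 r, u32 v);
u32 flush_fb_flush_r(void);
u32 flush_fb_flush_pending_busy_f(void);
u32 flush_fb_flush_pending_v(u32 r);
u32 flush_fb_flush_pending_busy_v(void);

static int fb_flush_sketch(struct gk20a *g)
{
	s32 retry = 100;
	u32 data;

	/* Kick the FB flush; this commits the L2 toward DRAM. */
	gk20a_writel(g, flush_fb_flush_r(), flush_fb_flush_pending_busy_f());

	/* Poll until hardware no longer reports the flush as pending. */
	do {
		data = gk20a_readl(g, flush_fb_flush_r());
		if (flush_fb_flush_pending_v(data) !=
		    flush_fb_flush_pending_busy_v())
			return 0;
		usleep_range(20, 40);
	} while (--retry >= 0);

	return -EBUSY;
}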