gpu: nvgpu: move gk20a_gr_flush_channel_tlb to common.gr.init

Move the gk20a_gr_flush_channel_tlb function to common.gr.init,
renaming it nvgpu_gr_flush_channel_tlb.

JIRA NVGPU-1885

Change-Id: I4979266d826b0d188b09bbad156103bb11005c84
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2081368
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Vinod G
2019-03-25 17:16:17 -07:00
committed by mobile promotions
parent 83d1a0efc6
commit 4777c81f82
5 changed files with 14 additions and 14 deletions

View File

@@ -46,6 +46,7 @@
#include <nvgpu/gk20a.h>
#include <nvgpu/gr/ctx.h>
#include <nvgpu/gr/subctx.h>
#include <nvgpu/gr/gr.h>
#include <nvgpu/channel.h>
#include <nvgpu/channel_sync.h>
#include <nvgpu/runlist.h>
@@ -323,7 +324,6 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
{
struct gk20a *g = ch->g;
struct fifo_gk20a *f = &g->fifo;
struct gr_gk20a *gr = &g->gr;
struct vm_gk20a *ch_vm = ch->vm;
unsigned long timeout = gk20a_get_gr_idle_timeout(g);
struct dbg_session_gk20a *dbg_s;
@@ -438,7 +438,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
ch->usermode_submit_enabled = false;
}
gk20a_gr_flush_channel_tlb(gr);
nvgpu_gr_flush_channel_tlb(g);
nvgpu_dma_unmap_free(ch_vm, &ch->gpfifo.mem);
nvgpu_big_free(g, ch->gpfifo.pipe);

View File

@@ -79,6 +79,16 @@ static void gr_load_tpc_mask(struct gk20a *g)
g->ops.gr.init.tpc_mask(g, 0, pes_tpc_mask);
}
/* invalidate channel lookup tlb */
void nvgpu_gr_flush_channel_tlb(struct gk20a *g)
{
nvgpu_spinlock_acquire(&g->gr.ch_tlb_lock);
(void) memset(g->gr.chid_tlb, 0,
sizeof(struct gr_channel_map_tlb_entry) *
GR_CHANNEL_MAP_TLB_SIZE);
nvgpu_spinlock_release(&g->gr.ch_tlb_lock);
}
u32 nvgpu_gr_get_idle_timeout(struct gk20a *g)
{
return nvgpu_is_timeouts_enabled(g) ?

View File

@@ -3831,7 +3831,7 @@ int gk20a_gr_suspend(struct gk20a *g)
/* disable all exceptions */
g->ops.gr.intr.enable_exceptions(g, g->gr.config, false);
gk20a_gr_flush_channel_tlb(&g->gr);
nvgpu_gr_flush_channel_tlb(g);
g->gr.initialized = false;
@@ -5909,16 +5909,6 @@ u32 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g)
return global_esr_mask;
}
/* invalidate channel lookup tlb */
/*
 * Invalidate the channel lookup TLB: clear all cached entries in the
 * chid TLB array. The whole clear is done under gr->ch_tlb_lock so
 * readers never see a half-cleared table.
 */
void gk20a_gr_flush_channel_tlb(struct gr_gk20a *gr)
{
nvgpu_spinlock_acquire(&gr->ch_tlb_lock);
/* Zero the entire fixed-size TLB (GR_CHANNEL_MAP_TLB_SIZE entries). */
(void) memset(gr->chid_tlb, 0,
sizeof(struct gr_channel_map_tlb_entry) *
GR_CHANNEL_MAP_TLB_SIZE);
nvgpu_spinlock_release(&gr->ch_tlb_lock);
}
u32 gk20a_gr_get_fecs_ctx_state_store_major_rev_id(struct gk20a *g)
{
return nvgpu_readl(g, gr_fecs_ctx_state_store_major_rev_id_r());

View File

@@ -493,7 +493,6 @@ void gk20a_gr_destroy_ctx_buffer(struct gk20a *g,
struct gr_ctx_buffer_desc *desc);
int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
struct gr_ctx_buffer_desc *desc, size_t size);
void gk20a_gr_flush_channel_tlb(struct gr_gk20a *gr);
u32 gk20a_gr_get_fecs_ctx_state_store_major_rev_id(struct gk20a *g);
u32 gr_gk20a_fecs_falcon_base_addr(void);

View File

@@ -28,6 +28,7 @@
#define NVGPU_GR_IDLE_CHECK_DEFAULT_US 10U
#define NVGPU_GR_IDLE_CHECK_MAX_US 200U
void nvgpu_gr_flush_channel_tlb(struct gk20a *g);
u32 nvgpu_gr_get_idle_timeout(struct gk20a *g);
int nvgpu_gr_init_fs_state(struct gk20a *g);