mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: add cache maintenance timeout override
Add functions to get per-chip cache maintenance timeout overrides.

JIRA: NVGPUGV100-GV100

Change-Id: Ie14efc616e7af52ede60031c789bd2ae70857a6e
Signed-off-by: David Nieto <dmartineznie@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1582768
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit fea32c74dc (parent 1cee7b2a39)
committed by mobile promotions
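The diff adds a get_flush_retries hook to the mm HAL so each chip can override the retry budget used by the FB flush, L2 invalidate, L2 flush, and CBC clean loops. A minimal sketch of what a per-chip override could look like; the function name gv100_mm_get_flush_retries and every retry count below are hypothetical illustrations, not part of this commit:

/*
 * Sketch only: a hypothetical per-chip override for the new
 * g->ops.mm.get_flush_retries() hook. The function name and the
 * retry counts are made up for illustration; this commit only adds
 * the hook, the nvgpu_flush_op enum, and the callers in the diff.
 */
#include "gk20a/gk20a.h"
#include "gk20a/mm_gk20a.h"

static u32 gv100_mm_get_flush_retries(struct gk20a *g, enum nvgpu_flush_op op)
{
	switch (op) {
	case NVGPU_FLUSH_FB:
		return 500;	/* hypothetical: larger budget than gk20a's 100 */
	case NVGPU_FLUSH_L2_INV:
		return 1000;
	case NVGPU_FLUSH_L2_FLUSH:
		return 4000;
	case NVGPU_FLUSH_CBC_CLEAN:
		return 1000;
	default:
		return 200;	/* conservative fallback for NVGPU_FLUSH_DEFAULT */
	}
}

Because every caller null-checks g->ops.mm.get_flush_retries before using it, chips that install no override keep the existing hard-coded retry counts (100 for FB flush, 200 for L2 invalidate, 2000 for L2 flush, 200 for CBC clean).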
@@ -137,6 +137,7 @@ enum gk20a_cbc_op {
 
 enum nvgpu_unit;
 
+enum nvgpu_flush_op;
 /*
  * gpu_ops should only contain function pointers! Non-function pointer members
  * should go in struct gk20a or be implemented with the boolean flag API defined
@@ -569,6 +570,7 @@ struct gpu_ops {
 			struct tsg_gk20a *tsg);
 		void (*deinit_eng_method_buffers)(struct gk20a *g,
 			struct tsg_gk20a *tsg);
+		u32 (*get_preempt_timeout)(struct gk20a *g);
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
 		int (*alloc_syncpt_buf)(struct channel_gk20a *c,
 			u32 syncpt_id, struct nvgpu_mem *syncpt_buf);
@@ -760,6 +762,8 @@ struct gpu_ops {
 		void (*fault_info_mem_destroy)(struct gk20a *g);
 		u32 (*get_kind_invalid)(void);
 		u32 (*get_kind_pitch)(void);
+		u32 (*get_flush_retries)(struct gk20a *g,
+			enum nvgpu_flush_op op);
 	} mm;
 	/*
 	 * This function is called to allocate secure memory (memory
@@ -790,6 +790,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 	u32 data;
 	int ret = 0;
 	struct nvgpu_timeout timeout;
+	u32 retries;
 
 	gk20a_dbg_fn("");
 
@@ -799,7 +800,12 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 		return 0;
 	}
 
-	nvgpu_timeout_init(g, &timeout, 100, NVGPU_TIMER_RETRY_TIMER);
+	retries = 100;
+
+	if (g->ops.mm.get_flush_retries)
+		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_FB);
+
+	nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER);
 
 	nvgpu_mutex_acquire(&mm->l2_op_lock);
 
@@ -844,10 +850,14 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
 {
 	u32 data;
 	struct nvgpu_timeout timeout;
+	u32 retries = 200;
 
 	trace_gk20a_mm_l2_invalidate(g->name);
 
-	nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER);
+	if (g->ops.mm.get_flush_retries)
+		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_INV);
+
+	nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER);
 
 	/* Invalidate any clean lines from the L2 so subsequent reads go to
 	   DRAM. Dirty lines are not affected by this operation. */
@@ -891,6 +901,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 	struct mm_gk20a *mm = &g->mm;
 	u32 data;
 	struct nvgpu_timeout timeout;
+	u32 retries = 2000;
 
 	gk20a_dbg_fn("");
 
@@ -898,7 +909,10 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 	if (!g->power_on)
 		goto hw_was_off;
 
-	nvgpu_timeout_init(g, &timeout, 2000, NVGPU_TIMER_RETRY_TIMER);
+	if (g->ops.mm.get_flush_retries)
+		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_FLUSH);
+
+	nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER);
 
 	nvgpu_mutex_acquire(&mm->l2_op_lock);
 
@@ -939,6 +953,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
 	struct mm_gk20a *mm = &g->mm;
 	u32 data;
 	struct nvgpu_timeout timeout;
+	u32 retries = 200;
 
 	gk20a_dbg_fn("");
 
@@ -946,7 +961,10 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
 	if (!g->power_on)
 		goto hw_was_off;
 
-	nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER);
+	if (g->ops.mm.get_flush_retries)
+		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_CBC_CLEAN);
+
+	nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER);
 
 	nvgpu_mutex_acquire(&mm->l2_op_lock);
 
@@ -315,6 +315,14 @@ static inline u64 __nv_gmmu_va_small_page_limit(void)
 	return ((u64)SZ_1G * 56);
 }
 
+enum nvgpu_flush_op {
+	NVGPU_FLUSH_DEFAULT,
+	NVGPU_FLUSH_FB,
+	NVGPU_FLUSH_L2_INV,
+	NVGPU_FLUSH_L2_FLUSH,
+	NVGPU_FLUSH_CBC_CLEAN,
+};
+
 enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
 	u64 base, u64 size);
 enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size);
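For completeness, a chip would opt in by pointing the HAL at its override while populating gpu_ops. A hypothetical wiring sketch, reusing the illustrative gv100_mm_get_flush_retries() from above (this commit itself installs no per-chip implementation):

/*
 * Hypothetical wiring, not part of this commit: a later per-chip
 * patch would install the override during HAL initialization.
 */
static void gv100_init_mm_hal_sketch(struct gk20a *g)
{
	g->ops.mm.get_flush_retries = gv100_mm_get_flush_retries;
}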