Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: add safety build flag NVGPU_FEATURE_CE
Kernel mode submit depends on CE as part of Vidmem clear ops. Added a flag
to support compiling out CE unit.

Jira NVGPU-3523

Change-Id: I74e956cc602d2f1d6d417ddd0ca7c5f0faf46744
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2127109
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: bf5f86b354
Commit: 168cb16f6b
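The pattern the commit applies is visible in every hunk below: the build system defines NVGPU_FEATURE_CE (via ccflags-y and NVGPU_COMMON_CFLAGS) and compiles common/ce/ce.c only when the flag is set, while callers wrap their CE calls in #ifdef NVGPU_FEATURE_CE, with the vidmem clear path falling back to -ENOSYS when CE is compiled out. As a minimal, self-contained sketch of that compile-out idiom (clear_vidmem() below is a hypothetical stand-in written for illustration, not an nvgpu function):

/*
 * Illustrative only: clear_vidmem() is a stand-in, not nvgpu code.
 * It demonstrates the compile-out idiom used throughout this commit.
 */
#include <stdio.h>
#include <errno.h>

static int clear_vidmem(void)
{
#ifdef NVGPU_FEATURE_CE
	/* Flag defined by the build: take the copy-engine path. */
	return 0;
#else
	/* Flag absent: fail cleanly, as nvgpu_vidmem_clear() does below. */
	return -ENOSYS;
#endif
}

int main(void)
{
	printf("clear_vidmem() -> %d\n", clear_vidmem());
	return 0;
}

Building the sketch with -DNVGPU_FEATURE_CE selects the engine path; omitting the define yields -ENOSYS, mirroring how the safety build keeps NVGPU_FEATURE_CE := 1 until Vidmem clear support is removed.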
@@ -32,6 +32,7 @@ ccflags-y += -DNVGPU_REPLAYABLE_FAULT
 ccflags-y += -DNVGPU_GRAPHICS
 ccflags-y += -DNVGPU_FEATURE_CHANNEL_TSG_SCHEDULING
 ccflags-y += -DNVGPU_FEATURE_CHANNEL_TSG_CONTROL
+ccflags-y += -DNVGPU_FEATURE_CE
 
 obj-$(CONFIG_GK20A) := nvgpu.o
 
@@ -70,6 +70,10 @@ NVGPU_COMMON_CFLAGS += -DNVGPU_USERD
 # Enable Channel WDT for safety build until we switch to user mode submits only
 NVGPU_COMMON_CFLAGS += -DNVGPU_CHANNEL_WDT
 
+# Enable CE support for safety build until we remove Vidmem clear support.
+NVGPU_FEATURE_CE := 1
+NVGPU_COMMON_CFLAGS += -DNVGPU_FEATURE_CE
+
 # Enable Grpahics support for safety build until we switch to compute only
 NVGPU_GRAPHICS := 1
 NVGPU_COMMON_CFLAGS += -DNVGPU_GRAPHICS
@@ -95,7 +95,6 @@ srcs += common/utils/enabled.c \
 	common/fbp/fbp.c \
 	common/io/io.c \
 	common/ecc.c \
-	common/ce/ce.c \
 	common/vbios/bios.c \
 	common/falcon/falcon.c \
 	common/falcon/falcon_sw_gk20a.c \
@@ -316,6 +315,10 @@ ifeq ($(NVGPU_DEBUGGER),1)
 srcs += common/debugger.c
 endif
 
+ifeq ($(NVGPU_FEATURE_CE),1)
+srcs += common/ce/ce.c
+endif
+
 ifeq ($(NVGPU_FECS_TRACE_SUPPORT),1)
 srcs += common/gr/fecs_trace.c \
 	hal/gr/fecs_trace/fecs_trace_gm20b.c \
@@ -132,7 +132,9 @@ int gk20a_prepare_poweroff(struct gk20a *g)
 	nvgpu_falcon_sw_free(g, FALCON_ID_SEC2);
 	nvgpu_falcon_sw_free(g, FALCON_ID_PMU);
 
+#ifdef NVGPU_FEATURE_CE
 	nvgpu_ce_suspend(g);
+#endif
 
 #ifdef NVGPU_DGPU_SUPPORT
 	/* deinit the bios */
@@ -467,11 +469,13 @@ int gk20a_finalize_poweron(struct gk20a *g)
 	/* Restore the debug setting */
 	g->ops.fb.set_debug_mode(g, g->mmu_debug_ctrl);
 
+#ifdef NVGPU_FEATURE_CE
 	err = nvgpu_ce_init_support(g);
 	if (err != 0) {
 		nvgpu_err(g, "failed to init ce");
 		goto done;
 	}
+#endif
 
 	if (g->ops.xve.available_speeds != NULL) {
 		u32 speed;
@@ -642,7 +646,9 @@ static void gk20a_free_cb(struct nvgpu_ref *refcount)
 
 	nvgpu_log(g, gpu_dbg_shutdown, "Freeing GK20A struct!");
 
+#ifdef NVGPU_FEATURE_CE
 	nvgpu_ce_destroy(g);
+#endif
 
 	nvgpu_cbc_remove_support(g);
 
@@ -190,6 +190,7 @@ static int nvgpu_alloc_sysmem_flush(struct gk20a *g)
 	return nvgpu_dma_alloc_sys(g, SZ_4K, &g->mm.sysmem_flush);
 }
 
+#ifdef NVGPU_FEATURE_CE
 static void nvgpu_remove_mm_ce_support(struct mm_gk20a *mm)
 {
 	struct gk20a *g = gk20a_from_mm(mm);
@@ -197,11 +198,11 @@ static void nvgpu_remove_mm_ce_support(struct mm_gk20a *mm)
 	if (mm->vidmem.ce_ctx_id != NVGPU_CE_INVAL_CTX_ID) {
 		nvgpu_ce_delete_context(g, mm->vidmem.ce_ctx_id);
 	}
-
 	mm->vidmem.ce_ctx_id = NVGPU_CE_INVAL_CTX_ID;
 
 	nvgpu_vm_put(mm->ce.vm);
 }
+#endif
 
 static void nvgpu_remove_mm_support(struct mm_gk20a *mm)
 {
@@ -372,6 +373,7 @@ static int nvgpu_init_mmu_debug(struct mm_gk20a *mm)
 	return -ENOMEM;
 }
 
+#ifdef NVGPU_FEATURE_CE
 void nvgpu_init_mm_ce_context(struct gk20a *g)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
@@ -390,6 +392,7 @@ void nvgpu_init_mm_ce_context(struct gk20a *g)
 	}
 #endif
 }
+#endif /* NVGPU_FENCE_CE */
 
 static int nvgpu_init_mm_reset_enable_hw(struct gk20a *g)
 {
@@ -591,7 +594,9 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
 	}
 
 	mm->remove_support = nvgpu_remove_mm_support;
+#ifdef NVGPU_FEATURE_CE
 	mm->remove_ce_support = nvgpu_remove_mm_ce_support;
+#endif
 
 	mm->sw_ready = true;
 
@@ -103,6 +103,7 @@ static int nvgpu_vidmem_do_clear_all(struct gk20a *g)
 
 	vidmem_dbg(g, "Clearing all VIDMEM:");
 
+#ifdef NVGPU_FEATURE_CE
 	err = nvgpu_ce_execute_ops(g,
 			mm->vidmem.ce_ctx_id,
 			0,
@@ -118,6 +119,7 @@ static int nvgpu_vidmem_do_clear_all(struct gk20a *g)
 				"Failed to clear vidmem : %d", err);
 		return err;
 	}
+#endif
 
 	if (fence_out != NULL) {
 		struct nvgpu_timeout timeout;
@@ -477,6 +479,7 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
 			nvgpu_fence_put(last_fence);
 		}
 
+#ifdef NVGPU_FEATURE_CE
 		err = nvgpu_ce_execute_ops(g,
 				g->mm.vidmem.ce_ctx_id,
 				0,
@@ -487,10 +490,16 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
 				NVGPU_CE_MEMSET,
 				0,
 				&fence_out);
+#else
+		/* fail due to lack of ce support */
+		err = -ENOSYS;
+#endif
 
 		if (err != 0) {
+#ifdef NVGPU_FEATURE_CE
 			nvgpu_err(g,
 				"Failed nvgpu_ce_execute_ops[%d]", err);
+#endif
 			return err;
 		}
 
@@ -104,12 +104,13 @@ struct mm_gk20a {
 	struct nvgpu_mem hw_fault_buf[NVGPU_MMU_FAULT_TYPE_NUM];
 	struct mmu_fault_info fault_info[NVGPU_MMU_FAULT_TYPE_NUM];
 	struct nvgpu_mutex hub_isr_mutex;
+#ifdef NVGPU_FEATURE_CE
 	/*
 	 * Separate function to cleanup the CE since it requires a channel to
 	 * be closed which must happen before fifo cleanup.
 	 */
 	void (*remove_ce_support)(struct mm_gk20a *mm);
-
+#endif
 	void (*remove_support)(struct mm_gk20a *mm);
 	bool sw_ready;
 	int physical_bits;
@@ -179,7 +180,9 @@ static inline u64 nvgpu_gmmu_va_small_page_limit(void)
 
 u32 nvgpu_vm_get_pte_size(struct vm_gk20a *vm, u64 base, u64 size);
 
+#ifdef NVGPU_FEATURE_CE
 void nvgpu_init_mm_ce_context(struct gk20a *g);
+#endif
 int nvgpu_init_mm_support(struct gk20a *g);
 
 int nvgpu_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block);
@@ -431,7 +431,9 @@ int gk20a_pm_finalize_poweron(struct device *dev)
 	if (err)
 		goto done;
 
+#ifdef NVGPU_FEATURE_CE
 	nvgpu_init_mm_ce_context(g);
+#endif
 
 	nvgpu_vidmem_thread_unpause(&g->mm);
 