gpu: nvgpu: compile out fb and ramin non-fusa code

FBPA-related functions are not supported in the iGPU safety build. Don't
compile them if CONFIG_NVGPU_DGPU is not set.
Also compile out the fb and ramin HALs that are dGPU-specific.
Update the tests accordingly.

JIRA NVGPU-4529

Change-Id: I1cd976c3bd17707c0d174a62cf753590512c3a37
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2265402
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Sagar Kamble
2019-12-19 09:45:15 +05:30
committed by Alex Waterman
parent 1ec4a4f8ec
commit f3421645b2
11 changed files with 118 additions and 130 deletions

View File

@@ -62,9 +62,11 @@ void nvgpu_ecc_free(struct gk20a *g)
g->ops.fb.fb_ecc_free(g); g->ops.fb.fb_ecc_free(g);
} }
#ifdef CONFIG_NVGPU_DGPU
if (g->ops.fb.fbpa_ecc_free != NULL) { if (g->ops.fb.fbpa_ecc_free != NULL) {
g->ops.fb.fbpa_ecc_free(g); g->ops.fb.fbpa_ecc_free(g);
} }
#endif
if (g->ops.pmu.ecc_free != NULL) { if (g->ops.pmu.ecc_free != NULL) {
g->ops.pmu.ecc_free(g); g->ops.pmu.ecc_free(g);

View File

@@ -340,6 +340,7 @@ static int nvgpu_init_release_tpc_pg_lock(struct gk20a *g)
} }
#endif #endif
#ifdef CONFIG_NVGPU_DGPU
static int nvgpu_init_fb_mem_unlock(struct gk20a *g) static int nvgpu_init_fb_mem_unlock(struct gk20a *g)
{ {
int err; int err;
@@ -356,6 +357,21 @@ static int nvgpu_init_fb_mem_unlock(struct gk20a *g)
return 0; return 0;
} }
static int nvgpu_init_fbpa_ecc(struct gk20a *g)
{
int err;
if (g->ops.fb.fbpa_ecc_init != NULL && !g->ecc.initialized) {
err = g->ops.fb.fbpa_ecc_init(g);
if (err != 0) {
return err;
}
}
return 0;
}
#endif
#ifdef CONFIG_NVGPU_TPC_POWERGATE #ifdef CONFIG_NVGPU_TPC_POWERGATE
static int nvgpu_init_power_gate(struct gk20a *g) static int nvgpu_init_power_gate(struct gk20a *g)
{ {
@@ -499,20 +515,6 @@ static int nvgpu_init_interrupt_setup(struct gk20a *g)
return 0; return 0;
} }
static int nvgpu_init_fbpa_ecc(struct gk20a *g)
{
int err;
if (g->ops.fb.fbpa_ecc_init != NULL && !g->ecc.initialized) {
err = g->ops.fb.fbpa_ecc_init(g);
if (err != 0) {
return err;
}
}
return 0;
}
typedef int (*nvgpu_init_func_t)(struct gk20a *g); typedef int (*nvgpu_init_func_t)(struct gk20a *g);
struct nvgpu_init_table_t { struct nvgpu_init_table_t {
nvgpu_init_func_t func; nvgpu_init_func_t func;
@@ -576,13 +578,18 @@ int nvgpu_finalize_poweron(struct gk20a *g)
NVGPU_INIT_TABLE_ENTRY(g->ops.clk.init_clk_support, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(g->ops.clk.init_clk_support, NO_FLAG),
NVGPU_INIT_TABLE_ENTRY(g->ops.nvlink.init, NVGPU_INIT_TABLE_ENTRY(g->ops.nvlink.init,
NVGPU_SUPPORT_NVLINK), NVGPU_SUPPORT_NVLINK),
#ifdef CONFIG_NVGPU_DGPU
NVGPU_INIT_TABLE_ENTRY(nvgpu_init_fbpa_ecc, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(nvgpu_init_fbpa_ecc, NO_FLAG),
NVGPU_INIT_TABLE_ENTRY(g->ops.fb.init_fbpa, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(g->ops.fb.init_fbpa, NO_FLAG),
#endif
#ifdef CONFIG_NVGPU_DEBUGGER #ifdef CONFIG_NVGPU_DEBUGGER
NVGPU_INIT_TABLE_ENTRY(g->ops.ptimer.config_gr_tick_freq, NVGPU_INIT_TABLE_ENTRY(g->ops.ptimer.config_gr_tick_freq,
NO_FLAG), NO_FLAG),
#endif #endif
#ifdef CONFIG_NVGPU_DGPU
NVGPU_INIT_TABLE_ENTRY(&nvgpu_init_fb_mem_unlock, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(&nvgpu_init_fb_mem_unlock, NO_FLAG),
#endif
NVGPU_INIT_TABLE_ENTRY(g->ops.fifo.reset_enable_hw, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(g->ops.fifo.reset_enable_hw, NO_FLAG),
NVGPU_INIT_TABLE_ENTRY(g->ops.ltc.init_ltc_support, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(g->ops.ltc.init_ltc_support, NO_FLAG),
NVGPU_INIT_TABLE_ENTRY(g->ops.mm.init_mm_support, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(g->ops.mm.init_mm_support, NO_FLAG),

View File

@@ -170,12 +170,12 @@ static void nvgpu_remove_mm_support(struct mm_gk20a *mm)
#endif #endif
#ifdef CONFIG_NVGPU_DGPU #ifdef CONFIG_NVGPU_DGPU
nvgpu_vidmem_destroy(g); nvgpu_vidmem_destroy(g);
#endif
nvgpu_pd_cache_fini(g);
if (g->ops.ramin.deinit_pdb_cache_war != NULL) { if (g->ops.ramin.deinit_pdb_cache_war != NULL) {
g->ops.ramin.deinit_pdb_cache_war(g); g->ops.ramin.deinit_pdb_cache_war(g);
} }
#endif
nvgpu_pd_cache_fini(g);
} }
/* pmu vm, share channel_vm interfaces */ /* pmu vm, share channel_vm interfaces */
@@ -560,6 +560,7 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
return 0; return 0;
} }
#ifdef CONFIG_NVGPU_DGPU
static int nvgpu_init_mm_pdb_cache_war(struct gk20a *g) static int nvgpu_init_mm_pdb_cache_war(struct gk20a *g)
{ {
int err; int err;
@@ -580,6 +581,7 @@ static int nvgpu_init_mm_pdb_cache_war(struct gk20a *g)
return 0; return 0;
} }
#endif
/* /*
* Called through the HAL to handle vGPU: the vGPU doesn't have HW to initialize * Called through the HAL to handle vGPU: the vGPU doesn't have HW to initialize
@@ -638,10 +640,12 @@ int nvgpu_init_mm_support(struct gk20a *g)
nvgpu_init_mm_reset_enable_hw(g); nvgpu_init_mm_reset_enable_hw(g);
#ifdef CONFIG_NVGPU_DGPU
err = nvgpu_init_mm_pdb_cache_war(g); err = nvgpu_init_mm_pdb_cache_war(g);
if (err != 0) { if (err != 0) {
return err; return err;
} }
#endif
err = nvgpu_init_mm_setup_sw(g); err = nvgpu_init_mm_setup_sw(g);
if (err != 0) { if (err != 0) {

View File

@@ -586,7 +586,9 @@ static const struct gpu_ops gm20b_ops = {
.set_mmu_debug_mode = gm20b_fb_set_mmu_debug_mode, .set_mmu_debug_mode = gm20b_fb_set_mmu_debug_mode,
#endif #endif
.tlb_invalidate = gm20b_fb_tlb_invalidate, .tlb_invalidate = gm20b_fb_tlb_invalidate,
#ifdef CONFIG_NVGPU_DGPU
.mem_unlock = NULL, .mem_unlock = NULL,
#endif
}, },
.cg = { .cg = {
.slcg_bus_load_gating_prod = .slcg_bus_load_gating_prod =

View File

@@ -668,7 +668,9 @@ static const struct gpu_ops gp10b_ops = {
.set_mmu_debug_mode = gm20b_fb_set_mmu_debug_mode, .set_mmu_debug_mode = gm20b_fb_set_mmu_debug_mode,
#endif #endif
.tlb_invalidate = gm20b_fb_tlb_invalidate, .tlb_invalidate = gm20b_fb_tlb_invalidate,
#ifdef CONFIG_NVGPU_DGPU
.mem_unlock = NULL, .mem_unlock = NULL,
#endif
}, },
.cg = { .cg = {
.slcg_bus_load_gating_prod = .slcg_bus_load_gating_prod =

View File

@@ -813,7 +813,9 @@ static const struct gpu_ops gv11b_ops = {
.handle_replayable_fault = gv11b_fb_handle_replayable_mmu_fault, .handle_replayable_fault = gv11b_fb_handle_replayable_mmu_fault,
.mmu_invalidate_replay = gv11b_fb_mmu_invalidate_replay, .mmu_invalidate_replay = gv11b_fb_mmu_invalidate_replay,
#endif #endif
#ifdef CONFIG_NVGPU_DGPU
.mem_unlock = NULL, .mem_unlock = NULL,
#endif
.write_mmu_fault_buffer_lo_hi = .write_mmu_fault_buffer_lo_hi =
gv11b_fb_write_mmu_fault_buffer_lo_hi, gv11b_fb_write_mmu_fault_buffer_lo_hi,
.write_mmu_fault_buffer_get = .write_mmu_fault_buffer_get =

View File

@@ -117,28 +117,6 @@ struct gops_fb {
*/ */
void (*fb_ecc_free)(struct gk20a *g); void (*fb_ecc_free)(struct gk20a *g);
/**
* @brief Initialize FBPA unit ECC support.
*
* @param g [in] Pointer to GPU driver struct.
*
* This function allocates memory to track the ecc error counts
* for FBPA unit.
*
* @return 0 in case of success, < 0 in case of failure.
*/
int (*fbpa_ecc_init)(struct gk20a *g);
/**
* @brief Free FBPA unit ECC support.
*
* @param g [in] Pointer to GPU driver struct.
*
* This function deallocates memory allocated for ecc error counts
* for FBPA unit.
*/
void (*fbpa_ecc_free)(struct gk20a *g);
/** /**
* @brief Initializes frame buffer h/w configuration. * @brief Initializes frame buffer h/w configuration.
* *
@@ -385,16 +363,36 @@ struct gops_fb {
int (*mmu_invalidate_replay)(struct gk20a *g, int (*mmu_invalidate_replay)(struct gk20a *g,
u32 invalidate_replay_val); u32 invalidate_replay_val);
#endif #endif
#ifdef CONFIG_NVGPU_DGPU
/**
* @brief Initialize FBPA unit ECC support.
*
* @param g [in] Pointer to GPU driver struct.
*
* This function allocates memory to track the ecc error counts
* for FBPA unit.
*
* @return 0 in case of success, < 0 in case of failure.
*/
int (*fbpa_ecc_init)(struct gk20a *g);
/**
* @brief Free FBPA unit ECC support.
*
* @param g [in] Pointer to GPU driver struct.
*
* This function deallocates memory allocated for ecc error counts
* for FBPA unit.
*/
void (*fbpa_ecc_free)(struct gk20a *g);
int (*mem_unlock)(struct gk20a *g); int (*mem_unlock)(struct gk20a *g);
int (*init_nvlink)(struct gk20a *g); int (*init_nvlink)(struct gk20a *g);
int (*enable_nvlink)(struct gk20a *g); int (*enable_nvlink)(struct gk20a *g);
#ifdef CONFIG_NVGPU_DGPU
size_t (*get_vidmem_size)(struct gk20a *g); size_t (*get_vidmem_size)(struct gk20a *g);
#endif
int (*apply_pdb_cache_war)(struct gk20a *g); int (*apply_pdb_cache_war)(struct gk20a *g);
int (*init_fbpa)(struct gk20a *g); int (*init_fbpa)(struct gk20a *g);
void (*handle_fbpa_intr)(struct gk20a *g, u32 fbpa_id); void (*handle_fbpa_intr)(struct gk20a *g, u32 fbpa_id);
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */ /** @endcond DOXYGEN_SHOULD_SKIP_THIS */
}; };

View File

@@ -118,28 +118,6 @@ struct gops_ramin {
struct nvgpu_mem *pdb_mem, struct nvgpu_mem *pdb_mem,
bool replayable); bool replayable);
/**
* @brief Init WAR for PDB cache.
*
* @param g [in] Pointer to GPU driver struct.
*
* This HAL allows implementing chip specific initialization
* related to PDB cache.
*
* @return 0 in case of success, < 0 in case of failure.
*/
int (*init_pdb_cache_war)(struct gk20a *g);
/**
* @brief Deinit WAR for PDB cache.
*
* @param g [in] Pointer to GPU driver struct.
*
* This HAL allows implementing chip specific de-initialization
* related to PDB cache.
*/
void (*deinit_pdb_cache_war)(struct gk20a *g);
/** /**
* @brief Instance Block shift. * @brief Instance Block shift.
* *
@@ -163,6 +141,30 @@ struct gops_ramin {
/** @cond DOXYGEN_SHOULD_SKIP_THIS */ /** @cond DOXYGEN_SHOULD_SKIP_THIS */
#ifdef CONFIG_NVGPU_DGPU
/**
* @brief Init WAR for PDB cache.
*
* @param g [in] Pointer to GPU driver struct.
*
* This HAL allows implementing chip specific initialization
* related to PDB cache.
*
* @return 0 in case of success, < 0 in case of failure.
*/
int (*init_pdb_cache_war)(struct gk20a *g);
/**
* @brief Deinit WAR for PDB cache.
*
* @param g [in] Pointer to GPU driver struct.
*
* This HAL allows implementing chip specific de-initialization
* related to PDB cache.
*/
void (*deinit_pdb_cache_war)(struct gk20a *g);
#endif
void (*set_adr_limit)(struct gk20a *g, void (*set_adr_limit)(struct gk20a *g,
struct nvgpu_mem *inst_block, u64 va_limit); struct nvgpu_mem *inst_block, u64 va_limit);
void (*set_eng_method_buffer)(struct gk20a *g, void (*set_eng_method_buffer)(struct gk20a *g,

View File

@@ -486,8 +486,6 @@ static void set_poweron_funcs_success(struct gk20a *g)
setup_simple_init_func_success(&g->ops.mm.pd_cache_init, i++); setup_simple_init_func_success(&g->ops.mm.pd_cache_init, i++);
setup_simple_init_func_success(&g->ops.clk.init_clk_support, i++); setup_simple_init_func_success(&g->ops.clk.init_clk_support, i++);
setup_simple_init_func_success(&g->ops.nvlink.init, i++); setup_simple_init_func_success(&g->ops.nvlink.init, i++);
setup_simple_init_func_success(&g->ops.fb.init_fbpa, i++);
setup_simple_init_func_success(&g->ops.fb.mem_unlock, i++);
setup_simple_init_func_success(&g->ops.fifo.reset_enable_hw, i++); setup_simple_init_func_success(&g->ops.fifo.reset_enable_hw, i++);
setup_simple_init_func_success(&g->ops.ltc.init_ltc_support, i++); setup_simple_init_func_success(&g->ops.ltc.init_ltc_support, i++);
setup_simple_init_func_success(&g->ops.mm.init_mm_support, i++); setup_simple_init_func_success(&g->ops.mm.init_mm_support, i++);
@@ -591,8 +589,6 @@ int test_poweron_branches(struct unit_module *m, struct gk20a *g, void *args)
/* hit all the NULL pointer checks */ /* hit all the NULL pointer checks */
g->ops.clk.init_clk_support = NULL; g->ops.clk.init_clk_support = NULL;
g->ops.fb.init_fbpa = NULL;
g->ops.fb.mem_unlock = NULL;
g->ops.therm.elcg_init_idle_filters = NULL; g->ops.therm.elcg_init_idle_filters = NULL;
g->ops.ecc.ecc_init_support = NULL; g->ops.ecc.ecc_init_support = NULL;
g->ops.channel.resume_all_serviceable_ch = NULL; g->ops.channel.resume_all_serviceable_ch = NULL;

View File

@@ -211,90 +211,71 @@ int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args)
int_empty_hal_return_error_after = -1; int_empty_hal_return_error_after = -1;
/* Making g->ops.ramin.init_pdb_cache_war fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 1,
ARBITRARY_ERROR, 1);
/* Making g->ops.fb.apply_pdb_cache_war fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 2,
ARBITRARY_ERROR, 2);
/* Making nvgpu_alloc_sysmem_flush fail */ /* Making nvgpu_alloc_sysmem_flush fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 0, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 0,
-ENOMEM, 3); -ENOMEM, 1);
/*
* Making nvgpu_alloc_sysmem_flush fail again with NULL HALs to test
* branches in nvgpu_init_mm_pdb_cache_war
*/
g->ops.ramin.init_pdb_cache_war = NULL;
g->ops.fb.apply_pdb_cache_war = NULL;
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 0,
-ENOMEM, 3);
g->ops.ramin.init_pdb_cache_war = int_empty_hal;
g->ops.fb.apply_pdb_cache_war = int_empty_hal;
/* Making nvgpu_init_bar1_vm fail on VM init */ /* Making nvgpu_init_bar1_vm fail on VM init */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 0, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 0,
-ENOMEM, 4); -ENOMEM, 2);
/* Making nvgpu_init_bar1_vm fail on alloc_inst_block */ /* Making nvgpu_init_bar1_vm fail on alloc_inst_block */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 2, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 2,
-ENOMEM, 5); -ENOMEM, 3);
/* Making nvgpu_init_bar2_vm fail */ /* Making nvgpu_init_bar2_vm fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 4, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 4,
-ENOMEM, 6); -ENOMEM, 4);
/* Making nvgpu_init_system_vm fail on the PMU VM init */ /* Making nvgpu_init_system_vm fail on the PMU VM init */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 29, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 29,
-ENOMEM, 7); -ENOMEM, 5);
/* Making nvgpu_init_system_vm fail again with extra branch coverage */ /* Making nvgpu_init_system_vm fail again with extra branch coverage */
g->ops.mm.init_bar2_vm = NULL; g->ops.mm.init_bar2_vm = NULL;
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 20, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 20,
-ENOMEM, 8); -ENOMEM, 6);
g->ops.mm.init_bar2_vm = gp10b_mm_init_bar2_vm; g->ops.mm.init_bar2_vm = gp10b_mm_init_bar2_vm;
/* Making nvgpu_init_system_vm fail on alloc_inst_block */ /* Making nvgpu_init_system_vm fail on alloc_inst_block */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 6, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 6,
-ENOMEM, 9); -ENOMEM, 7);
/* Making nvgpu_init_hwpm fail */ /* Making nvgpu_init_hwpm fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 7, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 7,
-ENOMEM, 10); -ENOMEM, 8);
/* Making nvgpu_init_engine_ucode_vm(sec2) fail on VM init */ /* Making nvgpu_init_engine_ucode_vm(sec2) fail on VM init */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 46, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 46,
-ENOMEM, 11); -ENOMEM, 9);
/* Making nvgpu_init_engine_ucode_vm(sec2) fail on alloc_inst_block */ /* Making nvgpu_init_engine_ucode_vm(sec2) fail on alloc_inst_block */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 9, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 9,
-ENOMEM, 12); -ENOMEM, 10);
/* Making nvgpu_init_engine_ucode_vm(gsp) fail */ /* Making nvgpu_init_engine_ucode_vm(gsp) fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 11, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 11,
-ENOMEM, 13); -ENOMEM, 11);
/* Making nvgpu_init_cde_vm fail */ /* Making nvgpu_init_cde_vm fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 80, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 80,
-ENOMEM, 14); -ENOMEM, 12);
/* Making nvgpu_init_ce_vm fail */ /* Making nvgpu_init_ce_vm fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 98, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_KMEM, 98,
-ENOMEM, 15); -ENOMEM, 13);
/* Making nvgpu_init_mmu_debug fail on wr_mem DMA alloc */ /* Making nvgpu_init_mmu_debug fail on wr_mem DMA alloc */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 14, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 14,
-ENOMEM, 16); -ENOMEM, 14);
/* Making nvgpu_init_mmu_debug fail on rd_mem DMA alloc */ /* Making nvgpu_init_mmu_debug fail on rd_mem DMA alloc */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 15, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_DMA, 15,
-ENOMEM, 17); -ENOMEM, 15);
/* Making g->ops.mm.mmu_fault.setup_sw fail */ /* Making g->ops.mm.mmu_fault.setup_sw fail */
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 3, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 1,
ARBITRARY_ERROR, 18); ARBITRARY_ERROR, 16);
/* /*
* Extra cases for branch coverage: change support flags to test * Extra cases for branch coverage: change support flags to test
@@ -305,8 +286,8 @@ int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args)
nvgpu_set_enabled(g, NVGPU_MM_FORCE_128K_PMU_VM, false); nvgpu_set_enabled(g, NVGPU_MM_FORCE_128K_PMU_VM, false);
g->has_cde = false; g->has_cde = false;
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 3, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 1,
ARBITRARY_ERROR, 19); ARBITRARY_ERROR, 17);
nvgpu_set_enabled(g, NVGPU_SUPPORT_SEC2_VM, true); nvgpu_set_enabled(g, NVGPU_SUPPORT_SEC2_VM, true);
nvgpu_set_enabled(g, NVGPU_SUPPORT_GSP_VM, true); nvgpu_set_enabled(g, NVGPU_SUPPORT_GSP_VM, true);
@@ -320,8 +301,8 @@ int test_nvgpu_init_mm(struct unit_module *m, struct gk20a *g, void *args)
g->ops.mc.fb_reset = NULL; g->ops.mc.fb_reset = NULL;
g->ops.fb.init_fs_state = NULL; g->ops.fb.init_fs_state = NULL;
errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 3, errors += nvgpu_init_mm_support_inject_error(m, g, ERROR_TYPE_HAL, 1,
ARBITRARY_ERROR, 20); ARBITRARY_ERROR, 18);
g->ops.mc.fb_reset = void_empty_hal; g->ops.mc.fb_reset = void_empty_hal;
g->ops.fb.init_fs_state = void_empty_hal; g->ops.fb.init_fs_state = void_empty_hal;
@@ -501,14 +482,12 @@ int test_mm_init_hal(struct unit_module *m, struct gk20a *g, void *args)
* For extra coverage. Note: the goal of this unit test is to validate * For extra coverage. Note: the goal of this unit test is to validate
* the mm.mm unit, not the underlying HALs. * the mm.mm unit, not the underlying HALs.
*/ */
g->ops.fb.apply_pdb_cache_war = int_empty_hal;
g->ops.fb.init_fs_state = void_empty_hal; g->ops.fb.init_fs_state = void_empty_hal;
g->ops.fb.set_mmu_page_size = void_empty_hal; g->ops.fb.set_mmu_page_size = void_empty_hal;
g->ops.mc.fb_reset = void_empty_hal; g->ops.mc.fb_reset = void_empty_hal;
g->ops.mm.mmu_fault.setup_hw = void_empty_hal; g->ops.mm.mmu_fault.setup_hw = void_empty_hal;
g->ops.mm.mmu_fault.setup_sw = int_empty_hal; g->ops.mm.mmu_fault.setup_sw = int_empty_hal;
g->ops.mm.setup_hw = int_empty_hal; g->ops.mm.setup_hw = int_empty_hal;
g->ops.ramin.init_pdb_cache_war = int_empty_hal;
nvgpu_posix_register_io(g, &mmu_faults_callbacks); nvgpu_posix_register_io(g, &mmu_faults_callbacks);
nvgpu_posix_io_init_reg_space(g); nvgpu_posix_io_init_reg_space(g);
@@ -563,29 +542,25 @@ int test_mm_suspend(struct unit_module *m, struct gk20a *g, void *args)
return UNIT_SUCCESS; return UNIT_SUCCESS;
} }
/*
* Simple helper to toggle a flag when called.
*/
static void helper_deinit_pdb_cache_war(struct gk20a *g)
{
test_flag = true;
}
int test_mm_remove_mm_support(struct unit_module *m, struct gk20a *g, int test_mm_remove_mm_support(struct unit_module *m, struct gk20a *g,
void *args) void *args)
{ {
int err;
/* /*
* Since the last step of the removal is to call * Since the last step of the removal is to call nvgpu_pd_cache_fini,
* g->ops.ramin.deinit_pdb_cache_war, it is a good indication that * g->mm.pd_cache = NULL indicates that the removal completed
* the removal completed successfully. * successfully.
*/ */
g->ops.ramin.deinit_pdb_cache_war = helper_deinit_pdb_cache_war;
test_flag = false; err = nvgpu_pd_cache_init(g);
if (err != 0) {
unit_return_fail(m, "nvgpu_pd_cache_init failed ??\n");
}
g->mm.remove_support(&g->mm); g->mm.remove_support(&g->mm);
g->ops.ramin.deinit_pdb_cache_war = NULL; if (g->mm.pd_cache != NULL) {
if (!test_flag) {
unit_return_fail(m, "mm removal did not complete\n"); unit_return_fail(m, "mm removal did not complete\n");
} }

View File

@@ -139,8 +139,8 @@ int test_mm_suspend(struct unit_module *m, struct gk20a *g, void *args);
* *
* Description: The mm.remove_support operation (nvgpu_remove_mm_support * Description: The mm.remove_support operation (nvgpu_remove_mm_support
* function) shall de-allocate all resources related to mm. In particular, it * function) shall de-allocate all resources related to mm. In particular, it
* is expected that nvgpu_remove_mm_support will call the * is expected that nvgpu_remove_mm_support will call the nvgpu_pd_cache_fini
* ramin.deinit_pdb_cache_war HAL as its last step. * as its last step.
* *
* Test Type: Feature based * Test Type: Feature based
* *
@@ -148,11 +148,9 @@ int test_mm_suspend(struct unit_module *m, struct gk20a *g, void *args);
* have been executed successfully * have been executed successfully
* *
* Steps: * Steps:
* - Setup the ramin.deinit_pdb_cache_war HAL to use a test HAL that will set * - Allocate pd_cache by calling nvgpu_pd_cache_init.
* a flag when called.
* - Call mm.remove_support. * - Call mm.remove_support.
* - Disable the ramin.deinit_pdb_cache_war HAL. (set it to NULL) * - Verify that g->mm.pd_cache is NULL.
* - Ensure that the test flag was set.
* - Setup additional HALs for line/branch coverage: mmu_fault.info_mem_destroy * - Setup additional HALs for line/branch coverage: mmu_fault.info_mem_destroy
* and mm.remove_bar2_vm. * and mm.remove_bar2_vm.
* - Call mm.remove_support again. * - Call mm.remove_support again.