gpu: nvgpu: compile out sim changes from safety build

As sim is a non-safe unit, compile it out. Also remove FMODEL-related
nvgpu changes and unit tests from the safety build.

JIRA NVGPU-3527

Change-Id: I22c83e195a09f9150fb6f5a3afff91df2ea075b9
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2139455
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Raghuram Kothakota <rkothakota@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Sagar Kamble
2019-06-19 19:11:46 +05:30
committed by mobile promotions
parent ad92d2d8cf
commit 5d37a9e489
34 changed files with 122 additions and 27 deletions

View File

@@ -41,6 +41,7 @@ ccflags-y += -DCONFIG_NVGPU_CHANNEL_TSG_CONTROL
ccflags-y += -DCONFIG_NVGPU_POWER_PG ccflags-y += -DCONFIG_NVGPU_POWER_PG
ccflags-y += -DCONFIG_NVGPU_CE ccflags-y += -DCONFIG_NVGPU_CE
ccflags-y += -DCONFIG_NVGPU_COMPRESSION ccflags-y += -DCONFIG_NVGPU_COMPRESSION
ccflags-y += -DCONFIG_NVGPU_SIM
ifeq ($(CONFIG_NVGPU_LOGGING),y) ifeq ($(CONFIG_NVGPU_LOGGING),y)
ccflags-y += -DCONFIG_NVGPU_LOGGING=1 ccflags-y += -DCONFIG_NVGPU_LOGGING=1

View File

@@ -147,5 +147,9 @@ NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_LS_PMU
CONFIG_NVGPU_POWER_PG := 1 CONFIG_NVGPU_POWER_PG := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_POWER_PG NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_POWER_PG
# Enable sim support for normal build
CONFIG_NVGPU_SIM := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_SIM
endif endif
endif endif

View File

@@ -36,7 +36,6 @@ srcs += os/posix/nvgpu.c \
os/posix/posix-channel.c \ os/posix/posix-channel.c \
os/posix/posix-tsg.c \ os/posix/posix-tsg.c \
os/posix/stubs.c \ os/posix/stubs.c \
os/posix/posix-sim.c \
os/posix/posix-nvhost.c \ os/posix/posix-nvhost.c \
os/posix/posix-vgpu.c \ os/posix/posix-vgpu.c \
os/posix/posix-dt.c \ os/posix/posix-dt.c \
@@ -57,6 +56,10 @@ endif
ifeq ($(CONFIG_NVGPU_LOGGING),1) ifeq ($(CONFIG_NVGPU_LOGGING),1)
srcs += os/posix/log.c srcs += os/posix/log.c
endif endif
ifeq ($(CONFIG_NVGPU_SIM),1)
srcs += os/posix/posix-sim.c
endif
endif endif
# POSIX sources shared between the POSIX and QNX builds. # POSIX sources shared between the POSIX and QNX builds.
@@ -75,9 +78,6 @@ srcs += common/utils/enabled.c \
common/utils/rbtree.c \ common/utils/rbtree.c \
common/utils/string.c \ common/utils/string.c \
common/utils/worker.c \ common/utils/worker.c \
common/sim/sim.c \
common/sim/sim_pci.c \
common/sim/sim_netlist.c \
common/init/nvgpu_init.c \ common/init/nvgpu_init.c \
common/mm/allocators/nvgpu_allocator.c \ common/mm/allocators/nvgpu_allocator.c \
common/mm/allocators/bitmap_allocator.c \ common/mm/allocators/bitmap_allocator.c \
@@ -551,3 +551,9 @@ ifeq ($(CONFIG_NVGPU_COMPRESSION),1)
srcs += hal/cbc/cbc_tu104.c srcs += hal/cbc/cbc_tu104.c
endif endif
endif endif
ifeq ($(CONFIG_NVGPU_SIM),1)
srcs += common/sim/sim.c \
common/sim/sim_pci.c \
common/sim/sim_netlist.c
endif

View File

@@ -41,7 +41,13 @@
bool nvgpu_acr_is_lsf_lazy_bootstrap(struct gk20a *g, struct nvgpu_acr *acr, bool nvgpu_acr_is_lsf_lazy_bootstrap(struct gk20a *g, struct nvgpu_acr *acr,
u32 falcon_id) u32 falcon_id)
{ {
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL) || acr == NULL) { #ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
return false;
}
#endif
if (acr == NULL) {
return false; return false;
} }
@@ -51,9 +57,11 @@ bool nvgpu_acr_is_lsf_lazy_bootstrap(struct gk20a *g, struct nvgpu_acr *acr,
int nvgpu_acr_alloc_blob_prerequisite(struct gk20a *g, struct nvgpu_acr *acr, int nvgpu_acr_alloc_blob_prerequisite(struct gk20a *g, struct nvgpu_acr *acr,
size_t size) size_t size)
{ {
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
return 0; return 0;
} }
#endif
if (acr == NULL) { if (acr == NULL) {
return -EINVAL; return -EINVAL;
@@ -67,9 +75,11 @@ int nvgpu_acr_bootstrap_hs_acr(struct gk20a *g, struct nvgpu_acr *acr)
{ {
int err = 0; int err = 0;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
return 0; return 0;
} }
#endif
if (acr == NULL) { if (acr == NULL) {
return -EINVAL; return -EINVAL;
@@ -87,10 +97,11 @@ int nvgpu_acr_construct_execute(struct gk20a *g, struct nvgpu_acr *acr)
{ {
int err = 0; int err = 0;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
return 0; return 0;
} }
#endif
if (acr == NULL) { if (acr == NULL) {
return -EINVAL; return -EINVAL;
} }
@@ -117,9 +128,11 @@ int nvgpu_acr_init(struct gk20a *g, struct nvgpu_acr **acr)
g->params.gpu_impl); g->params.gpu_impl);
int err = 0; int err = 0;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
goto done; goto done;
} }
#endif
if (*acr != NULL) { if (*acr != NULL) {
/* /*

View File

@@ -503,9 +503,11 @@ int nvgpu_gr_falcon_load_ctxsw_ucode(struct gk20a *g,
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
g->ops.gr.falcon.configure_fmodel(g); g->ops.gr.falcon.configure_fmodel(g);
} }
#endif
/* /*
* In case bootloader is not supported, revert to the old way of * In case bootloader is not supported, revert to the old way of
@@ -551,9 +553,11 @@ int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g,
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
g->ops.gr.falcon.configure_fmodel(g); g->ops.gr.falcon.configure_fmodel(g);
} }
#endif
if (nvgpu_is_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE)) { if (nvgpu_is_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE)) {
/* this must be recovery so bootstrap fecs and gpccs */ /* this must be recovery so bootstrap fecs and gpccs */

View File

@@ -333,9 +333,11 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
vidmem_dbg(g, "init begin"); vidmem_dbg(g, "init begin");
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
bootstrap_size = SZ_32M; bootstrap_size = SZ_32M;
} }
#endif
bootstrap_co.base = size - bootstrap_size; bootstrap_co.base = size - bootstrap_size;
bootstrap_co.length = bootstrap_size; bootstrap_co.length = bootstrap_size;

View File

@@ -535,9 +535,12 @@ int nvgpu_netlist_init_ctx_vars(struct gk20a *g)
return -ENOMEM; return -ENOMEM;
} }
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
return nvgpu_init_sim_netlist_ctx_vars(g); return nvgpu_init_sim_netlist_ctx_vars(g);
} else { } else
#endif
{
return nvgpu_netlist_init_ctx_vars_fw(g); return nvgpu_netlist_init_ctx_vars_fw(g);
} }
} }

View File

@@ -37,9 +37,14 @@
static bool is_lsfm_supported(struct gk20a *g, static bool is_lsfm_supported(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm) struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm)
{ {
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY) && if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY) &&
!nvgpu_is_enabled(g, NVGPU_IS_FMODEL) && !nvgpu_is_enabled(g, NVGPU_IS_FMODEL) &&
(lsfm != NULL)) { (lsfm != NULL)) {
#else
if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY) &&
(lsfm != NULL)) {
#endif
return true; return true;
} }
@@ -126,8 +131,12 @@ int nvgpu_pmu_lsfm_init(struct gk20a *g, struct nvgpu_pmu_lsfm **lsfm)
u32 ver = g->params.gpu_arch + g->params.gpu_impl; u32 ver = g->params.gpu_arch + g->params.gpu_impl;
int err = 0; int err = 0;
if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY) || #ifdef CONFIG_NVGPU_SIM
nvgpu_is_enabled(g, NVGPU_IS_FMODEL)){ if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
return 0;
}
#endif
if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
return 0; return 0;
} }

View File

@@ -157,9 +157,11 @@ int nvgpu_bios_sw_init(struct gk20a *g,
u32 ver = nvgpu_safe_add_u32(g->params.gpu_arch, g->params.gpu_impl); u32 ver = nvgpu_safe_add_u32(g->params.gpu_arch, g->params.gpu_impl);
int err = 0; int err = 0;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
goto done; goto done;
} }
#endif
if (nvgpu_bios_check_dgpu(g, ver) == false) { if (nvgpu_bios_check_dgpu(g, ver) == false) {
goto done; goto done;

View File

@@ -67,9 +67,11 @@ int tu104_bios_verify_devinit(struct gk20a *g)
int tu104_bios_init(struct gk20a *g) int tu104_bios_init(struct gk20a *g)
{ {
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
return 0; return 0;
} }
#endif
return gv100_bios_init(g); return gv100_bios_init(g);
} }

View File

@@ -238,10 +238,13 @@ void gm20b_cbc_init(struct gk20a *g, struct nvgpu_cbc *cbc)
u64 compbit_store_iova; u64 compbit_store_iova;
u64 compbit_base_post_divide64; u64 compbit_base_post_divide64;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
compbit_store_iova = nvgpu_mem_get_phys_addr(g, compbit_store_iova = nvgpu_mem_get_phys_addr(g,
&cbc->compbit_store.mem); &cbc->compbit_store.mem);
} else { } else
#endif
{
compbit_store_iova = nvgpu_mem_get_addr(g, compbit_store_iova = nvgpu_mem_get_addr(g,
&cbc->compbit_store.mem); &cbc->compbit_store.mem);
} }

View File

@@ -125,10 +125,13 @@ void gv11b_fb_cbc_configure(struct gk20a *g, struct nvgpu_cbc *cbc)
u64 compbit_store_iova; u64 compbit_store_iova;
u64 compbit_base_post_divide64; u64 compbit_base_post_divide64;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
compbit_store_iova = nvgpu_mem_get_phys_addr(g, compbit_store_iova = nvgpu_mem_get_phys_addr(g,
&cbc->compbit_store.mem); &cbc->compbit_store.mem);
} else { } else
#endif
{
compbit_store_iova = nvgpu_mem_get_addr(g, compbit_store_iova = nvgpu_mem_get_addr(g,
&cbc->compbit_store.mem); &cbc->compbit_store.mem);
} }

View File

@@ -264,10 +264,12 @@ size_t tu104_fb_get_vidmem_size(struct gk20a *g)
u32 ecc = fb_mmu_local_memory_range_ecc_mode_v(range); u32 ecc = fb_mmu_local_memory_range_ecc_mode_v(range);
size_t bytes = ((size_t)mag << scale) * SZ_1M; size_t bytes = ((size_t)mag << scale) * SZ_1M;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL) && (bytes == 0)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL) && (bytes == 0)) {
/* 192 MB */ /* 192 MB */
bytes = 192*1024*1024; bytes = 192*1024*1024;
} }
#endif
if (ecc != 0U) { if (ecc != 0U) {
bytes = bytes / 16U * 15U; bytes = bytes / 16U * 15U;

View File

@@ -38,12 +38,14 @@ int gm20b_fuse_check_priv_security(struct gk20a *g)
bool is_wpr_enabled = false; bool is_wpr_enabled = false;
bool is_auto_fetch_disable = false; bool is_auto_fetch_disable = false;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true); nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false); nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
nvgpu_log(g, gpu_dbg_info, "priv sec is enabled in fmodel"); nvgpu_log(g, gpu_dbg_info, "priv sec is enabled in fmodel");
return 0; return 0;
} }
#endif
if (g->ops.fuse.read_gcplex_config_fuse(g, &gcplex_config) != 0) { if (g->ops.fuse.read_gcplex_config_fuse(g, &gcplex_config) != 0) {
nvgpu_err(g, "err reading gcplex config fuse, check fuse clk"); nvgpu_err(g, "err reading gcplex config fuse, check fuse clk");

View File

@@ -39,12 +39,14 @@ int gp10b_fuse_check_priv_security(struct gk20a *g)
bool is_wpr_enabled = false; bool is_wpr_enabled = false;
bool is_auto_fetch_disable = false; bool is_auto_fetch_disable = false;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false); nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false); nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
nvgpu_log(g, gpu_dbg_info, "priv sec is disabled in fmodel"); nvgpu_log(g, gpu_dbg_info, "priv sec is disabled in fmodel");
return 0; return 0;
} }
#endif
if (g->ops.fuse.read_gcplex_config_fuse(g, &gcplex_config) != 0) { if (g->ops.fuse.read_gcplex_config_fuse(g, &gcplex_config) != 0) {
nvgpu_err(g, "err reading gcplex config fuse, check fuse clk"); nvgpu_err(g, "err reading gcplex config fuse, check fuse clk");

View File

@@ -179,6 +179,7 @@ u32 gm20b_gr_falcon_get_gpccs_start_reg_offset(void)
return (gr_gpcs_gpccs_falcon_hwcfg_r() - gr_fecs_falcon_hwcfg_r()); return (gr_gpcs_gpccs_falcon_hwcfg_r() - gr_fecs_falcon_hwcfg_r());
} }
#ifdef CONFIG_NVGPU_SIM
void gm20b_gr_falcon_configure_fmodel(struct gk20a *g) void gm20b_gr_falcon_configure_fmodel(struct gk20a *g)
{ {
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
@@ -189,6 +190,7 @@ void gm20b_gr_falcon_configure_fmodel(struct gk20a *g)
gr_gpccs_ctxsw_mailbox_value_f(0xc0de7777U)); gr_gpccs_ctxsw_mailbox_value_f(0xc0de7777U));
} }
#endif
void gm20b_gr_falcon_start_ucode(struct gk20a *g) void gm20b_gr_falcon_start_ucode(struct gk20a *g)
{ {

View File

@@ -47,7 +47,9 @@ void gm20b_gr_falcon_load_gpccs_imem(struct gk20a *g,
const u32 *ucode_u32_data, u32 ucode_u32_size); const u32 *ucode_u32_data, u32 ucode_u32_size);
void gm20b_gr_falcon_load_fecs_imem(struct gk20a *g, void gm20b_gr_falcon_load_fecs_imem(struct gk20a *g,
const u32 *ucode_u32_data, u32 ucode_u32_size); const u32 *ucode_u32_data, u32 ucode_u32_size);
#ifdef CONFIG_NVGPU_SIM
void gm20b_gr_falcon_configure_fmodel(struct gk20a *g); void gm20b_gr_falcon_configure_fmodel(struct gk20a *g);
#endif
void gm20b_gr_falcon_start_ucode(struct gk20a *g); void gm20b_gr_falcon_start_ucode(struct gk20a *g);
void gm20b_gr_falcon_start_gpccs(struct gk20a *g); void gm20b_gr_falcon_start_gpccs(struct gk20a *g);
void gm20b_gr_falcon_start_fecs(struct gk20a *g); void gm20b_gr_falcon_start_fecs(struct gk20a *g);

View File

@@ -593,9 +593,11 @@ int gm20b_gr_init_wait_fe_idle(struct gk20a *g)
struct nvgpu_timeout timeout; struct nvgpu_timeout timeout;
int err = 0; int err = 0;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
return 0; return 0;
} }
#endif
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
@@ -628,9 +630,11 @@ int gm20b_gr_init_fe_pwr_mode_force_on(struct gk20a *g, bool force_on)
int ret = 0; int ret = 0;
u32 reg_val; u32 reg_val;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
return 0; return 0;
} }
#endif
if (force_on) { if (force_on) {
reg_val = gr_fe_pwr_mode_req_send_f() | reg_val = gr_fe_pwr_mode_req_send_f() |

View File

@@ -478,7 +478,9 @@ static const struct gpu_ops gm20b_ops = {
.load_fecs_dmem = gm20b_gr_falcon_load_fecs_dmem, .load_fecs_dmem = gm20b_gr_falcon_load_fecs_dmem,
.load_gpccs_imem = gm20b_gr_falcon_load_gpccs_imem, .load_gpccs_imem = gm20b_gr_falcon_load_gpccs_imem,
.load_fecs_imem = gm20b_gr_falcon_load_fecs_imem, .load_fecs_imem = gm20b_gr_falcon_load_fecs_imem,
#ifdef CONFIG_NVGPU_SIM
.configure_fmodel = gm20b_gr_falcon_configure_fmodel, .configure_fmodel = gm20b_gr_falcon_configure_fmodel,
#endif
.start_ucode = gm20b_gr_falcon_start_ucode, .start_ucode = gm20b_gr_falcon_start_ucode,
.start_gpccs = gm20b_gr_falcon_start_gpccs, .start_gpccs = gm20b_gr_falcon_start_gpccs,
.start_fecs = gm20b_gr_falcon_start_fecs, .start_fecs = gm20b_gr_falcon_start_fecs,

View File

@@ -547,7 +547,9 @@ static const struct gpu_ops gp10b_ops = {
.load_fecs_dmem = gm20b_gr_falcon_load_fecs_dmem, .load_fecs_dmem = gm20b_gr_falcon_load_fecs_dmem,
.load_gpccs_imem = gm20b_gr_falcon_load_gpccs_imem, .load_gpccs_imem = gm20b_gr_falcon_load_gpccs_imem,
.load_fecs_imem = gm20b_gr_falcon_load_fecs_imem, .load_fecs_imem = gm20b_gr_falcon_load_fecs_imem,
#ifdef CONFIG_NVGPU_SIM
.configure_fmodel = gm20b_gr_falcon_configure_fmodel, .configure_fmodel = gm20b_gr_falcon_configure_fmodel,
#endif
.start_ucode = gm20b_gr_falcon_start_ucode, .start_ucode = gm20b_gr_falcon_start_ucode,
.start_gpccs = gm20b_gr_falcon_start_gpccs, .start_gpccs = gm20b_gr_falcon_start_gpccs,
.start_fecs = gm20b_gr_falcon_start_fecs, .start_fecs = gm20b_gr_falcon_start_fecs,

View File

@@ -651,7 +651,9 @@ static const struct gpu_ops gv11b_ops = {
.load_fecs_dmem = gm20b_gr_falcon_load_fecs_dmem, .load_fecs_dmem = gm20b_gr_falcon_load_fecs_dmem,
.load_gpccs_imem = gm20b_gr_falcon_load_gpccs_imem, .load_gpccs_imem = gm20b_gr_falcon_load_gpccs_imem,
.load_fecs_imem = gm20b_gr_falcon_load_fecs_imem, .load_fecs_imem = gm20b_gr_falcon_load_fecs_imem,
#ifdef CONFIG_NVGPU_SIM
.configure_fmodel = gm20b_gr_falcon_configure_fmodel, .configure_fmodel = gm20b_gr_falcon_configure_fmodel,
#endif
.start_ucode = gm20b_gr_falcon_start_ucode, .start_ucode = gm20b_gr_falcon_start_ucode,
.start_gpccs = gm20b_gr_falcon_start_gpccs, .start_gpccs = gm20b_gr_falcon_start_gpccs,
.start_fecs = gm20b_gr_falcon_start_fecs, .start_fecs = gm20b_gr_falcon_start_fecs,

View File

@@ -689,7 +689,9 @@ static const struct gpu_ops tu104_ops = {
.load_fecs_dmem = gm20b_gr_falcon_load_fecs_dmem, .load_fecs_dmem = gm20b_gr_falcon_load_fecs_dmem,
.load_gpccs_imem = gm20b_gr_falcon_load_gpccs_imem, .load_gpccs_imem = gm20b_gr_falcon_load_gpccs_imem,
.load_fecs_imem = gm20b_gr_falcon_load_fecs_imem, .load_fecs_imem = gm20b_gr_falcon_load_fecs_imem,
#ifdef CONFIG_NVGPU_SIM
.configure_fmodel = gm20b_gr_falcon_configure_fmodel, .configure_fmodel = gm20b_gr_falcon_configure_fmodel,
#endif
.start_ucode = gm20b_gr_falcon_start_ucode, .start_ucode = gm20b_gr_falcon_start_ucode,
.start_gpccs = gm20b_gr_falcon_start_gpccs, .start_gpccs = gm20b_gr_falcon_start_gpccs,
.start_fecs = gm20b_gr_falcon_start_fecs, .start_fecs = gm20b_gr_falcon_start_fecs,
@@ -1529,6 +1531,7 @@ int tu104_init_hal(struct gk20a *g)
nvgpu_pramin_ops_init(g); nvgpu_pramin_ops_init(g);
/* dGpu VDK support */ /* dGpu VDK support */
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)){ if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)){
/* Disable compression */ /* Disable compression */
#ifdef CONFIG_NVGPU_COMPRESSION #ifdef CONFIG_NVGPU_COMPRESSION
@@ -1552,7 +1555,9 @@ int tu104_init_hal(struct gk20a *g)
gops->clk_arb.get_arbiter_clk_domains = NULL; gops->clk_arb.get_arbiter_clk_domains = NULL;
gops->clk.support_clk_freq_controller = false; gops->clk.support_clk_freq_controller = false;
} else { } else
#endif
{
nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, true); nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, true);
nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true); nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
} }

View File

@@ -28,10 +28,13 @@
bool tu104_is_pmu_supported(struct gk20a *g) bool tu104_is_pmu_supported(struct gk20a *g)
{ {
#ifdef CONFIG_NVGPU_SIM
/* PMU not supported in dGpu Simulation */ /* PMU not supported in dGpu Simulation */
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
return false; return false;
} else { } else
#endif
{
return true; return true;
} }
} }

View File

@@ -38,10 +38,12 @@
void gm20b_priv_ring_enable(struct gk20a *g) void gm20b_priv_ring_enable(struct gk20a *g)
{ {
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
nvgpu_log_info(g, "priv ring is already enabled"); nvgpu_log_info(g, "priv ring is already enabled");
return; return;
} }
#endif
nvgpu_log_info(g, "enabling priv ring"); nvgpu_log_info(g, "enabling priv ring");
@@ -63,10 +65,12 @@ void gm20b_priv_ring_isr(struct gk20a *g)
u32 gpc_priv_stride; u32 gpc_priv_stride;
u32 gpc_offset; u32 gpc_offset;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
nvgpu_err(g, "unhandled priv ring intr"); nvgpu_err(g, "unhandled priv ring intr");
return; return;
} }
#endif
status0 = nvgpu_readl(g, pri_ringmaster_intr_status0_r()); status0 = nvgpu_readl(g, pri_ringmaster_intr_status0_r());
status1 = nvgpu_readl(g, pri_ringmaster_intr_status1_r()); status1 = nvgpu_readl(g, pri_ringmaster_intr_status1_r());

View File

@@ -116,10 +116,12 @@ void gp10b_priv_ring_isr(struct gk20a *g)
u32 error_info; u32 error_info;
u32 error_code; u32 error_code;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
nvgpu_info(g, "unhandled priv ring intr"); nvgpu_info(g, "unhandled priv ring intr");
return; return;
} }
#endif
status0 = nvgpu_readl(g, pri_ringmaster_intr_status0_r()); status0 = nvgpu_readl(g, pri_ringmaster_intr_status0_r());
status1 = nvgpu_readl(g, pri_ringmaster_intr_status1_r()); status1 = nvgpu_readl(g, pri_ringmaster_intr_status1_r());

View File

@@ -95,11 +95,13 @@ int gm20b_elcg_init_idle_filters(struct gk20a *g)
active_engine_id = f->active_engines_list[engine_id]; active_engine_id = f->active_engines_list[engine_id];
gate_ctrl = nvgpu_readl(g, therm_gate_ctrl_r(active_engine_id)); gate_ctrl = nvgpu_readl(g, therm_gate_ctrl_r(active_engine_id));
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
gate_ctrl = set_field(gate_ctrl, gate_ctrl = set_field(gate_ctrl,
therm_gate_ctrl_eng_delay_after_m(), therm_gate_ctrl_eng_delay_after_m(),
therm_gate_ctrl_eng_delay_after_f(4)); therm_gate_ctrl_eng_delay_after_f(4));
} }
#endif
/* 2 * (1 << 9) = 1024 clks */ /* 2 * (1 << 9) = 1024 clks */
gate_ctrl = set_field(gate_ctrl, gate_ctrl = set_field(gate_ctrl,

View File

@@ -544,7 +544,9 @@ struct gpu_ops {
const u32 *ucode_u32_data, u32 size); const u32 *ucode_u32_data, u32 size);
void (*load_fecs_imem)(struct gk20a *g, void (*load_fecs_imem)(struct gk20a *g,
const u32 *ucode_u32_data, u32 size); const u32 *ucode_u32_data, u32 size);
#ifdef CONFIG_NVGPU_SIM
void (*configure_fmodel)(struct gk20a *g); void (*configure_fmodel)(struct gk20a *g);
#endif
void (*start_ucode)(struct gk20a *g); void (*start_ucode)(struct gk20a *g);
void (*start_gpccs)(struct gk20a *g); void (*start_gpccs)(struct gk20a *g);
void (*start_fecs)(struct gk20a *g); void (*start_fecs)(struct gk20a *g);
@@ -1895,7 +1897,9 @@ struct gk20a {
struct nvgpu_nvlink_dev nvlink; struct nvgpu_nvlink_dev nvlink;
struct nvgpu_gr *gr; struct nvgpu_gr *gr;
struct nvgpu_fbp *fbp; struct nvgpu_fbp *fbp;
#ifdef CONFIG_NVGPU_SIM
struct sim_nvgpu *sim; struct sim_nvgpu *sim;
#endif
struct mm_gk20a mm; struct mm_gk20a mm;
struct nvgpu_pmu *pmu; struct nvgpu_pmu *pmu;
struct nvgpu_acr *acr; struct nvgpu_acr *acr;

View File

@@ -22,6 +22,8 @@
#ifndef NVGPU_SIM_H #ifndef NVGPU_SIM_H
#define NVGPU_SIM_H #define NVGPU_SIM_H
#ifdef CONFIG_NVGPU_SIM
#include <nvgpu/nvgpu_mem.h> #include <nvgpu/nvgpu_mem.h>
struct gk20a; struct gk20a;
@@ -58,4 +60,5 @@ void sim_writel(struct sim_nvgpu *sim, u32 r, u32 v);
u32 sim_readl(struct sim_nvgpu *sim, u32 r); u32 sim_readl(struct sim_nvgpu *sim, u32 r);
int nvgpu_init_sim_netlist_ctx_vars(struct gk20a *g); int nvgpu_init_sim_netlist_ctx_vars(struct gk20a *g);
#endif
#endif /* NVGPU_SIM_H */ #endif /* NVGPU_SIM_H */

View File

@@ -389,11 +389,6 @@
"test_level": 0, "test_level": 0,
"unit": "fuse" "unit": "fuse"
}, },
{
"test": "fuse_gm20b_check_fmodel",
"test_level": 0,
"unit": "fuse"
},
{ {
"test": "fuse_gm20b_check_gcplex_fail", "test": "fuse_gm20b_check_gcplex_fail",
"test_level": 0, "test_level": 0,
@@ -424,11 +419,6 @@
"test_level": 0, "test_level": 0,
"unit": "fuse" "unit": "fuse"
}, },
{
"test": "fuse_gp10b_check_fmodel",
"test_level": 0,
"unit": "fuse"
},
{ {
"test": "fuse_gp10b_check_gcplex_fail", "test": "fuse_gp10b_check_gcplex_fail",
"test_level": 0, "test_level": 0,

View File

@@ -302,6 +302,7 @@ int test_fuse_gm20b_basic_fuses(struct unit_module *m,
return ret; return ret;
} }
#ifdef CONFIG_NVGPU_SIM
/* Verify when FMODEL is enabled, fuse module reports non-secure */ /* Verify when FMODEL is enabled, fuse module reports non-secure */
int test_fuse_gm20b_check_fmodel(struct unit_module *m, int test_fuse_gm20b_check_fmodel(struct unit_module *m,
struct gk20a *g, void *__args) struct gk20a *g, void *__args)
@@ -331,3 +332,4 @@ int test_fuse_gm20b_check_fmodel(struct unit_module *m,
nvgpu_set_enabled(g, NVGPU_IS_FMODEL, false); nvgpu_set_enabled(g, NVGPU_IS_FMODEL, false);
return ret; return ret;
} }
#endif

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -39,7 +39,8 @@ int test_fuse_gm20b_check_non_sec(struct unit_module *m,
struct gk20a *g, void *__args); struct gk20a *g, void *__args);
int test_fuse_gm20b_basic_fuses(struct unit_module *m, int test_fuse_gm20b_basic_fuses(struct unit_module *m,
struct gk20a *g, void *__args); struct gk20a *g, void *__args);
#ifdef CONFIG_NVGPU_SIM
int test_fuse_gm20b_check_fmodel(struct unit_module *m, int test_fuse_gm20b_check_fmodel(struct unit_module *m,
struct gk20a *g, void *__args); struct gk20a *g, void *__args);
#endif
#endif /* __UNIT_NVGPU_FUSE_GM20B_H__ */ #endif /* __UNIT_NVGPU_FUSE_GM20B_H__ */

View File

@@ -235,6 +235,7 @@ int test_fuse_gp10b_feature_override_disable(struct unit_module *m,
return ret; return ret;
} }
#ifdef CONFIG_NVGPU_SIM
/* Verify when FMODEL is enabled, fuse module reports non-secure */ /* Verify when FMODEL is enabled, fuse module reports non-secure */
int test_fuse_gp10b_check_fmodel(struct unit_module *m, int test_fuse_gp10b_check_fmodel(struct unit_module *m,
struct gk20a *g, void *__args) struct gk20a *g, void *__args)
@@ -264,3 +265,4 @@ int test_fuse_gp10b_check_fmodel(struct unit_module *m,
nvgpu_set_enabled(g, NVGPU_IS_FMODEL, false); nvgpu_set_enabled(g, NVGPU_IS_FMODEL, false);
return ret; return ret;
} }
#endif

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -37,7 +37,8 @@ int test_fuse_gp10b_ecc(struct unit_module *m,
struct gk20a *g, void *__args); struct gk20a *g, void *__args);
int test_fuse_gp10b_feature_override_disable(struct unit_module *m, int test_fuse_gp10b_feature_override_disable(struct unit_module *m,
struct gk20a *g, void *__args); struct gk20a *g, void *__args);
#ifdef CONFIG_NVGPU_SIM
int test_fuse_gp10b_check_fmodel(struct unit_module *m, int test_fuse_gp10b_check_fmodel(struct unit_module *m,
struct gk20a *g, void *__args); struct gk20a *g, void *__args);
#endif
#endif /* __UNIT_NVGPU_FUSE_GP10B_H__ */ #endif /* __UNIT_NVGPU_FUSE_GP10B_H__ */

View File

@@ -158,7 +158,9 @@ struct unit_module_test fuse_tests[] = {
UNIT_TEST(fuse_gp10b_ecc, test_fuse_gp10b_ecc, NULL, 0), UNIT_TEST(fuse_gp10b_ecc, test_fuse_gp10b_ecc, NULL, 0),
UNIT_TEST(fuse_gp10b_feature_override_disable, UNIT_TEST(fuse_gp10b_feature_override_disable,
test_fuse_gp10b_feature_override_disable, NULL, 0), test_fuse_gp10b_feature_override_disable, NULL, 0),
#ifdef CONFIG_NVGPU_SIM
UNIT_TEST(fuse_gp10b_check_fmodel, test_fuse_gp10b_check_fmodel, NULL, 0), UNIT_TEST(fuse_gp10b_check_fmodel, test_fuse_gp10b_check_fmodel, NULL, 0),
#endif
UNIT_TEST(fuse_gp10b_cleanup, test_fuse_device_common_cleanup, UNIT_TEST(fuse_gp10b_cleanup, test_fuse_device_common_cleanup,
&gp10b_init_args, 0), &gp10b_init_args, 0),
@@ -178,7 +180,9 @@ struct unit_module_test fuse_tests[] = {
NULL, NULL,
0), 0),
UNIT_TEST(fuse_gm20b_basic_fuses, test_fuse_gm20b_basic_fuses, NULL, 0), UNIT_TEST(fuse_gm20b_basic_fuses, test_fuse_gm20b_basic_fuses, NULL, 0),
#ifdef CONFIG_NVGPU_SIM
UNIT_TEST(fuse_gm20b_check_fmodel, test_fuse_gm20b_check_fmodel, NULL, 0), UNIT_TEST(fuse_gm20b_check_fmodel, test_fuse_gm20b_check_fmodel, NULL, 0),
#endif
UNIT_TEST(fuse_gm20b_cleanup, test_fuse_device_common_cleanup, UNIT_TEST(fuse_gm20b_cleanup, test_fuse_device_common_cleanup,
&gm20b_init_args, 0), &gm20b_init_args, 0),