gpu: nvgpu: MISRA 21.2 fixes: __nvgpu_set_enabled()

Rename __nvgpu_set_enabled() to nvgpu_set_enabled(); MISRA Rule 21.2
disallows declaring reserved identifiers such as names with a leading
double underscore. The double underscore was originally present to
indicate that this function has potentially unintended side effects
(enabling a feature has wide-ranging impact).

To avoid losing this documentation, a comment was added to convey that
this function must be used with care.
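
As a hedged illustration (not part of this change), the intended usage
pattern looks roughly like the sketch below. The example_init_hal()
function is hypothetical; only the nvgpu_set_enabled()/nvgpu_is_enabled()
pair is the real API:

  /* Hypothetical boot-time setup: flags are decided once, early on. */
  static int example_init_hal(struct gk20a *g)
  {
          nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG, true);

          /* In steady state the flag is only queried, never set. */
          if (nvgpu_is_enabled(g, NVGPU_SUPPORT_TSG)) {
                  /* TSG-dependent setup would go here. */
          }

          return 0;
  }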

JIRA NVGPU-1029

Change-Id: I8bfc6fa4c17743f9f8056cb6a7a0f66229ca2583
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1989434
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Alex Waterman
Date: 2019-01-07 10:26:39 -08:00
Committed by: mobile promotions
Parent: 4ce9c114d5
Commit: 489236d181

39 changed files with 203 additions and 199 deletions

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-18, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -53,7 +53,7 @@ bool nvgpu_is_enabled(struct gk20a *g, int flag)
 	return test_bit(flag, g->enabled_flags);
 }

-void __nvgpu_set_enabled(struct gk20a *g, int flag, bool state)
+void nvgpu_set_enabled(struct gk20a *g, int flag, bool state)
 {
 	if (state) {
 		set_bit(flag, g->enabled_flags);

View File

@@ -1,7 +1,7 @@
 /*
  * GM20B FUSE
  *
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -39,8 +39,8 @@ int gm20b_fuse_check_priv_security(struct gk20a *g)
 	bool is_auto_fetch_disable = false;

 	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
-		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
-		__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
+		nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
+		nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
 		nvgpu_log(g, gpu_dbg_info, "priv sec is enabled in fmodel");
 		return 0;
 	}
@@ -50,7 +50,7 @@ int gm20b_fuse_check_priv_security(struct gk20a *g)
 		return -EINVAL;
 	}

-	__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
+	nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
 	if (gk20a_readl(g, fuse_opt_priv_sec_en_r()) != 0U) {
 		/*
@@ -59,7 +59,7 @@ int gm20b_fuse_check_priv_security(struct gk20a *g)
 		 * set to 0. In this case gmmu tries to pull wpr
 		 * and vpr settings from tegra mc
 		 */
-		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
+		nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
 		is_wpr_enabled =
 			(gcplex_config & GCPLEX_CONFIG_WPR_ENABLED_MASK) != 0U;
 		is_auto_fetch_disable =
@@ -84,7 +84,7 @@ int gm20b_fuse_check_priv_security(struct gk20a *g)
 			return -EINVAL;
 		}
 	} else {
-		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
+		nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
 		nvgpu_log(g, gpu_dbg_info,
 				"gcplex_config = 0x%08x, non secure mode",
 				gcplex_config);

View File

@@ -1,7 +1,7 @@
 /*
  * GP10B FUSE
  *
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -40,8 +40,8 @@ int gp10b_fuse_check_priv_security(struct gk20a *g)
 	bool is_auto_fetch_disable = false;

 	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
-		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
-		__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
+		nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
+		nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
 		nvgpu_log(g, gpu_dbg_info, "priv sec is disabled in fmodel");
 		return 0;
 	}
@@ -58,8 +58,8 @@ int gp10b_fuse_check_priv_security(struct gk20a *g)
 		 * set to 0. In this case gmmu tries to pull wpr
 		 * and vpr settings from tegra mc
 		 */
-		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
-		__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
+		nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
+		nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
 		is_wpr_enabled =
 			(gcplex_config & GCPLEX_CONFIG_WPR_ENABLED_MASK) != 0U;
 		is_auto_fetch_disable =
@@ -85,8 +85,8 @@ int gp10b_fuse_check_priv_security(struct gk20a *g)
 			return -EINVAL;
 		}
 	} else {
-		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
-		__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
+		nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
+		nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
 		nvgpu_log(g, gpu_dbg_info,
 				"gcplex_config = 0x%08x, non secure mode",
 				gcplex_config);

View File

@@ -504,11 +504,11 @@ int gk20a_wait_for_idle(struct gk20a *g)

 int gk20a_init_gpu_characteristics(struct gk20a *g)
 {
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_DIRECT_KIND_CTRL, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_DIRECT_KIND_CTRL, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, true);

 	if ((g->ops.mm.support_sparse != NULL) && g->ops.mm.support_sparse(g)) {
-		__nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, true);
+		nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, true);
 	}

 	/*
@@ -516,7 +516,7 @@ int gk20a_init_gpu_characteristics(struct gk20a *g)
 	 * anything that depends on job tracking. (Here, fast means strictly no
 	 * metadata, just the gpfifo contents are copied and gp_put updated).
 	 */
-	__nvgpu_set_enabled(g,
+	nvgpu_set_enabled(g,
 			NVGPU_SUPPORT_DETERMINISTIC_SUBMIT_NO_JOBTRACKING,
 			true);

@@ -527,16 +527,16 @@ int gk20a_init_gpu_characteristics(struct gk20a *g)
 	 * that depends on deferred cleanup.
 	 */
 	if (!nvgpu_channel_sync_needs_os_fence_framework(g)) {
-		__nvgpu_set_enabled(g,
+		nvgpu_set_enabled(g,
 				NVGPU_SUPPORT_DETERMINISTIC_SUBMIT_FULL,
 				true);
 	}

-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG, true);

 	if (g->ops.clk_arb.get_arbiter_clk_domains != NULL &&
 			g->ops.clk.support_clk_freq_controller) {
-		__nvgpu_set_enabled(g, NVGPU_SUPPORT_CLOCK_CONTROLS, true);
+		nvgpu_set_enabled(g, NVGPU_SUPPORT_CLOCK_CONTROLS, true);
 	}

 	g->ops.gr.detect_sm_arch(g);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -533,7 +533,7 @@ int nvgpu_nvlink_probe(struct gk20a *g)
 	}

 	/* Enable NVLINK support */
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_NVLINK, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_NVLINK, true);
 	return 0;

unregister_ndev:
@@ -561,7 +561,7 @@ int nvgpu_nvlink_remove(struct gk20a *g)
 	if (!ndev)
 		return -ENODEV;

-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_NVLINK, false);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_NVLINK, false);

 	err = nvlink_unregister_link(&ndev->link);
 	if (err != 0) {

View File

@@ -1712,7 +1712,7 @@ int gv100_nvlink_init(struct gk20a *g)
 	}

 	/* Set HSHUB and SG_PHY */
-	__nvgpu_set_enabled(g, NVGPU_MM_USE_PHYSICAL_SG, true);
+	nvgpu_set_enabled(g, NVGPU_MM_USE_PHYSICAL_SG, true);

 	err = g->ops.fb.enable_nvlink(g);
 	if (err != 0) {
@@ -1723,8 +1723,8 @@ int gv100_nvlink_init(struct gk20a *g)
 	return err;

fail:
-	__nvgpu_set_enabled(g, NVGPU_MM_USE_PHYSICAL_SG, false);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_NVLINK, false);
+	nvgpu_set_enabled(g, NVGPU_MM_USE_PHYSICAL_SG, false);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_NVLINK, false);
 	return err;
 }

View File

@@ -603,7 +603,7 @@ int nvgpu_pmu_destroy(struct gk20a *g)
 	pmu->perfmon_ready = false;
 	pmu->zbc_ready = false;
 	g->pmu_lsf_pmu_wpr_init_done = false;
-	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+	nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);

 	nvgpu_log_fn(g, "done");
 	return 0;

View File

@@ -1153,7 +1153,7 @@ int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
 			set_perfmon_cntr_group_id_v2;
 		g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2;
 		g->pmu_ver_cmd_id_zbc_table_update = 16;
-		__nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
+		nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
 		g->ops.pmu_ver.get_pmu_cmdline_args_size =
 			pmu_cmdline_size_v4;
 		g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq =
@@ -1257,7 +1257,7 @@ int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
 			set_perfmon_cntr_group_id_v2;
 		g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2;
 		g->pmu_ver_cmd_id_zbc_table_update = 16;
-		__nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, false);
+		nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, false);
 		g->ops.pmu_ver.get_pmu_cmdline_args_size =
 			pmu_cmdline_size_v6;
 		g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq =
@@ -1404,7 +1404,7 @@ int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
 			set_perfmon_cntr_group_id_v2;
 		g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2;
 		g->pmu_ver_cmd_id_zbc_table_update = 16;
-		__nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
+		nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
 		g->ops.pmu_ver.get_pmu_cmdline_args_size =
 			pmu_cmdline_size_v5;
 		g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq =
@@ -1530,7 +1530,7 @@ int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
 			set_perfmon_cntr_group_id_v2;
 		g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2;
 		g->pmu_ver_cmd_id_zbc_table_update = 16;
-		__nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
+		nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
 		g->ops.pmu_ver.get_pmu_cmdline_args_size =
 			pmu_cmdline_size_v3;
 		g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq =

View File

@@ -422,7 +422,7 @@ int gk20a_fecs_trace_init(struct gk20a *g)
 	BUG_ON(!is_power_of_2(GK20A_FECS_TRACE_NUM_RECORDS));
 	hash_init(trace->pid_hash_table);

-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_FECS_CTXSW_TRACE, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_FECS_CTXSW_TRACE, true);

 	trace->init = true;

View File

@@ -814,7 +814,7 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
 	} else {
 		/* cold boot or rg exit */
-		__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, true);
+		nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, true);
 		if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
 			gr_gm20b_load_gpccs_with_bootloader(g);
 		} else {
@@ -1148,8 +1148,8 @@ u32 gr_gm20b_get_max_fbps_count(struct gk20a *g)
 void gr_gm20b_init_cyclestats(struct gk20a *g)
 {
 #if defined(CONFIG_GK20A_CYCLE_STATS)
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS_SNAPSHOT, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS_SNAPSHOT, true);
 	g->gr.max_css_buffer_size = 0xffffffffU;
 #else
 	(void)g;

View File

@@ -825,8 +825,8 @@ int gm20b_init_hal(struct gk20a *g)
 	gops->get_litter_value = gm20b_ops.get_litter_value;
 	gops->semaphore_wakeup = gk20a_channel_semaphore_wakeup;

-	__nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
-	__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
+	nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
+	nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);

 	/* Read fuses to check if gpu needs to boot in secure/non-secure mode */
 	if (gops->fuse.check_priv_security(g) != 0) {
@@ -867,7 +867,7 @@ int gm20b_init_hal(struct gk20a *g)
 		gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
 	}

-	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+	nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
 	g->pmu_lsf_pmu_wpr_init_done = 0;
 	g->name = "gm20b";

View File

@@ -1,7 +1,7 @@
 /*
  * GP10B Graphics
  *
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -41,10 +41,10 @@ static void gp10b_detect_ecc_enabled_units(struct gk20a *g)

 	if (opt_feature_fuses_override_disable) {
 		if (opt_ecc_en) {
-			__nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_SM_LRF, true);
-			__nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_SM_SHM, true);
-			__nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_TEX, true);
-			__nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_LTC, true);
+			nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_SM_LRF, true);
+			nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_SM_SHM, true);
+			nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_TEX, true);
+			nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_LTC, true);
 		}
 	} else {
 		/* SM LRF */
@@ -52,12 +52,12 @@ static void gp10b_detect_ecc_enabled_units(struct gk20a *g)
 				fecs_feature_override_ecc) == 1U) {
 			if (gr_fecs_feature_override_ecc_sm_lrf_v(
 					fecs_feature_override_ecc) == 1U) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_LRF, true);
 			}
 		} else {
 			if (opt_ecc_en) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_LRF, true);
 			}
 		}
@@ -67,12 +67,12 @@ static void gp10b_detect_ecc_enabled_units(struct gk20a *g)
 				fecs_feature_override_ecc) == 1U) {
 			if (gr_fecs_feature_override_ecc_sm_shm_v(
 					fecs_feature_override_ecc) == 1U) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_SHM, true);
 			}
 		} else {
 			if (opt_ecc_en) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_SHM, true);
 			}
 		}
@@ -82,12 +82,12 @@ static void gp10b_detect_ecc_enabled_units(struct gk20a *g)
 				fecs_feature_override_ecc) == 1U) {
 			if (gr_fecs_feature_override_ecc_tex_v(
 					fecs_feature_override_ecc) == 1U) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_TEX, true);
 			}
 		} else {
 			if (opt_ecc_en) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_TEX, true);
 			}
 		}
@@ -97,12 +97,12 @@ static void gp10b_detect_ecc_enabled_units(struct gk20a *g)
 				fecs_feature_override_ecc) == 1U) {
 			if (gr_fecs_feature_override_ecc_ltc_v(
 					fecs_feature_override_ecc) == 1U) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_LTC, true);
 			}
 		} else {
 			if (opt_ecc_en) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_LTC, true);
 			}
 		}
@@ -113,6 +113,6 @@ int gp10b_init_gpu_characteristics(struct gk20a *g)
 {
 	gk20a_init_gpu_characteristics(g);
 	gp10b_detect_ecc_enabled_units(g);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, true);
 	return 0;
 }

View File

@@ -897,9 +897,9 @@ int gp10b_init_hal(struct gk20a *g)
 	gops->get_litter_value = gp10b_ops.get_litter_value;
 	gops->semaphore_wakeup = gk20a_channel_semaphore_wakeup;

-	__nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
-	__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
-	__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, false);
+	nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
+	nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
+	nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, false);

 	/* Read fuses to check if gpu needs to boot in secure/non-secure mode */
 	if (gops->fuse.check_priv_security(g) != 0) {
@@ -942,7 +942,7 @@ int gp10b_init_hal(struct gk20a *g)
 		gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
 	}

-	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+	nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
 	g->pmu_lsf_pmu_wpr_init_done = 0;
 	g->name = "gp10b";

View File

@@ -289,14 +289,14 @@ int gv100_init_gpu_characteristics(struct gk20a *g)
 		return err;
 	}

-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_GET_TEMPERATURE, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_GET_TEMPERATURE, true);

 	if (nvgpu_has_syncpoints(g)) {
-		__nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNCPOINT_ADDRESS, true);
-		__nvgpu_set_enabled(g, NVGPU_SUPPORT_USER_SYNCPOINT, true);
+		nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNCPOINT_ADDRESS, true);
+		nvgpu_set_enabled(g, NVGPU_SUPPORT_USER_SYNCPOINT, true);
 	}
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_USERMODE_SUBMIT, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_USERMODE_SUBMIT, true);

 	return 0;
 }
@@ -1168,15 +1168,15 @@ int gv100_init_hal(struct gk20a *g)
 	gops->get_litter_value = gv100_ops.get_litter_value;
 	gops->semaphore_wakeup = gk20a_channel_semaphore_wakeup;

-	__nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
-	__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
-	__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
-	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR, false);
-	__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, true);
+	nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
+	nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
+	nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
+	nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR, false);
+	nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, true);

 	/* for now */
-	__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, true);
+	nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, true);
 	g->pmu_lsf_pmu_wpr_init_done = 0;

 	gops->clk.split_rail_support = false;

View File

@@ -4575,16 +4575,16 @@ static void gr_gv11b_detect_ecc_enabled_units(struct gk20a *g)

 	if (opt_feature_fuses_override_disable) {
 		if (opt_ecc_en) {
-			__nvgpu_set_enabled(g,
+			nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_LRF, true);
-			__nvgpu_set_enabled(g,
+			nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_L1_DATA, true);
-			__nvgpu_set_enabled(g,
+			nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_L1_TAG, true);
-			__nvgpu_set_enabled(g,
+			nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_ICACHE, true);
-			__nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_LTC, true);
-			__nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_SM_CBU, true);
+			nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_LTC, true);
+			nvgpu_set_enabled(g, NVGPU_ECC_ENABLED_SM_CBU, true);
 		}
 	} else {
 		/* SM LRF */
@@ -4592,12 +4592,12 @@ static void gr_gv11b_detect_ecc_enabled_units(struct gk20a *g)
 				fecs_feature_override_ecc) == 1U) {
 			if (gr_fecs_feature_override_ecc_sm_lrf_v(
 					fecs_feature_override_ecc) == 1U) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_LRF, true);
 			}
 		} else {
 			if (opt_ecc_en) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_LRF, true);
 			}
 		}
@@ -4606,12 +4606,12 @@ static void gr_gv11b_detect_ecc_enabled_units(struct gk20a *g)
 				fecs_feature_override_ecc) == 1U) {
 			if (gr_fecs_feature_override_ecc_sm_l1_data_v(
 					fecs_feature_override_ecc) == 1U) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_L1_DATA, true);
 			}
 		} else {
 			if (opt_ecc_en) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_L1_DATA, true);
 			}
 		}
@@ -4620,12 +4620,12 @@ static void gr_gv11b_detect_ecc_enabled_units(struct gk20a *g)
 				fecs_feature_override_ecc) == 1U) {
 			if (gr_fecs_feature_override_ecc_sm_l1_tag_v(
 					fecs_feature_override_ecc) == 1U) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_L1_TAG, true);
 			}
 		} else {
 			if (opt_ecc_en) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_L1_TAG, true);
 			}
 		}
@@ -4638,12 +4638,12 @@ static void gr_gv11b_detect_ecc_enabled_units(struct gk20a *g)
 				fecs_feature_override_ecc) == 1U) &&
 			(gr_fecs_feature_override_ecc_1_sm_l1_icache_v(
 				fecs_feature_override_ecc) == 1U)) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_ICACHE, true);
 			}
 		} else {
 			if (opt_ecc_en) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_ICACHE, true);
 			}
 		}
@@ -4652,12 +4652,12 @@ static void gr_gv11b_detect_ecc_enabled_units(struct gk20a *g)
 				fecs_feature_override_ecc) == 1U) {
 			if (gr_fecs_feature_override_ecc_ltc_v(
 					fecs_feature_override_ecc) == 1U) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_LTC, true);
 			}
 		} else {
 			if (opt_ecc_en) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_LTC, true);
 			}
 		}
@@ -4666,12 +4666,12 @@ static void gr_gv11b_detect_ecc_enabled_units(struct gk20a *g)
 				fecs_feature_override_ecc) == 1U) {
 			if (gr_fecs_feature_override_ecc_sm_cbu_v(
 					fecs_feature_override_ecc) == 1U) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_CBU, true);
 			}
 		} else {
 			if (opt_ecc_en) {
-				__nvgpu_set_enabled(g,
+				nvgpu_set_enabled(g,
 					NVGPU_ECC_ENABLED_SM_CBU, true);
 			}
 		}

View File

@@ -1,7 +1,7 @@
 /*
  * GV11B Graphics
  *
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,12 +30,12 @@
 int gv11b_init_gpu_characteristics(struct gk20a *g)
 {
 	gk20a_init_gpu_characteristics(g);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_SCG, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNCPOINT_ADDRESS, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_USER_SYNCPOINT, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_USERMODE_SUBMIT, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_SCG, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNCPOINT_ADDRESS, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_USER_SYNCPOINT, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_USERMODE_SUBMIT, true);

 	return 0;
 }

View File

@@ -1030,7 +1030,7 @@ int gv11b_init_hal(struct gk20a *g)
 	gops->get_litter_value = gv11b_ops.get_litter_value;
 	gops->semaphore_wakeup = gk20a_channel_semaphore_wakeup;

-	__nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, false);
+	nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, false);

 	/* Read fuses to check if gpu needs to boot in secure/non-secure mode */
 	if (gops->fuse.check_priv_security(g) != 0) {
@@ -1069,10 +1069,10 @@ int gv11b_init_hal(struct gk20a *g)
 		gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
 	}

-	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
-	__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR, false);
+	nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+	nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR, false);

 	g->name = "gv11b";

View File

@@ -190,15 +190,21 @@ struct gk20a;
 bool nvgpu_is_enabled(struct gk20a *g, int flag);

 /**
- * __nvgpu_set_enabled - Set the state of a flag.
+ * nvgpu_set_enabled - Set the state of a flag.
  *
  * @g - The GPU.
  * @flag - Which flag to modify.
  * @state - The state to set the flag to.
  *
  * Set the state of the passed @flag to @state.
+ *
+ * This is generally a somewhat low level operation with lots of potential
+ * side effects. Be wary about where and when you use this. Typically a bunch
+ * of calls to this early in the driver boot sequence makes sense (as
+ * information is determined about the GPU at run time). Calling this in steady
+ * state operation is probably an incorrect thing to do.
  */
-void __nvgpu_set_enabled(struct gk20a *g, int flag, bool state);
+void nvgpu_set_enabled(struct gk20a *g, int flag, bool state);

 int nvgpu_init_enabled_flags(struct gk20a *g);
 void nvgpu_free_enabled_flags(struct gk20a *g);

View File

@@ -1,4 +1,4 @@
-# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
 __bug
 __nvgpu_get_pte
 __nvgpu_kfree
@@ -8,7 +8,6 @@ __nvgpu_log_dbg
 __nvgpu_posix_ffs
 __nvgpu_posix_fls
 __nvgpu_readl
-__nvgpu_set_enabled
 __nvgpu_set_pte
 bitmap_clear
 bitmap_find_next_zero_area_off
@@ -109,6 +108,7 @@ nvgpu_pramin_wr_n
 nvgpu_readl
 nvgpu_runlist_construct_locked
 nvgpu_rwsem_init
+nvgpu_set_enabled
 nvgpu_sgt_create_from_mem
 nvgpu_sgt_alignment
 nvgpu_sgt_free

View File

@@ -96,7 +96,7 @@ static void nvgpu_init_vars(struct gk20a *g)
 	nvgpu_init_list_node(&g->boardobj_head);
 	nvgpu_init_list_node(&g->boardobjgrp_head);

-	__nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, platform->has_syncpoints);
+	nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, platform->has_syncpoints);
 }

 static void nvgpu_init_gr_vars(struct gk20a *g)
@@ -161,11 +161,11 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
 	g->can_elpg =
 		nvgpu_platform_is_silicon(g) ? platform->can_elpg_init : false;

-	__nvgpu_set_enabled(g, NVGPU_GPU_CAN_ELCG,
+	nvgpu_set_enabled(g, NVGPU_GPU_CAN_ELCG,
 		nvgpu_platform_is_silicon(g) ? platform->can_elcg : false);
-	__nvgpu_set_enabled(g, NVGPU_GPU_CAN_SLCG,
+	nvgpu_set_enabled(g, NVGPU_GPU_CAN_SLCG,
 		nvgpu_platform_is_silicon(g) ? platform->can_slcg : false);
-	__nvgpu_set_enabled(g, NVGPU_GPU_CAN_BLCG,
+	nvgpu_set_enabled(g, NVGPU_GPU_CAN_BLCG,
 		nvgpu_platform_is_silicon(g) ? platform->can_blcg : false);

 	g->aggressive_sync_destroy = platform->aggressive_sync_destroy;
@@ -175,7 +175,7 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
 #endif
 	g->ptimer_src_freq = platform->ptimer_src_freq;
 	g->support_pmu = support_gk20a_pmu(dev_from_gk20a(g));
-	__nvgpu_set_enabled(g, NVGPU_CAN_RAILGATE, platform->can_railgate_init);
+	nvgpu_set_enabled(g, NVGPU_CAN_RAILGATE, platform->can_railgate_init);
 	g->can_tpc_powergate = platform->can_tpc_powergate;

 	for (i = 0; i < MAX_TPC_PG_CONFIGS; i++)
@@ -187,7 +187,7 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
 		g->railgate_delay = platform->railgate_delay_init;
 	else
 		g->railgate_delay = NVGPU_DEFAULT_RAILGATE_IDLE_TIMEOUT;
-	__nvgpu_set_enabled(g, NVGPU_PMU_PERFMON, platform->enable_perfmon);
+	nvgpu_set_enabled(g, NVGPU_PMU_PERFMON, platform->enable_perfmon);

 	/* set default values to aelpg parameters */
 	g->pmu.aelpg_param[0] = APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US;
@@ -196,14 +196,14 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
 	g->pmu.aelpg_param[3] = APCTRL_POWER_BREAKEVEN_DEFAULT_US;
 	g->pmu.aelpg_param[4] = APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT;

-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_ASPM, !platform->disable_aspm);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_ASPM, !platform->disable_aspm);
 }

 static void nvgpu_init_vbios_vars(struct gk20a *g)
 {
 	struct gk20a_platform *platform = dev_get_drvdata(dev_from_gk20a(g));

-	__nvgpu_set_enabled(g, NVGPU_PMU_RUN_PREOS, platform->run_preos);
+	nvgpu_set_enabled(g, NVGPU_PMU_RUN_PREOS, platform->run_preos);
 	g->vbios_min_version = platform->vbios_min_version;
 }
@@ -219,13 +219,13 @@ static void nvgpu_init_mm_vars(struct gk20a *g)
 	struct gk20a_platform *platform = dev_get_drvdata(dev_from_gk20a(g));

 	g->mm.disable_bigpage = platform->disable_bigpage;
-	__nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE,
+	nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE,
 			platform->honors_aperture);
-	__nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY,
+	nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY,
 			platform->unified_memory);
-	__nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
+	nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
 			platform->unify_address_spaces);
-	__nvgpu_set_enabled(g, NVGPU_MM_FORCE_128K_PMU_VM,
+	nvgpu_set_enabled(g, NVGPU_MM_FORCE_128K_PMU_VM,
 			platform->force_128K_pmu_vm);

 	nvgpu_mutex_init(&g->mm.tlb_lock);

View File

@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics
  *
- * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -86,7 +86,7 @@ static int nvgpu_kernel_shutdown_notification(struct notifier_block *nb,
 			nvgpu_reboot_nb);
 	struct gk20a *g = &l->g;

-	__nvgpu_set_enabled(g, NVGPU_KERNEL_IS_DYING, true);
+	nvgpu_set_enabled(g, NVGPU_KERNEL_IS_DYING, true);
 	return NOTIFY_DONE;
 }
@@ -246,12 +246,12 @@ int nvgpu_finalize_poweron_linux(struct nvgpu_os_linux *l)

 void gk20a_init_linux_characteristics(struct gk20a *g)
 {
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_PARTIAL_MAPPINGS, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_DETERMINISTIC_OPTS, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_USERSPACE_MANAGED_AS, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_PARTIAL_MAPPINGS, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_DETERMINISTIC_OPTS, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_USERSPACE_MANAGED_AS, true);

 	if (IS_ENABLED(CONFIG_SYNC)) {
-		__nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNC_FENCE_FDS, true);
+		nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNC_FENCE_FDS, true);
 	}
 }
@@ -1176,7 +1176,7 @@ int nvgpu_start_gpu_idle(struct gk20a *g)
 	 * Set NVGPU_DRIVER_IS_DYING to avoid gpu being marked
 	 * busy to submit new work to gpu.
 	 */
-	__nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true);
+	nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true);
 	up_write(&l->busy_lock);
@@ -1208,7 +1208,7 @@ void gk20a_driver_start_unload(struct gk20a *g)
 	nvgpu_log(g, gpu_dbg_shutdown, "Driver is now going down!\n");

 	down_write(&l->busy_lock);
-	__nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true);
+	nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true);
 	/* GR SW ready needs to be invalidated at this time with the busy lock
 	 * held to prevent a racing condition on the gr/mm code */
 	g->gr.sw_ready = false;
@@ -1326,12 +1326,12 @@ static int gk20a_probe(struct platform_device *dev)
 	np = nvgpu_get_node(gk20a);
 	if (of_dma_is_coherent(np)) {
-		__nvgpu_set_enabled(gk20a, NVGPU_USE_COHERENT_SYSMEM, true);
-		__nvgpu_set_enabled(gk20a, NVGPU_SUPPORT_IO_COHERENCE, true);
+		nvgpu_set_enabled(gk20a, NVGPU_USE_COHERENT_SYSMEM, true);
+		nvgpu_set_enabled(gk20a, NVGPU_SUPPORT_IO_COHERENCE, true);
 	}

 	if (nvgpu_platform_is_simulation(gk20a))
-		__nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true);
+		nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true);

 	gk20a->irq_stall = platform_get_irq(dev, 0);
 	gk20a->irq_nonstall = platform_get_irq(dev, 1);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -48,7 +48,7 @@ int nvgpu_get_nvhost_dev(struct gk20a *g)
 	} else {
 		if (nvgpu_has_syncpoints(g)) {
 			nvgpu_warn(g, "host1x reference not found. assuming no syncpoints support");
-			__nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, false);
+			nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, false);
 		}
 		return 0;
 	}
@@ -270,7 +270,7 @@ int nvgpu_nvhost_syncpt_init(struct gk20a *g)
 	err = nvgpu_get_nvhost_dev(g);
 	if (err) {
 		nvgpu_err(g, "host1x device not available");
-		__nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, false);
+		nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, false);
 		return -ENOSYS;
 	}
@@ -280,7 +280,7 @@ int nvgpu_nvhost_syncpt_init(struct gk20a *g)
 		&g->syncpt_unit_size);
 	if (err) {
 		nvgpu_err(g, "Failed to get syncpt interface");
-		__nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, false);
+		nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, false);
 		return -ENOSYS;
 	}

View File

@@ -556,8 +556,8 @@ static int nvgpu_pci_probe(struct pci_dev *pdev,
 	np = nvgpu_get_node(g);
 	if (of_dma_is_coherent(np)) {
-		__nvgpu_set_enabled(g, NVGPU_USE_COHERENT_SYSMEM, true);
-		__nvgpu_set_enabled(g, NVGPU_SUPPORT_IO_COHERENCE, true);
+		nvgpu_set_enabled(g, NVGPU_USE_COHERENT_SYSMEM, true);
+		nvgpu_set_enabled(g, NVGPU_SUPPORT_IO_COHERENCE, true);
 	}

 	err = pci_enable_device(pdev);
@@ -643,8 +643,8 @@ static int nvgpu_pci_probe(struct pci_dev *pdev,
 			goto err_free_irq;
 		}
 		/* Enable Semaphore SHIM on nvlink only for now. */
-		__nvgpu_set_enabled(g, NVGPU_SUPPORT_NVLINK, false);
-		__nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, false);
+		nvgpu_set_enabled(g, NVGPU_SUPPORT_NVLINK, false);
+		nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, false);
 	} else {
 		err = nvgpu_nvhost_syncpt_init(g);
 		if (err) {

View File

@@ -1,7 +1,7 @@
 /*
  * GK20A Tegra Platform Interface
  *
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -646,7 +646,7 @@ int gk20a_tegra_init_secure_alloc(struct gk20a_platform *platform)
 	secure_buffer->destroy = gk20a_tegra_secure_page_destroy;

 	g->ops.secure_alloc = gk20a_tegra_secure_alloc;
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_VPR, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_VPR, true);

 	return 0;
 }
@@ -796,7 +796,7 @@ static int gk20a_tegra_probe(struct device *dev)
 	if (joint_xpu_rail) {
 		nvgpu_log_info(g, "XPU rails are joint\n");
-		__nvgpu_set_enabled(g, NVGPU_CAN_RAILGATE, false);
+		nvgpu_set_enabled(g, NVGPU_CAN_RAILGATE, false);
 	}

 	platform->g->clk.gpc_pll.id = GK20A_GPC_PLL;

View File

@@ -84,7 +84,7 @@ int nvgpu_init_sim_support_linux_pci(struct gk20a *g)
 	bool is_simulation;

 	is_simulation = _nvgpu_pci_is_simulation(g, sim_r());
-	__nvgpu_set_enabled(g, NVGPU_IS_FMODEL, is_simulation);
+	nvgpu_set_enabled(g, NVGPU_IS_FMODEL, is_simulation);

 	if (!is_simulation)
 		return 0;

View File

@@ -328,10 +328,10 @@ static ssize_t railgate_enable_store(struct device *dev,
 		return -EINVAL;

 	if (railgate_enable && !enabled) {
-		__nvgpu_set_enabled(g, NVGPU_CAN_RAILGATE, true);
+		nvgpu_set_enabled(g, NVGPU_CAN_RAILGATE, true);
 		pm_runtime_set_autosuspend_delay(dev, g->railgate_delay);
 	} else if (railgate_enable == 0 && enabled) {
-		__nvgpu_set_enabled(g, NVGPU_CAN_RAILGATE, false);
+		nvgpu_set_enabled(g, NVGPU_CAN_RAILGATE, false);
 		pm_runtime_set_autosuspend_delay(dev, -1);
 	}
 	/* wake-up system to make rail-gating setting effective */

View File

@@ -1,7 +1,7 @@
 /*
  * Virtualized GPU for Linux
  *
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -91,12 +91,12 @@ static void vgpu_init_vars(struct gk20a *g, struct gk20a_platform *platform)

 	g->aggressive_sync_destroy = platform->aggressive_sync_destroy;
 	g->aggressive_sync_destroy_thresh = platform->aggressive_sync_destroy_thresh;
-	__nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, platform->has_syncpoints);
+	nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, platform->has_syncpoints);
 	g->ptimer_src_freq = platform->ptimer_src_freq;
-	__nvgpu_set_enabled(g, NVGPU_CAN_RAILGATE, platform->can_railgate_init);
+	nvgpu_set_enabled(g, NVGPU_CAN_RAILGATE, platform->can_railgate_init);
 	g->railgate_delay = platform->railgate_delay_init;
-	__nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
+	nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
 			platform->unify_address_spaces);
 }
@@ -334,7 +334,7 @@ int vgpu_probe(struct platform_device *pdev)
 	l->dev = dev;
 	if (tegra_platform_is_vdk())
-		__nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true);
+		nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true);

 	gk20a->is_virtual = true;

View File

@@ -305,13 +305,13 @@ static int tu104_init_gpu_characteristics(struct gk20a *g)
 		return err;
 	}

-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_GET_TEMPERATURE, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_GET_TEMPERATURE, true);

 	if (nvgpu_has_syncpoints(g)) {
-		__nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNCPOINT_ADDRESS, true);
-		__nvgpu_set_enabled(g, NVGPU_SUPPORT_USER_SYNCPOINT, true);
+		nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNCPOINT_ADDRESS, true);
+		nvgpu_set_enabled(g, NVGPU_SUPPORT_USER_SYNCPOINT, true);
 	}
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_USERMODE_SUBMIT, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_USERMODE_SUBMIT, true);

 	return 0;
 }
@@ -1204,12 +1204,12 @@ int tu104_init_hal(struct gk20a *g)
 	gops->get_litter_value = tu104_ops.get_litter_value;
 	gops->semaphore_wakeup = gk20a_channel_semaphore_wakeup;

-	__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
-	__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
-	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR, true);
-	__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_SEC2_RTOS, true);
+	nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
+	nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
+	nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR, true);
+	nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_SEC2_RTOS, true);

 	/* for now */
 	gops->clk.support_clk_freq_controller = false;
@@ -1235,9 +1235,9 @@ int tu104_init_hal(struct gk20a *g)
 		gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;

 		/* Disable pmu pstate, as there is no pmu support */
-		__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
-		__nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP,
+		nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
+		nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP,
 				false);

 		/* Disable fb mem_unlock */
 		gops->fb.mem_unlock = NULL;
@@ -1247,8 +1247,8 @@ int tu104_init_hal(struct gk20a *g)
 		gops->clk.support_clk_freq_controller = false;
 	} else {
-		__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, true);
-		__nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
+		nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, true);
+		nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
 	}

 	g->pmu_lsf_pmu_wpr_init_done = 0;

View File

@@ -55,7 +55,7 @@ int vgpu_fecs_trace_init(struct gk20a *g)
 		nvgpu_info(g, "does not support fecs trace");
 		goto fail;
 	}
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_FECS_CTXSW_TRACE, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_FECS_CTXSW_TRACE, true);

 	vcst->cookie = vgpu_ivm_mempool_reserve(mempool);
 	if (IS_ERR(vcst->cookie)) {

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -33,7 +33,7 @@ void vgpu_gr_gm20b_init_cyclestats(struct gk20a *g)
 	bool snapshots_supported = true;

 	/* cyclestats not supported on vgpu */
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS, false);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS, false);

 	g->gr.max_css_buffer_size = vgpu_css_get_buffer_size(g);
@@ -41,8 +41,7 @@ void vgpu_gr_gm20b_init_cyclestats(struct gk20a *g)
 	if (g->gr.max_css_buffer_size == 0)
 		snapshots_supported = false;

-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS_SNAPSHOT,
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_CYCLE_STATS_SNAPSHOT,
 			snapshots_supported);
 #endif
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -28,11 +28,11 @@
 int vgpu_gp10b_fuse_check_priv_security(struct gk20a *g)
 {
 	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
-		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
-		__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
+		nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
+		nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
 	} else {
-		__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
-		__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
+		nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
+		nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
 	}

 	return 0;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -38,13 +38,13 @@ int vgpu_gv11b_init_gpu_characteristics(struct gk20a *g)
 		return err;
 	}

-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);
-	__nvgpu_set_enabled(g, NVGPU_USE_COHERENT_SYSMEM, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_IO_COHERENCE, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_SCG, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNCPOINT_ADDRESS, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_USER_SYNCPOINT, true);
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_USERMODE_SUBMIT, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);
+	nvgpu_set_enabled(g, NVGPU_USE_COHERENT_SYSMEM, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_IO_COHERENCE, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_SCG, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNCPOINT_ADDRESS, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_USER_SYNCPOINT, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_USERMODE_SUBMIT, true);

 	return 0;
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -280,10 +280,10 @@ int vgpu_init_gpu_characteristics(struct gk20a *g)
 	if (err)
 		return err;

-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, false);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, false);

 	/* features vgpu does not support */
-	__nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, false);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, false);

 	return 0;
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -309,7 +309,7 @@ int test_fuse_gm20b_check_fmodel(struct unit_module *m,
 	int ret = UNIT_SUCCESS;
 	int result;

-	__nvgpu_set_enabled(g, NVGPU_IS_FMODEL, true);
+	nvgpu_set_enabled(g, NVGPU_IS_FMODEL, true);

 	result = g->ops.fuse.check_priv_security(g);
 	if (result != 0) {
@@ -328,6 +328,6 @@ int test_fuse_gm20b_check_fmodel(struct unit_module *m,
 		ret = UNIT_FAIL;
 	}

-	__nvgpu_set_enabled(g, NVGPU_IS_FMODEL, false);
+	nvgpu_set_enabled(g, NVGPU_IS_FMODEL, false);
 	return ret;
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -242,7 +242,7 @@ int test_fuse_gp10b_check_fmodel(struct unit_module *m,
 	int ret = UNIT_SUCCESS;
 	int result;

-	__nvgpu_set_enabled(g, NVGPU_IS_FMODEL, true);
+	nvgpu_set_enabled(g, NVGPU_IS_FMODEL, true);

 	result = g->ops.fuse.check_priv_security(g);
 	if (result != 0) {
@@ -261,7 +261,6 @@ int test_fuse_gp10b_check_fmodel(struct unit_module *m,
 		ret = UNIT_FAIL;
 	}

-	__nvgpu_set_enabled(g, NVGPU_IS_FMODEL, false);
-
+	nvgpu_set_enabled(g, NVGPU_IS_FMODEL, false);
 	return ret;
 }

View File

@@ -266,12 +266,12 @@ static const struct nvgpu_sgt_ops nvgpu_sgt_posix_ops = {
 static void init_platform(struct unit_module *m, struct gk20a *g, bool is_iGPU)
 {
 	if (is_iGPU) {
-		__nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);
+		nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);
 		/* Features below are mostly to cover corner cases */
-		__nvgpu_set_enabled(g, NVGPU_USE_COHERENT_SYSMEM, true);
-		__nvgpu_set_enabled(g, NVGPU_SUPPORT_NVLINK, true);
+		nvgpu_set_enabled(g, NVGPU_USE_COHERENT_SYSMEM, true);
+		nvgpu_set_enabled(g, NVGPU_SUPPORT_NVLINK, true);
 	} else {
-		__nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, false);
+		nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, false);
 	}
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -726,7 +726,7 @@ cleanup:
 static int test_pd_cache_env_init(struct unit_module *m,
 				  struct gk20a *g, void *args)
 {
-	__nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);
+	nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);

 	return UNIT_SUCCESS;
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -29,7 +29,7 @@
 static int test_fault_injection_init(struct unit_module *m,
 				     struct gk20a *g, void *__args)
 {
-	__nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);
+	nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY, true);

 	return UNIT_SUCCESS;
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -483,7 +483,7 @@ static int test_pramin_nvgpu_dying(struct unit_module *m, struct gk20a *g,
 	if (init_test_env(m, g) != 0) {
 		unit_return_fail(m, "Module init failed\n");
 	}
-	__nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true);
+	nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true);
 	/*
 	 * When the GPU is dying, PRAMIN should prevent any accesses, so
 	 * pointers to nvgpu_mem and destination data don't matter and can be
@@ -493,7 +493,7 @@ static int test_pramin_nvgpu_dying(struct unit_module *m, struct gk20a *g,
 	nvgpu_pramin_rd_n(g, NULL, 0, 1, NULL);

 	/* Restore GPU driver state for other tests */
-	__nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, false);
+	nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, false);

 	return UNIT_SUCCESS;
 }