gpu: nvgpu: gv11b: Add secure boot support

This patch adds secure boot support for T194:
- adds a gv11b ACR unit (acr_gv11b.c/.h) that patches the HS ucode
  signatures, copies the ACR ucode into memory, fills in the bootloader
  DMEM descriptor, and bootstraps the PMU falcon with the HS bootloader;
- reads the fuse_opt_priv_sec_en_r() fuse to decide whether priv
  security is enabled, instead of hardcoding non-secure boot;
- routes the priv-security-dependent HAL ops to the gp106/gv11b
  variants and marks FECS/GPCCS as lazy-bootstrap/priv-load falcons.

JIRA GPUT19X-5

Change-Id: If78e5e0ecfa58bcac132716c7f2c155f21899027
Signed-off-by: Deepak Goyal <dgoyal@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1514558
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Author: Deepak Goyal <dgoyal@nvidia.com>
Date: 2017-08-30 15:03:25 +05:30 (committed by mobile promotions)
Commit: c094ea1617, parent: 1ac8f6477d
7 changed files with 379 additions and 13 deletions

Makefile

@@ -15,6 +15,7 @@ nvgpu-y += \
	$(nvgpu-t19x)/gv11b/ce_gv11b.o \
	$(nvgpu-t19x)/gv11b/gr_ctx_gv11b.o \
	$(nvgpu-t19x)/gv11b/pmu_gv11b.o \
+	$(nvgpu-t19x)/gv11b/acr_gv11b.o \
	$(nvgpu-t19x)/gv11b/subctx_gv11b.o \
	$(nvgpu-t19x)/gv11b/regops_gv11b.o \
	$(nvgpu-t19x)/gv100/mm_gv100.o \

gv11b/acr_gv11b.c (new file)

@@ -0,0 +1,285 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#include <nvgpu/types.h>
#include <linux/platform/tegra/mc.h>
#include <nvgpu/dma.h>
#include <nvgpu/gmmu.h>
#include <nvgpu/timers.h>
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/kmem.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/acr/nvgpu_acr.h>
#include <nvgpu/firmware.h>
#include "gk20a/gk20a.h"
#include "acr_gv11b.h"
#include "pmu_gv11b.h"
#include "gk20a/pmu_gk20a.h"
#include "gm20b/mm_gm20b.h"
#include "gm20b/acr_gm20b.h"
#include "gp106/acr_gp106.h"
#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
/*Defines*/
#define gv11b_dbg_pmu(fmt, arg...) \
	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
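
/* ORs the 32-bit halves of value into dma_addr; this assumes the
 * destination descriptor starts out zeroed.
 */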
static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
{
	dma_addr->lo |= u64_lo32(value);
	dma_addr->hi |= u64_hi32(value);
}

/*Externs*/
/*Forwards*/
/*Loads ACR bin to FB mem and bootstraps PMU with bootloader code
 * start and end are addresses of ucode blob in non-WPR region*/
int gv11b_bootstrap_hs_flcn(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->pmu.vm;
	int err = 0;
	u64 *acr_dmem;
	u32 img_size_in_bytes = 0;
	u32 status, size, index;
	u64 start;
	struct acr_desc *acr = &g->acr;
	struct nvgpu_firmware *acr_fw = acr->acr_fw;
	struct flcn_bl_dmem_desc_v1 *bl_dmem_desc = &acr->bl_dmem_desc_v1;
	u32 *acr_ucode_header_t210_load;
	u32 *acr_ucode_data_t210_load;

	start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
	size = acr->ucode_blob.size;

	gv11b_dbg_pmu("acr ucode blob start %llx\n", start);
	gv11b_dbg_pmu("acr ucode blob size %x\n", size);
	gv11b_dbg_pmu("");
	if (!acr_fw) {
		/*First time init case*/
		acr_fw = nvgpu_request_firmware(g,
				GM20B_HSBIN_PMU_UCODE_IMAGE, 0);
		if (!acr_fw) {
			nvgpu_err(g, "pmu ucode get fail");
			return -ENOENT;
		}
		acr->acr_fw = acr_fw;
		acr->hsbin_hdr = (struct bin_hdr *)acr_fw->data;
		acr->fw_hdr = (struct acr_fw_header *)(acr_fw->data +
				acr->hsbin_hdr->header_offset);
		acr_ucode_data_t210_load = (u32 *)(acr_fw->data +
				acr->hsbin_hdr->data_offset);
		acr_ucode_header_t210_load = (u32 *)(acr_fw->data +
				acr->fw_hdr->hdr_offset);
		img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256);

		gv11b_dbg_pmu("sig dbg offset %u\n",
				acr->fw_hdr->sig_dbg_offset);
		gv11b_dbg_pmu("sig dbg size %u\n", acr->fw_hdr->sig_dbg_size);
		gv11b_dbg_pmu("sig prod offset %u\n",
				acr->fw_hdr->sig_prod_offset);
		gv11b_dbg_pmu("sig prod size %u\n",
				acr->fw_hdr->sig_prod_size);
		gv11b_dbg_pmu("patch loc %u\n", acr->fw_hdr->patch_loc);
		gv11b_dbg_pmu("patch sig %u\n", acr->fw_hdr->patch_sig);
		gv11b_dbg_pmu("header offset %u\n", acr->fw_hdr->hdr_offset);
		gv11b_dbg_pmu("header size %u\n", acr->fw_hdr->hdr_size);

		/* Let's patch the signatures first.. */
		if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load,
				(u32 *)(acr_fw->data +
					acr->fw_hdr->sig_prod_offset),
				(u32 *)(acr_fw->data +
					acr->fw_hdr->sig_dbg_offset),
				(u32 *)(acr_fw->data +
					acr->fw_hdr->patch_loc),
				(u32 *)(acr_fw->data +
					acr->fw_hdr->patch_sig)) < 0) {
			nvgpu_err(g, "patch signatures fail");
			err = -1;
			goto err_release_acr_fw;
		}
		err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
				&acr->acr_ucode);
		if (err) {
			err = -ENOMEM;
			goto err_release_acr_fw;
		}
		for (index = 0; index < 9; index++)
			gv11b_dbg_pmu("acr_ucode_header_t210_load %u\n",
					acr_ucode_header_t210_load[index]);

		acr_dmem = (u64 *)
			&(((u8 *)acr_ucode_data_t210_load)[
					acr_ucode_header_t210_load[2]]);
		acr->acr_dmem_desc_v1 = (struct flcn_acr_desc_v1 *)((u8 *)(
			acr->acr_ucode.cpu_va) + acr_ucode_header_t210_load[2]);
		((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_start =
			(start);
		((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_size =
			size;
		((struct flcn_acr_desc_v1 *)acr_dmem)->regions.no_regions = 2;
		((struct flcn_acr_desc_v1 *)acr_dmem)->wpr_offset = 0;

		nvgpu_mem_wr_n(g, &acr->acr_ucode, 0,
				acr_ucode_data_t210_load, img_size_in_bytes);

		/*
		 * In order to execute this binary, we will be using
		 * a bootloader which will load this image into PMU IMEM/DMEM.
		 * Fill up the bootloader descriptor for PMU HAL to use..
		 * TODO: Switch to the standard descriptor once the generic
		 * bootloader is checked in.
		 */
		bl_dmem_desc->signature[0] = 0;
		bl_dmem_desc->signature[1] = 0;
		bl_dmem_desc->signature[2] = 0;
		bl_dmem_desc->signature[3] = 0;
		bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
		flcn64_set_dma(&bl_dmem_desc->code_dma_base,
				acr->acr_ucode.gpu_va);
		bl_dmem_desc->non_sec_code_off = acr_ucode_header_t210_load[0];
		bl_dmem_desc->non_sec_code_size = acr_ucode_header_t210_load[1];
		bl_dmem_desc->sec_code_off = acr_ucode_header_t210_load[5];
		bl_dmem_desc->sec_code_size = acr_ucode_header_t210_load[6];
		bl_dmem_desc->code_entry_point = 0; /* Start at 0th offset */
		flcn64_set_dma(&bl_dmem_desc->data_dma_base,
				acr->acr_ucode.gpu_va +
				acr_ucode_header_t210_load[2]);
		bl_dmem_desc->data_size = acr_ucode_header_t210_load[3];
	} else
		acr->acr_dmem_desc->nonwpr_ucode_blob_size = 0;

	status = pmu_exec_gen_bl(g, bl_dmem_desc, 1);
	if (status != 0) {
		err = status;
		goto err_free_ucode_map;
	}

	return 0;

err_free_ucode_map:
	nvgpu_dma_unmap_free(vm, &acr->acr_ucode);
err_release_acr_fw:
	nvgpu_release_firmware(g, acr_fw);
	acr->acr_fw = NULL;
	return err;
}
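
The header words indexed above (acr_ucode_header_t210_load[0..6]) describe the layout of the HS ucode image. A sketch of the apparent mapping, inferred only from how the indices are consumed in gv11b_bootstrap_hs_flcn(); the struct and field names are illustrative, not taken from the nvgpu headers:

/* Sketch only: inferred from acr_ucode_header_t210_load[] usage above.
 * Word [4] is not referenced by this loader, so its meaning is unknown here.
 */
struct acr_hs_ucode_header_sketch {
	u32 non_sec_code_off;	/* [0] -> bl_dmem_desc->non_sec_code_off */
	u32 non_sec_code_size;	/* [1] -> bl_dmem_desc->non_sec_code_size */
	u32 data_off;		/* [2] DMEM data offset (flcn_acr_desc_v1) */
	u32 data_size;		/* [3] -> bl_dmem_desc->data_size */
	u32 unknown;		/* [4] not used by this code path */
	u32 sec_code_off;	/* [5] -> bl_dmem_desc->sec_code_off */
	u32 sec_code_size;	/* [6] -> bl_dmem_desc->sec_code_size */
};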
static int bl_bootstrap(struct nvgpu_pmu *pmu,
	struct flcn_bl_dmem_desc_v1 *pbl_desc, u32 bl_sz)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct acr_desc *acr = &g->acr;
	struct mm_gk20a *mm = &g->mm;
	u32 virt_addr = 0;
	struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc;
	u32 dst;

	gk20a_dbg_fn("");

	gk20a_writel(g, pwr_falcon_itfen_r(),
			gk20a_readl(g, pwr_falcon_itfen_r()) |
			pwr_falcon_itfen_ctxen_enable_f());
	gk20a_writel(g, pwr_pmu_new_instblk_r(),
			pwr_pmu_new_instblk_ptr_f(
			gk20a_mm_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
			pwr_pmu_new_instblk_valid_f(1) |
			pwr_pmu_new_instblk_target_sys_ncoh_f());

	/*copy bootloader interface structure to dmem*/
	nvgpu_flcn_copy_to_dmem(pmu->flcn, 0, (u8 *)pbl_desc,
		sizeof(struct flcn_bl_dmem_desc_v1), 0);

	/* copy bootloader to TOP of IMEM */
	dst = (pwr_falcon_hwcfg_imem_size_v(
			gk20a_readl(g, pwr_falcon_hwcfg_r())) << 8) - bl_sz;

	nvgpu_flcn_copy_to_imem(pmu->flcn, dst,
		(u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
		pmu_bl_gm10x_desc->bl_start_tag);

	gv11b_dbg_pmu("Before starting falcon with BL\n");

	virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8;

	nvgpu_flcn_bootstrap(pmu->flcn, virt_addr);

	return 0;
}
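
The destination computed in bl_bootstrap() places the bootloader flush against the top of IMEM: the hwcfg imem_size field counts 256-byte blocks, so shifting left by 8 converts blocks to bytes. Likewise, virt_addr converts the bootloader's start tag (a 256-byte page index) into a byte address with << 8. A worked sketch of the arithmetic, with an assumed 64 KB IMEM and 4 KB bootloader (both values illustrative):

/* pwr_falcon_hwcfg imem_size counts 256-byte blocks, so << 8 gives bytes. */
u32 imem_blocks = 256;                /* assumed: 64 KB IMEM */
u32 imem_bytes  = imem_blocks << 8;   /* 256 * 256 = 65536 bytes */
u32 bl_sz       = 4096;               /* assumed bootloader size */
u32 dst         = imem_bytes - bl_sz; /* 61440: bootloader at top of IMEM */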
int gv11b_init_pmu_setup_hw1(struct gk20a *g,
		void *desc, u32 bl_sz)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	int err;

	gk20a_dbg_fn("");

	nvgpu_mutex_acquire(&pmu->isr_mutex);
	nvgpu_flcn_reset(pmu->flcn);
	pmu->isr_enabled = true;
	nvgpu_mutex_release(&pmu->isr_mutex);

	/* setup apertures - virtual */
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_noncoherent_sysmem_f());
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_VIRT),
			pwr_fbif_transcfg_mem_type_virtual_f());
	/* setup apertures - physical */
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_VID),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_noncoherent_sysmem_f());
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_COH),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_coherent_sysmem_f());
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_NCOH),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_noncoherent_sysmem_f());

	/*Copying pmu cmdline args*/
	g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu,
		g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK));
	g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode(pmu, 1);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
		pmu, GK20A_PMU_TRACE_BUFSIZE);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
		pmu, GK20A_PMU_DMAIDX_VIRT);
	nvgpu_flcn_copy_to_dmem(pmu->flcn, g->acr.pmu_args,
		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);

	/*disable irqs for hs falcon booting as we will poll for halt*/
	nvgpu_mutex_acquire(&pmu->isr_mutex);
	pmu_enable_irq(pmu, false);
	pmu->isr_enabled = false;
	nvgpu_mutex_release(&pmu->isr_mutex);

	/*Clearing mailbox register used to reflect capabilities*/
	gk20a_writel(g, pwr_falcon_mailbox1_r(), 0);

	err = bl_bootstrap(pmu, desc, bl_sz);
	if (err)
		return err;

	return 0;
}
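
For readability, the FBIF aperture programming in gv11b_init_pmu_setup_hw1() boils down to the following mapping. This is a restatement of the register writes above, not new configuration; note that on this integrated GPU even the "vidmem" index targets system memory:

/* Restates the pwr_fbif_transcfg programming above; illustrative only. */
static const struct {
	u32 dmaidx;
	const char *aperture;
} pmu_aperture_sketch[] = {
	{ GK20A_PMU_DMAIDX_UCODE,         "physical, noncoherent sysmem" },
	{ GK20A_PMU_DMAIDX_VIRT,          "virtual (PMU VM)" },
	{ GK20A_PMU_DMAIDX_PHYS_VID,      "physical, noncoherent sysmem" },
	{ GK20A_PMU_DMAIDX_PHYS_SYS_COH,  "physical, coherent sysmem" },
	{ GK20A_PMU_DMAIDX_PHYS_SYS_NCOH, "physical, noncoherent sysmem" },
};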

gv11b/acr_gv11b.h (new file)

@@ -0,0 +1,21 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef __ACR_GV11B_H_
#define __ACR_GV11B_H_

struct gk20a;

int gv11b_bootstrap_hs_flcn(struct gk20a *g);
int gv11b_init_pmu_setup_hw1(struct gk20a *g,
		void *desc, u32 bl_sz);

#endif /*__ACR_GV11B_H_*/

gv11b/hal_gv11b.c

@@ -55,6 +55,7 @@
#include "gp10b/gr_gp10b.h" #include "gp10b/gr_gp10b.h"
#include "gp106/pmu_gp106.h" #include "gp106/pmu_gp106.h"
#include "gp106/acr_gp106.h"
#include "hal_gv11b.h" #include "hal_gv11b.h"
#include "gr_gv11b.h" #include "gr_gv11b.h"
@@ -65,6 +66,7 @@
#include "gr_ctx_gv11b.h" #include "gr_ctx_gv11b.h"
#include "mm_gv11b.h" #include "mm_gv11b.h"
#include "pmu_gv11b.h" #include "pmu_gv11b.h"
#include "acr_gv11b.h"
#include "fb_gv11b.h" #include "fb_gv11b.h"
#include "fifo_gv11b.h" #include "fifo_gv11b.h"
#include "gv11b_gating_reglist.h" #include "gv11b_gating_reglist.h"
@@ -79,6 +81,7 @@
#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
#include <nvgpu/hw/gv11b/hw_top_gv11b.h>
#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
+#include <nvgpu/hw/gv11b/hw_fuse_gv11b.h>

static int gv11b_get_litter_value(struct gk20a *g, int value)
{
@@ -633,6 +636,8 @@ int gv11b_init_hal(struct gk20a *g)
{
	struct gpu_ops *gops = &g->ops;
	struct nvgpu_gpu_characteristics *c = &g->gpu_characteristics;
+	u32 val;
+	bool priv_security;

	gops->ltc = gv11b_ops.ltc;
	gops->ce2 = gv11b_ops.ce2;
@@ -661,33 +666,38 @@ int gv11b_init_hal(struct gk20a *g)
		gv11b_ops.chip_init_gpu_characteristics;
	gops->get_litter_value = gv11b_ops.get_litter_value;

-	/* boot in non-secure modes for time being */
+	val = gk20a_readl(g, fuse_opt_priv_sec_en_r());
+	if (val) {
+		priv_security = true;
+		pr_err("priv security is enabled\n");
+	} else {
+		priv_security = false;
+		pr_err("priv security is disabled\n");
+	}
	__nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, false);
-	__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, false);
-	__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, false);
+	__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, priv_security);
+	__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, priv_security);

	/* priv security dependent ops */
	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
		/* Add in ops from gm20b acr */
-		gops->pmu.prepare_ucode = prepare_ucode_blob,
-		gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn,
-		gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap,
-		gops->pmu.is_priv_load = gm20b_is_priv_load,
+		gops->pmu.prepare_ucode = gp106_prepare_ucode_blob,
+		gops->pmu.pmu_setup_hw_and_bootstrap = gv11b_bootstrap_hs_flcn,
		gops->pmu.get_wpr = gm20b_wpr_info,
		gops->pmu.alloc_blob_space = gm20b_alloc_blob_space,
		gops->pmu.pmu_populate_loader_cfg =
-			gm20b_pmu_populate_loader_cfg,
+			gp106_pmu_populate_loader_cfg,
		gops->pmu.flcn_populate_bl_dmem_desc =
-			gm20b_flcn_populate_bl_dmem_desc,
+			gp106_flcn_populate_bl_dmem_desc,
		gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt,
		gops->pmu.falcon_clear_halt_interrupt_status =
			clear_halt_interrupt_status,
-		gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1,
+		gops->pmu.init_falcon_setup_hw = gv11b_init_pmu_setup_hw1,
		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
		gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
-		gops->pmu.is_lazy_bootstrap = gp10b_is_lazy_bootstrap;
-		gops->pmu.is_priv_load = gp10b_is_priv_load;
+		gops->pmu.is_lazy_bootstrap = gv11b_is_lazy_bootstrap,
+		gops->pmu.is_priv_load = gv11b_is_priv_load,

		gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode;
	} else {
@@ -702,8 +712,10 @@ int gv11b_init_hal(struct gk20a *g)
		gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
	}

+	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
	gv11b_init_uncompressed_kind_map();
	gv11b_init_kind_attr();

+	g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
	g->name = "gv11b";

gv11b/pmu_gv11b.c

@@ -28,6 +28,7 @@
#include "gp106/pmu_gp106.h" #include "gp106/pmu_gp106.h"
#include "pmu_gv11b.h" #include "pmu_gv11b.h"
#include "acr_gv11b.h"
#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h> #include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
@@ -41,6 +42,42 @@ bool gv11b_is_pmu_supported(struct gk20a *g)
	return true;
}

+bool gv11b_is_lazy_bootstrap(u32 falcon_id)
+{
+	bool enable_status = false;
+
+	switch (falcon_id) {
+	case LSF_FALCON_ID_FECS:
+		enable_status = true;
+		break;
+	case LSF_FALCON_ID_GPCCS:
+		enable_status = true;
+		break;
+	default:
+		break;
+	}
+
+	return enable_status;
+}
+
+bool gv11b_is_priv_load(u32 falcon_id)
+{
+	bool enable_status = false;
+
+	switch (falcon_id) {
+	case LSF_FALCON_ID_FECS:
+		enable_status = true;
+		break;
+	case LSF_FALCON_ID_GPCCS:
+		enable_status = true;
+		break;
+	default:
+		break;
+	}
+
+	return enable_status;
+}

int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
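
The two predicates added above report FECS and GPCCS as lazily bootstrapped and priv-loaded, i.e. those LS falcons are skipped at ACR boot and brought up on demand later. A hypothetical call-site sketch; the loop, falcon list, and bootstrap_ls_falcon() helper are illustrative, not part of this patch:

/* Hypothetical LSF bootstrap loop; list and helper are assumed. */
u32 i, id;

for (i = 0; i < num_lsf_falcons; i++) {
	id = lsf_falcon_ids[i];
	if (g->ops.pmu.is_lazy_bootstrap(id))
		continue; /* FECS/GPCCS: deferred until first use */
	bootstrap_ls_falcon(g, id); /* assumed helper */
}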

gv11b/pmu_gv11b.h

@@ -22,5 +22,7 @@ bool gv11b_is_pmu_supported(struct gk20a *g);
int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu);
int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id);
int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id);
+bool gv11b_is_lazy_bootstrap(u32 falcon_id);
+bool gv11b_is_priv_load(u32 falcon_id);

#endif /*__PMU_GV11B_H_*/

nvgpu/hw/gv11b/hw_fuse_gv11b.h

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -134,4 +134,12 @@ static inline u32 fuse_opt_feature_fuses_override_disable_r(void)
{
	return 0x000213f0;
}
+static inline u32 fuse_opt_sec_debug_en_r(void)
+{
+	return 0x00021218;
+}
+static inline u32 fuse_opt_priv_sec_en_r(void)
+{
+	return 0x00021434;
+}
#endif
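
fuse_opt_priv_sec_en_r() is the accessor gv11b_init_hal() reads above to choose between the secure (ACR) and non-secure boot paths; fuse_opt_sec_debug_en_r() is added here but not yet referenced elsewhere in this patch. A minimal sketch of the consumer side, mirroring the HAL change:

/* Mirrors the logic added in gv11b_init_hal(); illustrative only. */
bool priv_security = gk20a_readl(g, fuse_opt_priv_sec_en_r()) != 0;

__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, priv_security);
__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, priv_security);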