gpu: nvgpu: install empty register access map in safety

g->ops.gr.init.get_access_map() returns a whitelist of register addresses
that can be accessed by SET_FALCON methods when added into the pushbuffer.

The SET_FALCON method does not need to be supported in the safety build.
Hence install an empty register access map in the safety build by adding
a new flag, CONFIG_NVGPU_SET_FALCON_ACCESS_MAP.

Compile out g->ops.gr.init.get_access_map() and the code that writes the
whitelist into the access map buffer.

Note that we still need to configure the base address of the access map in
the context image, even for the safety build.

Jira NVGPU-3995
Bug 2686235

Change-Id: I111b46f96821a09929aff32fcba5bb2215c81b9a
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2185469
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Deepak Nibade
2019-08-30 18:41:55 +05:30
committed by Alex Waterman
parent 434242cd54
commit cbe5472f39
14 changed files with 31 additions and 1 deletions

View File

@@ -52,6 +52,7 @@ ccflags-y += -DCONFIG_NVGPU_IOCTL_NON_FUSA
ccflags-y += -DCONFIG_NVGPU_NON_FUSA
ccflags-y += -DCONFIG_NVGPU_INJECT_HWERR
ccflags-y += -DCONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT
ccflags-y += -DCONFIG_NVGPU_SET_FALCON_ACCESS_MAP
ccflags-y += -DCONFIG_NVGPU_SW_SEMAPHORE
ccflags-y += -DCONFIG_NVGPU_FENCE

View File

@@ -185,6 +185,9 @@ NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_IOCTL_NON_FUSA
CONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT
CONFIG_NVGPU_SET_FALCON_ACCESS_MAP := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_SET_FALCON_ACCESS_MAP
# Enable SW Semaphore for normal build
CONFIG_NVGPU_SW_SEMAPHORE := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_SW_SEMAPHORE

View File

@@ -299,9 +299,11 @@ static int gr_init_access_map(struct gk20a *g, struct nvgpu_gr *gr)
u32 nr_pages =
DIV_ROUND_UP(NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP_SIZE,
PAGE_SIZE);
u32 nr_pages_size = nvgpu_safe_mult_u32(PAGE_SIZE, nr_pages);
#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
u32 *whitelist = NULL;
u32 w, num_entries = 0U;
u32 nr_pages_size = nvgpu_safe_mult_u32(PAGE_SIZE, nr_pages);
#endif
mem = nvgpu_gr_global_ctx_buffer_get_mem(gr->global_ctx_buffer,
NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP);
@@ -311,6 +313,7 @@ static int gr_init_access_map(struct gk20a *g, struct nvgpu_gr *gr)
nvgpu_memset(g, mem, 0, 0, nr_pages_size);
#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
g->ops.gr.init.get_access_map(g, &whitelist, &num_entries);
for (w = 0U; w < num_entries; w++) {
@@ -326,6 +329,7 @@ static int gr_init_access_map(struct gk20a *g, struct nvgpu_gr *gr)
+ map_shift);
nvgpu_mem_wr32(g, mem, (u64)map_byte / (u64)sizeof(u32), x);
}
#endif
return 0;
}

View File

@@ -77,6 +77,7 @@ void gm20b_gr_init_gpc_mmu(struct gk20a *g)
nvgpu_ltc_get_ltc_count(g));
}
#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
void gm20b_gr_init_get_access_map(struct gk20a *g,
u32 **whitelist, u32 *num_entries)
{
@@ -119,6 +120,7 @@ void gm20b_gr_init_get_access_map(struct gk20a *g,
array_size = ARRAY_SIZE(wl_addr_gm20b);
*num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
}
#endif
void gm20b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
struct nvgpu_gr_config *gr_config)

View File

@@ -69,8 +69,10 @@ u32 gm20b_gr_init_get_patch_slots(struct gk20a *g,
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
void gm20b_gr_init_gpc_mmu(struct gk20a *g);
#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
void gm20b_gr_init_get_access_map(struct gk20a *g,
u32 **whitelist, u32 *num_entries);
#endif
void gm20b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
struct nvgpu_gr_config *gr_config);
u32 gm20b_gr_init_get_sm_id_size(void);

View File

@@ -37,6 +37,7 @@
#define GFXP_WFI_TIMEOUT_COUNT_DEFAULT 100000U
#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
void gp10b_gr_init_get_access_map(struct gk20a *g,
u32 **whitelist, u32 *num_entries)
{
@@ -79,6 +80,7 @@ void gp10b_gr_init_get_access_map(struct gk20a *g,
array_size = ARRAY_SIZE(wl_addr_gp10b);
*num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
}
#endif
int gp10b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
struct nvgpu_gr_config *gr_config)

View File

@@ -51,8 +51,10 @@ void gp10b_gr_init_get_default_preemption_modes(
u32 *default_graphics_preempt_mode, u32 *default_compute_preempt_mode);
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
void gp10b_gr_init_get_access_map(struct gk20a *g,
u32 **whitelist, u32 *num_entries);
#endif
int gp10b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
struct nvgpu_gr_config *gr_config);
int gp10b_gr_init_fs_state(struct gk20a *g);

View File

@@ -35,8 +35,10 @@ u32 gv11b_gr_init_get_nonpes_aware_tpc(struct gk20a *g, u32 gpc, u32 tpc,
void gv11b_gr_init_ecc_scrub_reg(struct gk20a *g,
struct nvgpu_gr_config *gr_config);
void gv11b_gr_init_gpc_mmu(struct gk20a *g);
#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
void gv11b_gr_init_get_access_map(struct gk20a *g,
u32 **whitelist, u32 *num_entries);
#endif
void gv11b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
struct nvgpu_gr_config *gr_config);
int gv11b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,

View File

@@ -340,6 +340,7 @@ void gv11b_gr_init_gpc_mmu(struct gk20a *g)
g->ops.fb.mmu_debug_rd(g));
}
#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
void gv11b_gr_init_get_access_map(struct gk20a *g,
u32 **whitelist, u32 *num_entries)
{
@@ -382,6 +383,7 @@ void gv11b_gr_init_get_access_map(struct gk20a *g,
array_size = ARRAY_SIZE(wl_addr_gv11b);
*num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
}
#endif
void gv11b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
struct nvgpu_gr_config *gr_config)

View File

@@ -373,7 +373,9 @@ static const struct gpu_ops gm20b_ops = {
.pes_vsc_stream = gm20b_gr_init_pes_vsc_stream,
.gpc_mmu = gm20b_gr_init_gpc_mmu,
.fifo_access = gm20b_gr_init_fifo_access,
#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
.get_access_map = gm20b_gr_init_get_access_map,
#endif
.get_sm_id_size = gm20b_gr_init_get_sm_id_size,
.sm_id_config = gm20b_gr_init_sm_id_config,
.sm_id_numbering = gm20b_gr_init_sm_id_numbering,

View File

@@ -427,7 +427,9 @@ static const struct gpu_ops gp10b_ops = {
.pes_vsc_stream = gm20b_gr_init_pes_vsc_stream,
.gpc_mmu = gm20b_gr_init_gpc_mmu,
.fifo_access = gm20b_gr_init_fifo_access,
#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
.get_access_map = gp10b_gr_init_get_access_map,
#endif
.get_sm_id_size = gp10b_gr_init_get_sm_id_size,
.sm_id_config = gp10b_gr_init_sm_id_config,
.sm_id_numbering = gm20b_gr_init_sm_id_numbering,

View File

@@ -514,7 +514,9 @@ static const struct gpu_ops gv11b_ops = {
.pes_vsc_stream = gm20b_gr_init_pes_vsc_stream,
.gpc_mmu = gv11b_gr_init_gpc_mmu,
.fifo_access = gm20b_gr_init_fifo_access,
#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
.get_access_map = gv11b_gr_init_get_access_map,
#endif
.get_sm_id_size = gp10b_gr_init_get_sm_id_size,
.sm_id_config = gv11b_gr_init_sm_id_config,
.sm_id_numbering = gv11b_gr_init_sm_id_numbering,

View File

@@ -543,7 +543,9 @@ static const struct gpu_ops tu104_ops = {
.pes_vsc_stream = gm20b_gr_init_pes_vsc_stream,
.gpc_mmu = gv11b_gr_init_gpc_mmu,
.fifo_access = gm20b_gr_init_fifo_access,
#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
.get_access_map = gv11b_gr_init_get_access_map,
#endif
.get_sm_id_size = gp10b_gr_init_get_sm_id_size,
.sm_id_config = gv11b_gr_init_sm_id_config,
.sm_id_numbering = gv11b_gr_init_sm_id_numbering,

View File

@@ -745,8 +745,10 @@ struct gpu_ops {
void (*pes_vsc_stream)(struct gk20a *g);
void (*gpc_mmu)(struct gk20a *g);
void (*fifo_access)(struct gk20a *g, bool enable);
#ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
void (*get_access_map)(struct gk20a *g,
u32 **whitelist, u32 *num_entries);
#endif
u32 (*get_sm_id_size)(void);
int (*sm_id_config)(struct gk20a *g, u32 *tpc_sm_id,
struct nvgpu_gr_config *gr_config);