gpu: nvgpu: update get_access_map

This patch renames the variables used in the gr_init_get_access_map API:
whitelist   -> gr_access_map
num_entries -> gr_access_map_num_entries
wl_addr_*[] -> gr_access_map_*[]

JIRA NVGPU-9849

Change-Id: I3a0a59410af8983867af5bc2f9ff200e56e190c4
Signed-off-by: Rajesh Devaraj <rdevaraj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2891567
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Rajesh Devaraj
Date: 2023-04-20 13:16:39 +00:00
Committed by: mobile promotions
Parent: 9512b9f1de
Commit: 01d3ed09b0
10 changed files with 48 additions and 38 deletions
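For quick reference, this is the get_access_map HAL hook before and after the rename, as declared in struct gops_gr_init (see the final hunk below):

    /* Before the rename: */
    void (*get_access_map)(struct gk20a *g,
            u32 **whitelist, u32 *num_entries);

    /* After the rename: */
    void (*get_access_map)(struct gk20a *g,
            u32 **gr_access_map,
            u32 *gr_access_map_num_entries);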


@@ -359,8 +359,8 @@ static int gr_init_access_map(struct gk20a *g, struct nvgpu_gr *gr)
  NVGPU_CPU_PAGE_SIZE);
  u32 nr_pages_size = nvgpu_safe_mult_u32(NVGPU_CPU_PAGE_SIZE, nr_pages);
  #ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
- u32 *whitelist = NULL;
- u32 w, num_entries = 0U;
+ u32 *gr_access_map = NULL;
+ u32 w, gr_access_map_num_entries = 0U;
  #endif
  nvgpu_log(g, gpu_dbg_gr, " ");
@@ -374,15 +374,16 @@ static int gr_init_access_map(struct gk20a *g, struct nvgpu_gr *gr)
  nvgpu_memset(g, mem, 0, 0, nr_pages_size);
  #ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
- g->ops.gr.init.get_access_map(g, &whitelist, &num_entries);
+ g->ops.gr.init.get_access_map(g, &gr_access_map,
+ 	&gr_access_map_num_entries);
- for (w = 0U; w < num_entries; w++) {
+ for (w = 0U; w < gr_access_map_num_entries; w++) {
  u32 map_bit, map_byte, map_shift, x;
- map_bit = whitelist[w] >> 2;
+ map_bit = gr_access_map[w] >> 2;
  map_byte = map_bit >> 3;
  map_shift = map_bit & 0x7U; /* i.e. 0-7 */
  nvgpu_log_info(g, "access map addr:0x%x byte:0x%x bit:%d",
- 	whitelist[w], map_byte, map_shift);
+ 	gr_access_map[w], map_byte, map_shift);
  x = nvgpu_mem_rd32(g, mem, (u64)map_byte / (u64)sizeof(u32));
  x |= BIT32(
  (map_byte % (u32)sizeof(u32) * BITS_PER_BYTE_U32)
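
The loop above turns every register offset returned by get_access_map into one bit of a page-backed bitmap (one bit per 4-byte register). Below is a minimal standalone sketch of the same index math; the helper name and signature are illustrative only, not part of nvgpu:

    #include <stdint.h>

    /* Illustrative helper (not nvgpu code): for a register byte offset
     * 'addr', compute which 32-bit word of the access map holds its bit
     * and the bit position inside that word. Mirrors the shifts in
     * gr_init_access_map(): addr >> 2 selects the register (one bit per
     * 4-byte register), >> 3 selects the byte, & 0x7 the bit in the byte.
     */
    static void access_map_position(uint32_t addr, uint32_t *word_idx,
                                    uint32_t *bit_in_word)
    {
            uint32_t map_bit = addr >> 2;        /* one bit per 32-bit register */
            uint32_t map_byte = map_bit >> 3;    /* byte holding that bit */
            uint32_t map_shift = map_bit & 0x7U; /* bit index within the byte */

            *word_idx = map_byte / (uint32_t)sizeof(uint32_t);
            *bit_in_word = (map_byte % (uint32_t)sizeof(uint32_t)) * 8U + map_shift;
    }

For instance, offset 0x418380 (gr_pri_gpcs_rasterarb_line_class) works out to bit 0 of word 0x8307 of the bitmap.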


@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -105,9 +105,10 @@ int ga10b_gr_init_enable_mme_config_ptimer(struct gk20a *g,
  #endif
  #ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
  void ga10b_gr_init_get_access_map(struct gk20a *g,
- 	u32 **whitelist, u32 *num_entries)
+ 	u32 **gr_access_map,
+ 	u32 *gr_access_map_num_entries)
  {
- 	static u32 wl_addr_ga10b[] = {
+ 	static u32 gr_access_map_ga10b[] = {
  /* this list must be sorted (low to high) */
  0x418380, /* gr_pri_gpcs_rasterarb_line_class */
  0x418800, /* gr_pri_gpcs_setup_debug */
@@ -148,8 +149,8 @@ void ga10b_gr_init_get_access_map(struct gk20a *g,
  size_t array_size;
  (void)g;
- *whitelist = wl_addr_ga10b;
- array_size = ARRAY_SIZE(wl_addr_ga10b);
- *num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
+ *gr_access_map = gr_access_map_ga10b;
+ array_size = ARRAY_SIZE(gr_access_map_ga10b);
+ *gr_access_map_num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
  }
  #endif
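
Each per-chip table is documented as "sorted (low to high)". Below is a hypothetical sanity check, not part of nvgpu, that a unit test could run over any of these tables:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical check (illustrative only): verify an access-map table
     * honours the "sorted low to high" requirement, i.e. no entry is
     * lower than the one before it.
     */
    static bool access_map_is_sorted(const uint32_t *gr_access_map,
                                     size_t num_entries)
    {
            size_t i;

            for (i = 1; i < num_entries; i++) {
                    if (gr_access_map[i] < gr_access_map[i - 1]) {
                            return false;
                    }
            }
            return true;
    }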


@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -68,7 +68,8 @@ void ga10b_gr_init_commit_rops_crop_override(struct gk20a *g,
  #ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
  void ga10b_gr_init_get_access_map(struct gk20a *g,
- 	u32 **whitelist, u32 *num_entries);
+ 	u32 **gr_access_map,
+ 	u32 *gr_access_map_num_entries);
  #endif
  void ga10b_gr_init_fs_state(struct gk20a *g);


@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -80,9 +80,10 @@ void gm20b_gr_init_gpc_mmu(struct gk20a *g)
  #ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
  void gm20b_gr_init_get_access_map(struct gk20a *g,
- 	u32 **whitelist, u32 *num_entries)
+ 	u32 **gr_access_map,
+ 	u32 *gr_access_map_num_entries)
  {
- 	static u32 wl_addr_gm20b[] = {
+ 	static u32 gr_access_map_gm20b[] = {
  /* this list must be sorted (low to high) */
  0x404468, /* gr_pri_mme_max_instructions */
  0x418380, /* gr_pri_gpcs_rasterarb_line_class */
@@ -124,9 +125,9 @@ void gm20b_gr_init_get_access_map(struct gk20a *g,
  size_t array_size;
  (void)g;
- *whitelist = wl_addr_gm20b;
- array_size = ARRAY_SIZE(wl_addr_gm20b);
- *num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
+ *gr_access_map = gr_access_map_gm20b;
+ array_size = ARRAY_SIZE(gr_access_map_gm20b);
+ *gr_access_map_num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
  }
  #endif


@@ -86,7 +86,8 @@ bool gm20b_gr_init_is_allowed_sw_bundle(struct gk20a *g,
  void gm20b_gr_init_gpc_mmu(struct gk20a *g);
  #ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
  void gm20b_gr_init_get_access_map(struct gk20a *g,
- 	u32 **whitelist, u32 *num_entries);
+ 	u32 **gr_access_map,
+ 	u32 *gr_access_map_num_entries);
  #endif
  void gm20b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
  struct nvgpu_gr_config *gr_config,


@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -39,9 +39,10 @@
  #ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
  void gp10b_gr_init_get_access_map(struct gk20a *g,
- 	u32 **whitelist, u32 *num_entries)
+ 	u32 **gr_access_map,
+ 	u32 *gr_access_map_num_entries)
  {
- 	static u32 wl_addr_gp10b[] = {
+ 	static u32 gr_access_map_gp10b[] = {
  /* this list must be sorted (low to high) */
  0x404468, /* gr_pri_mme_max_instructions */
  0x418380, /* gr_pri_gpcs_rasterarb_line_class */
@@ -79,9 +80,9 @@ void gp10b_gr_init_get_access_map(struct gk20a *g,
  size_t array_size;
  (void)g;
- *whitelist = wl_addr_gp10b;
- array_size = ARRAY_SIZE(wl_addr_gp10b);
- *num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
+ *gr_access_map = gr_access_map_gp10b;
+ array_size = ARRAY_SIZE(gr_access_map_gp10b);
+ *gr_access_map_num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
  }
  #endif


@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -50,7 +50,8 @@ void gp10b_gr_init_get_default_preemption_modes(
  #ifdef CONFIG_NVGPU_HAL_NON_FUSA
  #ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
  void gp10b_gr_init_get_access_map(struct gk20a *g,
- 	u32 **whitelist, u32 *num_entries);
+ 	u32 **gr_access_map,
+ 	u32 *gr_access_map_num_entries);
  #endif
  int gp10b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
  struct nvgpu_gr_config *gr_config,


@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -52,9 +52,10 @@
  #ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
  void gv11b_gr_init_get_access_map(struct gk20a *g,
- 	u32 **whitelist, u32 *num_entries)
+ 	u32 **gr_access_map,
+ 	u32 *gr_access_map_num_entries)
  {
- 	static u32 wl_addr_gv11b[] = {
+ 	static u32 gr_access_map_gv11b[] = {
  /* this list must be sorted (low to high) */
  0x404468, /* gr_pri_mme_max_instructions */
  0x418380, /* gr_pri_gpcs_rasterarb_line_class */
@@ -96,8 +97,8 @@ void gv11b_gr_init_get_access_map(struct gk20a *g,
  size_t array_size;
  (void)g;
- *whitelist = wl_addr_gv11b;
- array_size = ARRAY_SIZE(wl_addr_gv11b);
- *num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
+ *gr_access_map = gr_access_map_gv11b;
+ array_size = ARRAY_SIZE(gr_access_map_gv11b);
+ *gr_access_map_num_entries = nvgpu_safe_cast_u64_to_u32(array_size);
  }
  #endif


@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -45,7 +45,8 @@ int gv11b_gr_init_ecc_scrub_reg(struct gk20a *g,
  void gv11b_gr_init_gpc_mmu(struct gk20a *g);
  #ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
  void gv11b_gr_init_get_access_map(struct gk20a *g,
- 	u32 **whitelist, u32 *num_entries);
+ 	u32 **gr_access_map,
+ 	u32 *gr_access_map_num_entries);
  #endif
  void gv11b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
  struct nvgpu_gr_config *gr_config,


@@ -884,7 +884,8 @@ struct gops_gr_init {
  #endif
  #ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
  void (*get_access_map)(struct gk20a *g,
- 	u32 **whitelist, u32 *num_entries);
+ 	u32 **gr_access_map,
+ 	u32 *gr_access_map_num_entries);
  #endif
  #ifdef CONFIG_NVGPU_SM_DIVERSITY
  int (*commit_sm_id_programming)(struct gk20a *g,
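
Each chip family hooks its implementation into this function pointer during HAL setup, and common code then queries it through g->ops. Below is a hedged sketch of that flow, reusing only names visible in the hunks above; where and how the wiring actually happens is an assumption, not part of this change:

    /* Sketch only: the HAL wiring location is assumed, not shown in this
     * change; the type, ops path and function names come from the hunks above.
     */
    static void example_access_map_flow(struct gk20a *g)
    {
    #ifdef CONFIG_NVGPU_SET_FALCON_ACCESS_MAP
            u32 *gr_access_map = NULL;
            u32 gr_access_map_num_entries = 0U;

            /* Per-chip wiring (assumed to happen once during HAL init). */
            g->ops.gr.init.get_access_map = ga10b_gr_init_get_access_map;

            /* Common code then queries the table with the renamed out-params,
             * as gr_init_access_map() does in the first hunk of this change. */
            g->ops.gr.init.get_access_map(g, &gr_access_map,
                    &gr_access_map_num_entries);

            if (gr_access_map_num_entries > 0U) {
                    nvgpu_log_info(g, "first entry 0x%x of %u",
                            gr_access_map[0], gr_access_map_num_entries);
            }
    #else
            (void)g;
    #endif
    }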