mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 10:34:43 +03:00
gpu: nvgpu: gv100: consider floorswept FBPA for getting unicast list
In gr_gv11b/gk20a_create_priv_addr_table() we do not consider floorswept FBPAs and just calculate the unicast list assuming all FBPAs are present. This generates an incorrect list of unicast addresses. Fix this by introducing a new HAL ops.gr.split_fbpa_broadcast_addr. Set gr_gv100_get_active_fpba_mask() for GV100 and gr_gk20a_split_fbpa_broadcast_addr() for the rest of the chips. gr_gv100_get_active_fpba_mask() will first get the active FBPA mask and generate the unicast list only for the active FBPAs. Bug 200398811 Jira NVGPU-556 Change-Id: Idd11d6e7ad7b6836525fe41509aeccf52038321f Signed-off-by: Deepak Nibade <dnibade@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1694444 GVS: Gerrit_Virtual_Submit Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
b64dfdcf9e
commit
a0dfb2b911
@@ -472,6 +472,10 @@ struct gpu_ops {
|
||||
u32 *priv_addr_table,
|
||||
u32 *num_registers);
|
||||
u32 (*get_pmm_per_chiplet_offset)(void);
|
||||
void (*split_fbpa_broadcast_addr)(struct gk20a *g, u32 addr,
|
||||
u32 num_fbpas,
|
||||
u32 *priv_addr_table,
|
||||
u32 *priv_addr_table_index);
|
||||
} gr;
|
||||
struct {
|
||||
void (*init_hw)(struct gk20a *g);
|
||||
|
||||
@@ -6323,6 +6323,17 @@ int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
 * Expand a broadcast FBPA register address into per-FBPA unicast entries.
 *
 * One entry is appended to @priv_addr_table for each of the @num_fbpas
 * FBPAs, starting at index *@t; *@t is advanced past the entries written.
 * This default implementation assumes every FBPA is present (no
 * floorsweeping is taken into account).
 */
void gr_gk20a_split_fbpa_broadcast_addr(struct gk20a *g, u32 addr,
					u32 num_fbpas,
					u32 *priv_addr_table, u32 *t)
{
	u32 id;

	for (id = 0U; id < num_fbpas; id++) {
		priv_addr_table[(*t)++] =
			pri_fbpa_addr(g, pri_fbpa_addr_mask(g, addr), id);
	}
}
|
||||
|
||||
int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr,
|
||||
u32 gpc_num,
|
||||
u32 *priv_addr_table, u32 *t)
|
||||
@@ -6356,7 +6367,6 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
|
||||
u32 broadcast_flags;
|
||||
u32 t;
|
||||
int err;
|
||||
int fbpa_num;
|
||||
|
||||
t = 0;
|
||||
*num_registers = 0;
|
||||
@@ -6430,11 +6440,9 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
|
||||
g->ops.gr.split_ltc_broadcast_addr(g, addr,
|
||||
priv_addr_table, &t);
|
||||
} else if (broadcast_flags & PRI_BROADCAST_FLAGS_FBPA) {
|
||||
for (fbpa_num = 0;
|
||||
fbpa_num < nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS);
|
||||
fbpa_num++)
|
||||
priv_addr_table[t++] = pri_fbpa_addr(g,
|
||||
pri_fbpa_addr_mask(g, addr), fbpa_num);
|
||||
g->ops.gr.split_fbpa_broadcast_addr(g, addr,
|
||||
nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS),
|
||||
priv_addr_table, &t);
|
||||
} else if (!(broadcast_flags & PRI_BROADCAST_FLAGS_GPC)) {
|
||||
if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC)
|
||||
for (tpc_num = 0;
|
||||
|
||||
@@ -832,4 +832,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
|
||||
u32 addr,
|
||||
u32 *priv_addr_table,
|
||||
u32 *num_registers);
|
||||
void gr_gk20a_split_fbpa_broadcast_addr(struct gk20a *g, u32 addr,
|
||||
u32 num_fbpas,
|
||||
u32 *priv_addr_table, u32 *t);
|
||||
#endif /*__GR_GK20A_H__*/
|
||||
|
||||
@@ -324,6 +324,7 @@ static const struct gpu_ops gm20b_ops = {
|
||||
.create_priv_addr_table = gr_gk20a_create_priv_addr_table,
|
||||
.get_pmm_per_chiplet_offset =
|
||||
gr_gm20b_get_pmm_per_chiplet_offset,
|
||||
.split_fbpa_broadcast_addr = gr_gk20a_split_fbpa_broadcast_addr,
|
||||
},
|
||||
.fb = {
|
||||
.reset = fb_gk20a_reset,
|
||||
|
||||
@@ -387,6 +387,7 @@ static const struct gpu_ops gp106_ops = {
|
||||
.create_priv_addr_table = gr_gk20a_create_priv_addr_table,
|
||||
.get_pmm_per_chiplet_offset =
|
||||
gr_gm20b_get_pmm_per_chiplet_offset,
|
||||
.split_fbpa_broadcast_addr = gr_gk20a_split_fbpa_broadcast_addr,
|
||||
},
|
||||
.fb = {
|
||||
.reset = gp106_fb_reset,
|
||||
|
||||
@@ -355,6 +355,7 @@ static const struct gpu_ops gp10b_ops = {
|
||||
.create_priv_addr_table = gr_gk20a_create_priv_addr_table,
|
||||
.get_pmm_per_chiplet_offset =
|
||||
gr_gm20b_get_pmm_per_chiplet_offset,
|
||||
.split_fbpa_broadcast_addr = gr_gk20a_split_fbpa_broadcast_addr,
|
||||
},
|
||||
.fb = {
|
||||
.reset = fb_gk20a_reset,
|
||||
|
||||
@@ -28,6 +28,7 @@
|
||||
|
||||
#include "gk20a/gk20a.h"
|
||||
#include "gk20a/gr_gk20a.h"
|
||||
#include "gk20a/gr_pri_gk20a.h"
|
||||
|
||||
#include "gv100/gr_gv100.h"
|
||||
#include "gv11b/subctx_gv11b.h"
|
||||
@@ -332,6 +333,23 @@ u32 gr_gv100_get_patch_slots(struct gk20a *g)
|
||||
return size;
|
||||
}
|
||||
|
||||
/*
 * Return the mask of active (non-floorswept) FBPAs.
 *
 * The fuse register encodes 0:enabled / 1:disabled per FBPA, so the raw
 * value is inverted, and bits at or above @num_fbpas are cleared.
 *
 * NOTE(review): assumes num_fbpas < 32 (shifting by >= the type width is
 * undefined); callers pass GPU_LIT_NUM_FBPAS which satisfies this — confirm.
 */
static u32 gr_gv100_get_active_fpba_mask(struct gk20a *g, u32 num_fbpas)
{
	u32 active_fbpa_mask;

	active_fbpa_mask = nvgpu_readl(g, fuse_status_opt_fbio_r());
	active_fbpa_mask = ~active_fbpa_mask;

	/*
	 * Use an unsigned constant for the mask: the original
	 * (1 << num_fbpas) shifts a signed int, which is undefined
	 * behavior once num_fbpas reaches 31 (shift into the sign bit).
	 */
	active_fbpa_mask = active_fbpa_mask & ((1U << num_fbpas) - 1U);

	return active_fbpa_mask;
}
|
||||
|
||||
int gr_gv100_add_ctxsw_reg_pm_fbpa(struct gk20a *g,
|
||||
struct ctxsw_buf_offset_map_entry *map,
|
||||
struct aiv_list_gk20a *regs,
|
||||
@@ -348,15 +366,7 @@ int gr_gv100_add_ctxsw_reg_pm_fbpa(struct gk20a *g,
|
||||
if ((cnt + (regs->count * num_fbpas)) > max_cnt)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Read active fbpa mask from fuse
|
||||
* Note that 0:enable and 1:disable in value read from fuse so we've to
|
||||
* flip the bits.
|
||||
* Also set unused bits to zero
|
||||
*/
|
||||
active_fbpa_mask = nvgpu_readl(g, fuse_status_opt_fbio_r());
|
||||
active_fbpa_mask = ~active_fbpa_mask;
|
||||
active_fbpa_mask = active_fbpa_mask & ((1 << num_fbpas) - 1);
|
||||
active_fbpa_mask = gr_gv100_get_active_fpba_mask(g, num_fbpas);
|
||||
|
||||
for (idx = 0; idx < regs->count; idx++) {
|
||||
for (fbpa_id = 0; fbpa_id < num_fbpas; fbpa_id++) {
|
||||
@@ -383,3 +393,20 @@ int gr_gv100_add_ctxsw_reg_perf_pma(struct ctxsw_buf_offset_map_entry *map,
|
||||
return gr_gk20a_add_ctxsw_reg_perf_pma(map, regs,
|
||||
count, offset, max_cnt, base, mask);
|
||||
}
|
||||
|
||||
/*
 * GV100: expand a broadcast FBPA register address into unicast entries,
 * honouring floorsweeping.
 *
 * Unlike the gk20a default, only FBPAs present in the fuse-derived active
 * mask contribute an entry to @priv_addr_table; *@t is advanced past the
 * entries written.
 */
void gr_gv100_split_fbpa_broadcast_addr(struct gk20a *g, u32 addr,
					u32 num_fbpas,
					u32 *priv_addr_table, u32 *t)
{
	u32 active_fbpa_mask = gr_gv100_get_active_fpba_mask(g, num_fbpas);
	u32 fbpa_id;

	for (fbpa_id = 0U; fbpa_id < num_fbpas; fbpa_id++) {
		/* Skip floorswept FBPAs. */
		if ((active_fbpa_mask & BIT(fbpa_id)) == 0U)
			continue;

		priv_addr_table[(*t)++] = pri_fbpa_addr(g,
				pri_fbpa_addr_mask(g, addr), fbpa_id);
	}
}
|
||||
|
||||
@@ -43,4 +43,7 @@ int gr_gv100_add_ctxsw_reg_perf_pma(struct ctxsw_buf_offset_map_entry *map,
|
||||
struct aiv_list_gk20a *regs,
|
||||
u32 *count, u32 *offset,
|
||||
u32 max_cnt, u32 base, u32 mask);
|
||||
void gr_gv100_split_fbpa_broadcast_addr(struct gk20a *g, u32 addr,
|
||||
u32 num_fbpas,
|
||||
u32 *priv_addr_table, u32 *t);
|
||||
#endif
|
||||
|
||||
@@ -434,6 +434,7 @@ static const struct gpu_ops gv100_ops = {
|
||||
.create_priv_addr_table = gr_gv11b_create_priv_addr_table,
|
||||
.get_pmm_per_chiplet_offset =
|
||||
gr_gv11b_get_pmm_per_chiplet_offset,
|
||||
.split_fbpa_broadcast_addr = gr_gv100_split_fbpa_broadcast_addr,
|
||||
},
|
||||
.fb = {
|
||||
.reset = gv100_fb_reset,
|
||||
|
||||
@@ -4557,7 +4557,6 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
|
||||
u32 broadcast_flags;
|
||||
u32 t;
|
||||
int err;
|
||||
int fbpa_num;
|
||||
|
||||
t = 0;
|
||||
*num_registers = 0;
|
||||
@@ -4671,11 +4670,9 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
|
||||
g->ops.gr.split_ltc_broadcast_addr(g, addr,
|
||||
priv_addr_table, &t);
|
||||
} else if (broadcast_flags & PRI_BROADCAST_FLAGS_FBPA) {
|
||||
for (fbpa_num = 0;
|
||||
fbpa_num < nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS);
|
||||
fbpa_num++)
|
||||
priv_addr_table[t++] = pri_fbpa_addr(g,
|
||||
pri_fbpa_addr_mask(g, addr), fbpa_num);
|
||||
g->ops.gr.split_fbpa_broadcast_addr(g, addr,
|
||||
nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS),
|
||||
priv_addr_table, &t);
|
||||
} else if ((addr_type == CTXSW_ADDR_TYPE_LTCS) &&
|
||||
(broadcast_flags & PRI_BROADCAST_FLAGS_PMM_FBPGS_LTC)) {
|
||||
gr_gv11b_split_pmm_fbp_broadcast_address(g,
|
||||
|
||||
@@ -407,6 +407,7 @@ static const struct gpu_ops gv11b_ops = {
|
||||
.create_priv_addr_table = gr_gv11b_create_priv_addr_table,
|
||||
.get_pmm_per_chiplet_offset =
|
||||
gr_gv11b_get_pmm_per_chiplet_offset,
|
||||
.split_fbpa_broadcast_addr = gr_gk20a_split_fbpa_broadcast_addr,
|
||||
},
|
||||
.fb = {
|
||||
.reset = gv11b_fb_reset,
|
||||
|
||||
@@ -229,6 +229,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
|
||||
.create_priv_addr_table = gr_gk20a_create_priv_addr_table,
|
||||
.get_pmm_per_chiplet_offset =
|
||||
gr_gm20b_get_pmm_per_chiplet_offset,
|
||||
.split_fbpa_broadcast_addr = gr_gk20a_split_fbpa_broadcast_addr,
|
||||
},
|
||||
.fb = {
|
||||
.reset = fb_gk20a_reset,
|
||||
|
||||
@@ -265,6 +265,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
|
||||
.create_priv_addr_table = gr_gv11b_create_priv_addr_table,
|
||||
.get_pmm_per_chiplet_offset =
|
||||
gr_gv11b_get_pmm_per_chiplet_offset,
|
||||
.split_fbpa_broadcast_addr = gr_gk20a_split_fbpa_broadcast_addr,
|
||||
},
|
||||
.fb = {
|
||||
.reset = gv11b_fb_reset,
|
||||
|
||||
Reference in New Issue
Block a user