gpu: nvgpu: Add SM diversity support

To achieve permanent fault coverage, the CTAs launched by each kernel
in the mission and redundant contexts must execute on different
hardware resources. This feature modifies the virtual SM id to TPC
mapping across the mission and redundant contexts. The virtual SM
identifier to TPC mapping is programmed by nvgpu when setting up the
patch context.

The recommendation for the redundant setting is to offset the
assignment by one TPC, not by one GPC. This ensures both GPC and TPC
diversity; SM and Quadrant diversity follow naturally. For kernels
with few CTAs, the diversity is guaranteed to be 100%. For completely
random CTA allocation, e.g. a large number of CTAs in the waiting
queue, the diversity is 1 - 1/#SM, i.e. 87.5% for GV11B and 97.9% for
TU104.
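
As a rough illustration only (simplified, hypothetical helper names,
not the nvgpu implementation), the redundant TPC assignment and the
expected diversity can be sketched as:

/*
 * Illustrative sketch: the redundant context shifts each virtual SM's
 * TPC assignment by one TPC (with wrap-around), so every SM lands on
 * a different TPC, and typically a different GPC, than in the mission
 * context.
 */
static unsigned int redundant_tpc_for_sm(unsigned int virtual_sm_id,
					 unsigned int sm_per_tpc,
					 unsigned int tpc_count)
{
	unsigned int mission_tpc = virtual_sm_id / sm_per_tpc;

	/* Offset the assignment by one TPC, wrapping at tpc_count. */
	return (mission_tpc + 1U) % tpc_count;
}

/* Expected diversity for fully random CTA allocation: 1 - 1/#SM. */
static double expected_diversity(unsigned int num_sm)
{
	return 1.0 - 1.0 / (double)num_sm; /* 8 SMs -> 87.5%, 48 SMs -> ~97.9% */
}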

Added the NvGpu CFLAG "CONFIG_NVGPU_SM_DIVERSITY" to enable/disable
SM diversity support.

This support is enabled only in the gv11b and tu104 QNX non-safety
builds.
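
For reference, a minimal sketch of the gating pattern used throughout
this change (compile-time CFLAG plus the per-GPU runtime flag); the
helper name is hypothetical and not part of the patch:

/*
 * Hypothetical helper, for illustration only. Assumes nvgpu's
 * struct gk20a and nvgpu_is_enabled() declarations are in scope.
 */
static bool sm_diversity_in_use(struct gk20a *g)
{
#ifdef CONFIG_NVGPU_SM_DIVERSITY
	/* Feature compiled in; also honor the runtime enable flag. */
	return nvgpu_is_enabled(g, NVGPU_SUPPORT_SM_DIVERSITY);
#else
	(void)g;
	return false;
#endif
}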

JIRA NVGPU-4685

Change-Id: I8e3eaa72d8cf7aff97f61e4c2abd10b2afe0fe8b
Signed-off-by: Lakshmanan M <lm@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2268026
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: Shashank Singh <shashsingh@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Lakshmanan M
2019-12-24 12:16:35 +05:30
committed by Alex Waterman
parent 7601d1c620
commit 1c991a58af
27 changed files with 504 additions and 74 deletions

View File

@@ -1,5 +1,5 @@
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
@@ -221,6 +221,10 @@ NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_CHANNEL_TSG_CONTROL
# Enable Virtualization server for normal build
NVGPU_COMMON_CFLAGS += -DCONFIG_TEGRA_GR_VIRTUALIZATION_SERVER
# Enable SM diversity support for normal build
CONFIG_NVGPU_SM_DIVERSITY := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_SM_DIVERSITY
endif
endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -699,6 +699,11 @@ int nvgpu_tsg_open_common(struct gk20a *g, struct nvgpu_tsg *tsg, pid_t pid)
goto clean_up;
}
#ifdef CONFIG_NVGPU_SM_DIVERSITY
nvgpu_gr_ctx_set_sm_diversity_config(tsg->gr_ctx,
NVGPU_INVALID_SM_CONFIG_ID);
#endif
if (g->ops.tsg.init_eng_method_buffers != NULL) {
err = g->ops.tsg.init_eng_method_buffers(g, tsg);
if (err != 0) {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -447,6 +447,19 @@ struct nvgpu_mem *nvgpu_gr_ctx_get_ctx_mem(struct nvgpu_gr_ctx *gr_ctx)
return &gr_ctx->mem;
}
#ifdef CONFIG_NVGPU_SM_DIVERSITY
void nvgpu_gr_ctx_set_sm_diversity_config(struct nvgpu_gr_ctx *gr_ctx,
u32 sm_diversity_config)
{
gr_ctx->sm_diversity_config = sm_diversity_config;
}
u32 nvgpu_gr_ctx_get_sm_diversity_config(struct nvgpu_gr_ctx *gr_ctx)
{
return gr_ctx->sm_diversity_config;
}
#endif
/* load saved fresh copy of golden image into channel gr_ctx */
void nvgpu_gr_ctx_load_golden_ctx_image(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx,
@@ -547,12 +560,22 @@ void nvgpu_gr_ctx_patch_write(struct gk20a *g,
u32 addr, u32 data, bool patch)
{
if (patch) {
u32 patch_slot =
u32 patch_slot;
u64 patch_slot_max;
if (gr_ctx == NULL) {
nvgpu_err(g,
"failed to access gr_ctx[NULL] but patch true");
return;
}
patch_slot =
nvgpu_safe_mult_u32(gr_ctx->patch_ctx.data_count,
PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY);
u64 patch_slot_max =
patch_slot_max =
nvgpu_safe_sub_u64(
PATCH_CTX_ENTRIES_FROM_SIZE(gr_ctx->patch_ctx.mem.size),
PATCH_CTX_ENTRIES_FROM_SIZE(
gr_ctx->patch_ctx.mem.size),
PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY);
if (patch_slot > patch_slot_max) {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -172,6 +172,16 @@ struct nvgpu_gr_ctx {
* TSG identifier corresponding to the graphics context.
*/
u32 tsgid;
#ifdef CONFIG_NVGPU_SM_DIVERSITY
/** SM diversity configuration offset.
 * It is valid only if NVGPU_SUPPORT_SM_DIVERSITY support is true;
 * otherwise the field is ignored.
 * A valid offset ranges from 0 to
 * (#gk20a.max_sm_diversity_config_count - 1).
 */
u32 sm_diversity_config;
#endif
};
#endif /* NVGPU_GR_CTX_PRIV_H */

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -37,7 +37,7 @@ static int gr_load_sm_id_config(struct gk20a *g, struct nvgpu_gr_config *config)
return -ENOMEM;
}
err = g->ops.gr.init.sm_id_config(g, tpc_sm_id, config);
err = g->ops.gr.init.sm_id_config(g, tpc_sm_id, config, NULL, false);
nvgpu_kfree(g, tpc_sm_id);
@@ -120,7 +120,7 @@ int nvgpu_gr_fs_state_init(struct gk20a *g, struct nvgpu_gr_config *config)
gpc_index = nvgpu_gr_config_get_sm_info_gpc_index(sm_info);
g->ops.gr.init.sm_id_numbering(g, gpc_index, tpc_index, sm_id,
config);
config, NULL, false);
}
g->ops.gr.init.pd_tpc_per_gpc(g, config);

View File

@@ -211,6 +211,17 @@ static bool gr_config_alloc_struct_mem(struct gk20a *g,
nvgpu_err(g, "sm_to_cluster == NULL");
goto alloc_err;
}
#ifdef CONFIG_NVGPU_SM_DIVERSITY
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SM_DIVERSITY)) {
config->sm_to_cluster_redex_config =
nvgpu_kzalloc(g, sm_info_size);
if (config->sm_to_cluster_redex_config == NULL) {
nvgpu_err(g, "sm_to_cluster_redex_config == NULL");
goto clean_alloc_mem;
}
}
#endif
config->no_of_sm = 0;
gpc_size = nvgpu_safe_mult_u64((size_t)config->gpc_count, sizeof(u32));
@@ -249,7 +260,12 @@ static bool gr_config_alloc_struct_mem(struct gk20a *g,
clean_alloc_mem:
nvgpu_kfree(g, config->sm_to_cluster);
config->sm_to_cluster = NULL;
#ifdef CONFIG_NVGPU_SM_DIVERSITY
if (config->sm_to_cluster_redex_config != NULL) {
nvgpu_kfree(g, config->sm_to_cluster_redex_config);
config->sm_to_cluster_redex_config = NULL;
}
#endif
gr_config_free_mem(g, config);
alloc_err:
@@ -583,6 +599,12 @@ void nvgpu_gr_config_deinit(struct gk20a *g, struct nvgpu_gr_config *config)
nvgpu_kfree(g, config->map_tiles);
#endif
nvgpu_kfree(g, config->sm_to_cluster);
#ifdef CONFIG_NVGPU_SM_DIVERSITY
if (config->sm_to_cluster_redex_config != NULL) {
nvgpu_kfree(g, config->sm_to_cluster_redex_config);
config->sm_to_cluster_redex_config = NULL;
}
#endif
}
u32 nvgpu_gr_config_get_max_gpc_count(struct nvgpu_gr_config *config)
@@ -701,6 +723,14 @@ struct nvgpu_sm_info *nvgpu_gr_config_get_sm_info(struct nvgpu_gr_config *config
return &config->sm_to_cluster[sm_id];
}
#ifdef CONFIG_NVGPU_SM_DIVERSITY
struct nvgpu_sm_info *nvgpu_gr_config_get_redex_sm_info(
struct nvgpu_gr_config *config, u32 sm_id)
{
return &config->sm_to_cluster_redex_config[sm_id];
}
#endif
u32 nvgpu_gr_config_get_sm_info_gpc_index(struct nvgpu_sm_info *sm_info)
{
return sm_info->gpc_index;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -150,6 +150,13 @@ struct nvgpu_gr_config {
* Pointer to SM information struct.
*/
struct nvgpu_sm_info *sm_to_cluster;
#ifdef CONFIG_NVGPU_SM_DIVERSITY
/**
* Pointer to redundant execution config SM information struct.
* It is valid only if NVGPU_SUPPORT_SM_DIVERSITY support is true.
*/
struct nvgpu_sm_info *sm_to_cluster_redex_config;
#endif
#ifdef CONFIG_NVGPU_GRAPHICS
u32 max_zcull_per_gpc_count;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -363,6 +363,22 @@ void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
}
#endif
#ifdef CONFIG_NVGPU_SM_DIVERSITY
if ((nvgpu_is_enabled(g, NVGPU_SUPPORT_SM_DIVERSITY)) &&
(nvgpu_gr_ctx_get_sm_diversity_config(gr_ctx) !=
NVGPU_DEFAULT_SM_DIVERSITY_CONFIG) &&
(g->ops.gr.init.commit_sm_id_programming != NULL)) {
int err;
err = g->ops.gr.init.commit_sm_id_programming(
g, config, gr_ctx, patch);
if (err != 0) {
nvgpu_err(g,
"commit_sm_id_programming failed err=%d", err);
}
}
#endif
if (patch) {
nvgpu_gr_ctx_patch_write_end(g, gr_ctx, false);
}

View File

@@ -1,7 +1,7 @@
/*
* Virtualized GPU Graphics
*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -74,6 +74,11 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
p->as_handle = vm->handle;
p->gr_ctx_va = gr_ctx->mem.gpu_va;
p->tsg_id = gr_ctx->tsgid;
#ifdef CONFIG_NVGPU_SM_DIVERSITY
p->sm_diversity_config = gr_ctx->sm_diversity_config;
#else
p->sm_diversity_config = NVGPU_DEFAULT_SM_DIVERSITY_CONFIG;
#endif
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
err = err ? err : msg.ret;
@@ -132,7 +137,7 @@ int vgpu_gr_alloc_patch_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
nvgpu_log_fn(g, " ");
patch_ctx = &gr_ctx->patch_ctx;
patch_ctx->mem.size = 128 * sizeof(u32);
patch_ctx->mem.size = 1024 * sizeof(u32);
patch_ctx->mem.gpu_va = nvgpu_vm_alloc_va(ch_vm,
patch_ctx->mem.size,
GMMU_PAGE_SIZE_KERNEL);

View File

@@ -1,7 +1,7 @@
/*
* Virtualized GPU Graphics
*
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -386,6 +386,20 @@ static int vgpu_gr_init_gr_config(struct gk20a *g, struct nvgpu_gr *gr)
goto cleanup;
}
#ifdef CONFIG_NVGPU_SM_DIVERSITY
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SM_DIVERSITY)) {
config->sm_to_cluster_redex_config =
nvgpu_kzalloc(g, config->gpc_count *
config->max_tpc_per_gpc_count *
sm_per_tpc *
sizeof(struct nvgpu_sm_info));
if (config->sm_to_cluster_redex_config == NULL) {
nvgpu_err(g, "sm_to_cluster_redex_config == NULL");
goto cleanup;
}
}
#endif
config->tpc_count = 0;
for (gpc_index = 0; gpc_index < config->gpc_count; gpc_index++) {
config->gpc_tpc_count[gpc_index] =
@@ -475,6 +489,18 @@ cleanup:
nvgpu_kfree(g, config->gpc_tpc_mask);
config->gpc_tpc_mask = NULL;
if (config->sm_to_cluster != NULL) {
nvgpu_kfree(g, config->sm_to_cluster);
config->sm_to_cluster = NULL;
}
#ifdef CONFIG_NVGPU_SM_DIVERSITY
if (config->sm_to_cluster_redex_config != NULL) {
nvgpu_kfree(g, config->sm_to_cluster_redex_config);
config->sm_to_cluster_redex_config = NULL;
}
#endif
return err;
}
@@ -669,6 +695,13 @@ static void vgpu_remove_gr_support(struct gk20a *g)
nvgpu_kfree(gr->g, gr->config->sm_to_cluster);
gr->config->sm_to_cluster = NULL;
#ifdef CONFIG_NVGPU_SM_DIVERSITY
if (gr->config->sm_to_cluster_redex_config != NULL) {
nvgpu_kfree(g, gr->config->sm_to_cluster_redex_config);
gr->config->sm_to_cluster_redex_config = NULL;
}
#endif
nvgpu_gr_config_deinit(gr->g, gr->config);
#ifdef CONFIG_NVGPU_GRAPHICS
@@ -1140,6 +1173,7 @@ int vgpu_gr_init_sm_id_table(struct gk20a *g, struct nvgpu_gr_config *gr_config)
void *handle = NULL;
u32 sm_id;
u32 max_sm;
u32 sm_config;
msg.cmd = TEGRA_VGPU_CMD_GET_VSMS_MAPPING;
msg.handle = vgpu_get_handle(g);
@@ -1165,18 +1199,31 @@ int vgpu_gr_init_sm_id_table(struct gk20a *g, struct nvgpu_gr_config *gr_config)
return -EINVAL;
}
if ((p->num_sm * sizeof(*entry)) > oob_size) {
if ((p->num_sm * sizeof(*entry) *
priv->constants.max_sm_diversity_config_count) > oob_size) {
return -EINVAL;
}
gr_config->no_of_sm = p->num_sm;
for (sm_config = NVGPU_DEFAULT_SM_DIVERSITY_CONFIG;
sm_config < priv->constants.max_sm_diversity_config_count;
sm_config++) {
for (sm_id = 0; sm_id < p->num_sm; sm_id++, entry++) {
#ifdef CONFIG_NVGPU_SM_DIVERSITY
sm_info =
((sm_config == NVGPU_DEFAULT_SM_DIVERSITY_CONFIG) ?
nvgpu_gr_config_get_sm_info(gr_config, sm_id) :
nvgpu_gr_config_get_redex_sm_info(
gr_config, sm_id));
#else
sm_info = nvgpu_gr_config_get_sm_info(gr_config, sm_id);
#endif
sm_info->tpc_index = entry->tpc_index;
sm_info->gpc_index = entry->gpc_index;
sm_info->sm_index = entry->sm_index;
sm_info->global_tpc_index = entry->global_tpc_index;
}
}
vgpu_ivc_oob_put_ptr(handle);
return 0;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -318,6 +318,45 @@ exit_perf_err:
return err;
}
#ifdef CONFIG_NVGPU_SM_DIVERSITY
static void gv100_gr_config_set_redex_sminfo(struct gk20a *g,
struct nvgpu_gr_config *gr_config, u32 num_sm,
u32 sm_per_tpc, u32 *gpc_table, u32 *tpc_table)
{
u32 sm;
u32 tpc = nvgpu_gr_config_get_tpc_count(gr_config);
u32 sm_id = 0;
u32 global_index = 0;
for (sm_id = 0; sm_id < num_sm; sm_id += sm_per_tpc) {
tpc = nvgpu_safe_sub_u32(tpc, 1U);
for (sm = 0; sm < sm_per_tpc; sm++) {
u32 index = nvgpu_safe_add_u32(sm_id, sm);
struct nvgpu_sm_info *sm_info =
nvgpu_gr_config_get_redex_sm_info(
gr_config, index);
nvgpu_gr_config_set_sm_info_gpc_index(sm_info,
gpc_table[tpc]);
nvgpu_gr_config_set_sm_info_tpc_index(sm_info,
tpc_table[tpc]);
nvgpu_gr_config_set_sm_info_sm_index(sm_info, sm);
nvgpu_gr_config_set_sm_info_global_tpc_index(
sm_info, global_index);
nvgpu_log_info(g,
"gpc : %d tpc %d sm_index %d global_index: %d",
nvgpu_gr_config_get_sm_info_gpc_index(sm_info),
nvgpu_gr_config_get_sm_info_tpc_index(sm_info),
nvgpu_gr_config_get_sm_info_sm_index(sm_info),
nvgpu_gr_config_get_sm_info_global_tpc_index(
sm_info));
}
global_index = nvgpu_safe_add_u32(global_index, 1U);
}
}
#endif
static void gv100_gr_config_set_sminfo(struct gk20a *g,
struct nvgpu_gr_config *gr_config, u32 num_sm,
u32 sm_per_tpc, u32 *gpc_table, u32 *tpc_table)
@@ -348,6 +387,13 @@ static void gv100_gr_config_set_sminfo(struct gk20a *g,
}
tpc = nvgpu_safe_add_u32(tpc, 1U);
}
#ifdef CONFIG_NVGPU_SM_DIVERSITY
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SM_DIVERSITY)) {
gv100_gr_config_set_redex_sminfo(g, gr_config, num_sm,
sm_per_tpc, gpc_table, tpc_table);
}
#endif
}
int gv100_gr_config_init_sm_id_table(struct gk20a *g,

View File

@@ -1,7 +1,7 @@
/*
* GV11b GPU GR
*
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -849,6 +849,14 @@ int gv11b_gr_set_sm_debug_mode(struct gk20a *g,
unsigned int i = 0, sm_id;
u32 no_of_sm = g->ops.gr.init.get_no_of_sm(g);
int err;
#ifdef CONFIG_NVGPU_SM_DIVERSITY
struct nvgpu_tsg *tsg = nvgpu_tsg_from_ch(ch);
if (tsg == NULL) {
nvgpu_err(g, "gv11b_gr_set_sm_debug_mode failed=>tsg NULL");
return -EINVAL;
}
#endif
ops = nvgpu_kcalloc(g, no_of_sm, sizeof(*ops));
if (ops == NULL) {
@@ -863,7 +871,20 @@ int gv11b_gr_set_sm_debug_mode(struct gk20a *g,
continue;
}
#ifdef CONFIG_NVGPU_SM_DIVERSITY
if (nvgpu_gr_ctx_get_sm_diversity_config(tsg->gr_ctx) ==
NVGPU_DEFAULT_SM_DIVERSITY_CONFIG) {
sm_info =
nvgpu_gr_config_get_sm_info(
g->gr->config, sm_id);
} else {
sm_info =
nvgpu_gr_config_get_redex_sm_info(
g->gr->config, sm_id);
}
#else
sm_info = nvgpu_gr_config_get_sm_info(g->gr->config, sm_id);
#endif
gpc = nvgpu_gr_config_get_sm_info_gpc_index(sm_info);
if (g->ops.gr.init.get_nonpes_aware_tpc != NULL) {
tpc = g->ops.gr.init.get_nonpes_aware_tpc(g,
@@ -2110,8 +2131,21 @@ int gv11b_gr_clear_sm_error_state(struct gk20a *g,
}
if (gk20a_is_channel_ctx_resident(ch)) {
struct nvgpu_sm_info *sm_info =
nvgpu_gr_config_get_sm_info(g->gr->config, sm_id);
struct nvgpu_sm_info *sm_info;
#ifdef CONFIG_NVGPU_SM_DIVERSITY
if (nvgpu_gr_ctx_get_sm_diversity_config(tsg->gr_ctx) ==
NVGPU_DEFAULT_SM_DIVERSITY_CONFIG) {
sm_info =
nvgpu_gr_config_get_sm_info(
g->gr->config, sm_id);
} else {
sm_info =
nvgpu_gr_config_get_redex_sm_info(
g->gr->config, sm_id);
}
#else
sm_info = nvgpu_gr_config_get_sm_info(g->gr->config, sm_id);
#endif
gpc = nvgpu_gr_config_get_sm_info_gpc_index(sm_info);
if (g->ops.gr.init.get_nonpes_aware_tpc != NULL) {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -125,7 +125,9 @@ void gm20b_gr_init_get_access_map(struct gk20a *g,
#endif
void gm20b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
struct nvgpu_gr_config *gr_config)
struct nvgpu_gr_config *gr_config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch)
{
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
@@ -153,7 +155,9 @@ u32 gm20b_gr_init_get_sm_id_size(void)
}
int gm20b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
struct nvgpu_gr_config *gr_config)
struct nvgpu_gr_config *gr_config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch)
{
u32 i, j;
u32 tpc_index, gpc_index;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -74,10 +74,14 @@ void gm20b_gr_init_get_access_map(struct gk20a *g,
u32 **whitelist, u32 *num_entries);
#endif
void gm20b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
struct nvgpu_gr_config *gr_config);
struct nvgpu_gr_config *gr_config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch);
u32 gm20b_gr_init_get_sm_id_size(void);
int gm20b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
struct nvgpu_gr_config *gr_config);
struct nvgpu_gr_config *gr_config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch);
void gm20b_gr_init_tpc_mask(struct gk20a *g, u32 gpc_index, u32 pes_tpc_mask);
void gm20b_gr_init_fs_state(struct gk20a *g);
void gm20b_gr_init_commit_global_timeslice(struct gk20a *g);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -83,7 +83,9 @@ void gp10b_gr_init_get_access_map(struct gk20a *g,
#endif
int gp10b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
struct nvgpu_gr_config *gr_config)
struct nvgpu_gr_config *gr_config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch)
{
u32 i, j;
u32 tpc_index, gpc_index;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -56,7 +56,9 @@ void gp10b_gr_init_get_access_map(struct gk20a *g,
u32 **whitelist, u32 *num_entries);
#endif
int gp10b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
struct nvgpu_gr_config *gr_config);
struct nvgpu_gr_config *gr_config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch);
void gp10b_gr_init_fs_state(struct gk20a *g);
int gp10b_gr_init_preemption_state(struct gk20a *g);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -40,9 +40,13 @@ void gv11b_gr_init_get_access_map(struct gk20a *g,
u32 **whitelist, u32 *num_entries);
#endif
void gv11b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
struct nvgpu_gr_config *gr_config);
struct nvgpu_gr_config *gr_config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch);
int gv11b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
struct nvgpu_gr_config *gr_config);
struct nvgpu_gr_config *gr_config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch);
void gv11b_gr_init_tpc_mask(struct gk20a *g, u32 gpc_index, u32 pes_tpc_mask);
void gv11b_gr_init_fs_state(struct gk20a *g);
int gv11b_gr_init_preemption_state(struct gk20a *g);
@@ -62,6 +66,13 @@ void gv11b_gr_init_commit_global_attrib_cb(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx, u32 tpc_count, u32 max_tpc, u64 addr,
bool patch);
#ifdef CONFIG_NVGPU_SM_DIVERSITY
int gv11b_gr_init_commit_sm_id_programming(struct gk20a *g,
struct nvgpu_gr_config *config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch);
#endif
int gv11b_gr_init_load_sw_veid_bundle(struct gk20a *g,
struct netlist_av_list *sw_veid_bundle_init);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -387,7 +387,9 @@ void gv11b_gr_init_gpc_mmu(struct gk20a *g)
}
void gv11b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
struct nvgpu_gr_config *gr_config)
struct nvgpu_gr_config *gr_config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch)
{
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
@@ -396,9 +398,17 @@ void gv11b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
u32 global_tpc_index;
u32 tpc_offset;
u32 offset_sum = 0U;
struct nvgpu_sm_info *sm_info =
nvgpu_gr_config_get_sm_info(gr_config, smid);
struct nvgpu_sm_info *sm_info;
#ifdef CONFIG_NVGPU_SM_DIVERSITY
sm_info = (((gr_ctx == NULL) ||
(nvgpu_gr_ctx_get_sm_diversity_config(gr_ctx) ==
NVGPU_DEFAULT_SM_DIVERSITY_CONFIG)) ?
nvgpu_gr_config_get_sm_info(gr_config, smid) :
nvgpu_gr_config_get_redex_sm_info(gr_config, smid));
#else
sm_info = nvgpu_gr_config_get_sm_info(gr_config, smid);
#endif
global_tpc_index =
nvgpu_gr_config_get_sm_info_global_tpc_index(sm_info);
@@ -406,21 +416,27 @@ void gv11b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
tpc_offset = nvgpu_safe_mult_u32(tpc_in_gpc_stride, tpc);
offset_sum = nvgpu_safe_add_u32(gpc_offset, tpc_offset);
nvgpu_writel(g,
nvgpu_gr_ctx_patch_write(g, gr_ctx,
nvgpu_safe_add_u32(gr_gpc0_tpc0_sm_cfg_r(), offset_sum),
gr_gpc0_tpc0_sm_cfg_tpc_id_f(global_tpc_index));
nvgpu_writel(g,
gr_gpc0_tpc0_sm_cfg_tpc_id_f(global_tpc_index),
patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx,
nvgpu_safe_add_u32(
gr_gpc0_gpm_pd_sm_id_r(tpc), gpc_offset),
gr_gpc0_gpm_pd_sm_id_id_f(global_tpc_index));
nvgpu_writel(g,
gr_gpc0_gpm_pd_sm_id_id_f(global_tpc_index),
patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx,
nvgpu_safe_add_u32(
gr_gpc0_tpc0_pe_cfg_smid_r(), offset_sum),
gr_gpc0_tpc0_pe_cfg_smid_value_f(global_tpc_index));
gr_gpc0_tpc0_pe_cfg_smid_value_f(global_tpc_index),
patch);
}
int gv11b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
struct nvgpu_gr_config *gr_config)
struct nvgpu_gr_config *gr_config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch)
{
u32 i, j;
u32 tpc_index, gpc_index, tpc_id;
@@ -451,8 +467,22 @@ int gv11b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
if (sm_id >= no_of_sm) {
break;
}
#ifdef CONFIG_NVGPU_SM_DIVERSITY
if ((gr_ctx == NULL) ||
nvgpu_gr_ctx_get_sm_diversity_config(gr_ctx) ==
NVGPU_DEFAULT_SM_DIVERSITY_CONFIG) {
sm_info =
nvgpu_gr_config_get_sm_info(
gr_config, sm_id);
} else {
sm_info =
nvgpu_gr_config_get_redex_sm_info(
gr_config, sm_id);
}
#else
sm_info =
nvgpu_gr_config_get_sm_info(gr_config, sm_id);
#endif
gpc_index =
nvgpu_gr_config_get_sm_info_gpc_index(sm_info);
tpc_index =
@@ -470,11 +500,17 @@ int gv11b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
tpc_index,
bit_stride));
}
nvgpu_writel(g, gr_cwd_gpc_tpc_id_r(i), reg);
nvgpu_gr_ctx_patch_write(g, gr_ctx,
gr_cwd_gpc_tpc_id_r(i),
reg,
patch);
}
for (i = 0U; i < gr_cwd_sm_id__size_1_v(); i++) {
nvgpu_writel(g, gr_cwd_sm_id_r(i), tpc_sm_id[i]);
nvgpu_gr_ctx_patch_write(g, gr_ctx,
gr_cwd_sm_id_r(i),
tpc_sm_id[i],
patch);
}
return 0;
@@ -694,6 +730,55 @@ void gv11b_gr_init_commit_global_attrib_cb(struct gk20a *g,
gr_gpcs_tpcs_tex_rm_cb_1_valid_true_f(), patch);
}
#ifdef CONFIG_NVGPU_SM_DIVERSITY
int gv11b_gr_init_commit_sm_id_programming(struct gk20a *g,
struct nvgpu_gr_config *config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch)
{
u32 sm_id = 0;
u32 tpc_index, gpc_index;
int err = 0;
u32 *tpc_sm_id;
u32 sm_id_size = g->ops.gr.init.get_sm_id_size();
for (sm_id = 0; sm_id < g->ops.gr.init.get_no_of_sm(g);
sm_id++) {
struct nvgpu_sm_info *sm_info =
((gr_ctx == NULL) ||
(nvgpu_gr_ctx_get_sm_diversity_config(gr_ctx) ==
NVGPU_DEFAULT_SM_DIVERSITY_CONFIG)) ?
nvgpu_gr_config_get_sm_info(
config, sm_id) :
nvgpu_gr_config_get_redex_sm_info(
config, sm_id);
tpc_index = nvgpu_gr_config_get_sm_info_tpc_index(sm_info);
gpc_index = nvgpu_gr_config_get_sm_info_gpc_index(sm_info);
g->ops.gr.init.sm_id_numbering(g, gpc_index, tpc_index, sm_id,
config, gr_ctx, patch);
}
tpc_sm_id = nvgpu_kcalloc(g, sm_id_size, sizeof(u32));
if (tpc_sm_id == NULL) {
nvgpu_err(g,
"gv11b_gr_init_commit_sm_id_programming failed");
return -ENOMEM;
}
err = g->ops.gr.init.sm_id_config(g, tpc_sm_id, config, gr_ctx, patch);
if (err != 0) {
nvgpu_err(g,
"gv11b_gr_init_commit_sm_id_programming failed err=%d",
err);
}
nvgpu_kfree(g, tpc_sm_id);
return err;
}
#endif
static int gv11b_gr_init_write_bundle_veid_state(struct gk20a *g, u32 index,
struct netlist_av_list *sw_veid_bundle_init)
{

View File

@@ -1,7 +1,7 @@
/*
* GV11B Tegra HAL interface
*
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -608,6 +608,10 @@ static const struct gpu_ops gv11b_ops = {
gv11b_gr_init_commit_global_attrib_cb,
.commit_global_cb_manager =
gp10b_gr_init_commit_global_cb_manager,
#ifdef CONFIG_NVGPU_SM_DIVERSITY
.commit_sm_id_programming =
gv11b_gr_init_commit_sm_id_programming,
#endif
.pipe_mode_override = gm20b_gr_init_pipe_mode_override,
.load_sw_bundle_init =
#ifdef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
@@ -1570,7 +1574,7 @@ int gv11b_init_hal(struct gk20a *g)
*/
nvgpu_set_enabled(g, NVGPU_MM_BYPASSES_IOMMU, true);
#ifndef CONFIG_NVGPU_BUILD_CONFIGURATION_IS_SAFETY
#ifdef CONFIG_NVGPU_SM_DIVERSITY
/*
* To achieve permanent fault coverage, the CTAs launched by each kernel
* in the mission and redundant contexts must execute on different

View File

@@ -1,7 +1,7 @@
/*
* TU104 Tegra HAL interface
*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -621,6 +621,10 @@ static const struct gpu_ops tu104_ops = {
gv11b_gr_init_commit_global_attrib_cb,
.commit_global_cb_manager =
gp10b_gr_init_commit_global_cb_manager,
#ifdef CONFIG_NVGPU_SM_DIVERSITY
.commit_sm_id_programming =
gv11b_gr_init_commit_sm_id_programming,
#endif
.pipe_mode_override = gm20b_gr_init_pipe_mode_override,
.load_sw_bundle_init =
#ifdef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
@@ -1679,6 +1683,7 @@ int tu104_init_hal(struct gk20a *g)
*/
nvgpu_set_enabled(g, NVGPU_SUPPORT_COPY_ENGINE_DIVERSITY, true);
#ifdef CONFIG_NVGPU_SM_DIVERSITY
/*
* To achieve permanent fault coverage, the CTAs launched by each kernel
* in the mission and redundant contexts must execute on different
@@ -1706,7 +1711,10 @@ int tu104_init_hal(struct gk20a *g)
nvgpu_set_enabled(g, NVGPU_SUPPORT_SM_DIVERSITY, true);
g->max_sm_diversity_config_count =
NVGPU_MAX_SM_DIVERSITY_CONFIG_COUNT;
#else
g->max_sm_diversity_config_count =
NVGPU_DEFAULT_SM_DIVERSITY_CONFIG_COUNT;
#endif
/* for now */
gops->clk.support_pmgr_domain = false;
gops->clk.support_lpwr_pg = false;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -139,6 +139,7 @@
#ifdef CONFIG_NVGPU_GRAPHICS
#include <nvgpu/gr/zbc.h>
#endif
#include <nvgpu/gr/setup.h>
#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
@@ -1045,6 +1046,7 @@ int vgpu_gv11b_init_hal(struct gk20a *g)
gops->clk_arb.get_arbiter_clk_domains = NULL;
}
#ifdef CONFIG_NVGPU_SM_DIVERSITY
/*
* To achieve permanent fault coverage, the CTAs launched by each kernel
* in the mission and redundant contexts must execute on different
@@ -1072,7 +1074,10 @@ int vgpu_gv11b_init_hal(struct gk20a *g)
if (priv->constants.max_sm_diversity_config_count > 1U) {
nvgpu_set_enabled(g, NVGPU_SUPPORT_SM_DIVERSITY, true);
}
#else
priv->constants.max_sm_diversity_config_count =
NVGPU_DEFAULT_SM_DIVERSITY_CONFIG_COUNT;
#endif
g->max_sm_diversity_config_count =
priv->constants.max_sm_diversity_config_count;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -621,10 +621,14 @@ struct gops_gr_init {
void (*gpc_mmu)(struct gk20a *g);
u32 (*get_sm_id_size)(void);
int (*sm_id_config)(struct gk20a *g, u32 *tpc_sm_id,
struct nvgpu_gr_config *gr_config);
struct nvgpu_gr_config *gr_config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch);
void (*sm_id_numbering)(struct gk20a *g, u32 gpc,
u32 tpc, u32 smid,
struct nvgpu_gr_config *gr_config);
struct nvgpu_gr_config *gr_config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch);
void (*tpc_mask)(struct gk20a *g,
u32 gpc_index, u32 pes_tpc_mask);
void (*fs_state)(struct gk20a *g);
@@ -697,6 +701,12 @@ struct gops_gr_init {
void (*get_access_map)(struct gk20a *g,
u32 **whitelist, u32 *num_entries);
#endif
#ifdef CONFIG_NVGPU_SM_DIVERSITY
int (*commit_sm_id_programming)(struct gk20a *g,
struct nvgpu_gr_config *config,
struct nvgpu_gr_ctx *gr_ctx,
bool patch);
#endif
#ifdef CONFIG_NVGPU_GRAPHICS
u32 (*get_ctx_attrib_cb_size)(struct gk20a *g, u32 betacb_size,
u32 tpc_count, u32 max_tpc);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -340,6 +340,28 @@ void nvgpu_gr_config_set_no_of_sm(struct nvgpu_gr_config *config, u32 no_of_sm);
struct nvgpu_sm_info *nvgpu_gr_config_get_sm_info(struct nvgpu_gr_config *config,
u32 sm_id);
#ifdef CONFIG_NVGPU_SM_DIVERSITY
/**
* @brief Get redundant execution config information of the given SM.
*
* @param config [in] Pointer to GR configuration struct.
* @param sm_id [in] SM index.
*
* common.gr unit stores redundant execution config information of each SM
* into an array of struct #nvgpu_sm_info. This information includes GPC/TPC
* indexes for particular SM, and index of SM within TPC.
*
* This config is valid only if NVGPU_SUPPORT_SM_DIVERSITY support is true.
*
* This function will return pointer to #nvgpu_sm_info struct for SM with
* requested index.
*
* @return pointer to struct #nvgpu_sm_info
*/
struct nvgpu_sm_info *nvgpu_gr_config_get_redex_sm_info(
struct nvgpu_gr_config *config, u32 sm_id);
#endif
/**
* @brief Get GPC index of SM.
*

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -27,6 +27,11 @@
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/gr/global_ctx.h>
#define NVGPU_INVALID_SM_CONFIG_ID (U32_MAX)
/** Supports only mission (default) context. */
#define NVGPU_DEFAULT_SM_DIVERSITY_CONFIG 0U
/**
* @file
*
@@ -291,16 +296,38 @@ struct nvgpu_mem *nvgpu_gr_ctx_get_patch_ctx_mem(struct nvgpu_gr_ctx *gr_ctx);
void nvgpu_gr_ctx_set_patch_ctx_data_count(struct nvgpu_gr_ctx *gr_ctx,
u32 data_count);
/**
* @brief Get sm diversity config of the given graphics context.
*
* @param gr_ctx [in] Pointer to graphics context struct.
*
* This function returns #sm_diversity_config of graphics context struct.
*
* @return sm diversity config of the given graphics context.
*/
struct nvgpu_mem *nvgpu_gr_ctx_get_ctx_mem(struct nvgpu_gr_ctx *gr_ctx);
#ifdef CONFIG_NVGPU_SM_DIVERSITY
/**
* @brief Set sm diversity config in the given graphics context struct.
*
* @param gr_ctx [in] Pointer to graphics context struct.
* @param sm_diversity_config [in] Value to be set.
*
* This function sets sm diversity config of the given graphics context struct.
*/
void nvgpu_gr_ctx_set_sm_diversity_config(struct nvgpu_gr_ctx *gr_ctx,
u32 sm_diversity_config);
/**
* @brief Get pointer of graphics context buffer memory struct.
*
* @param gr_ctx [in] Pointer to graphics context struct.
*
* This function returns #nvgpu_mem pointer of graphics context buffer.
*
* @return pointer to graphics context buffer memory struct.
* @return sm diversity config of the given graphics context struct.
*/
struct nvgpu_mem *nvgpu_gr_ctx_get_ctx_mem(struct nvgpu_gr_ctx *gr_ctx);
u32 nvgpu_gr_ctx_get_sm_diversity_config(struct nvgpu_gr_ctx *gr_ctx);
#endif
/**
* @brief Load local golden image into given graphics context buffer.

View File

@@ -1,7 +1,7 @@
/*
* Tegra GPU Virtualization Interfaces to Server
*
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -405,6 +405,7 @@ struct tegra_vgpu_gr_ctx_params {
u64 gr_ctx_va;
u32 class_num;
u32 tsg_id;
u32 sm_diversity_config;
};
struct tegra_vgpu_tsg_bind_unbind_channel_params {

View File

@@ -1,7 +1,7 @@
/*
* GK20A Graphics channel
*
* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -1140,6 +1140,24 @@ long gk20a_channel_ioctl(struct file *filp,
__func__, cmd);
break;
}
#ifdef CONFIG_NVGPU_SM_DIVERSITY
{
struct nvgpu_tsg *tsg = nvgpu_tsg_from_ch(ch);
if (tsg == NULL) {
err = -EINVAL;
break;
}
if (nvgpu_gr_ctx_get_sm_diversity_config(tsg->gr_ctx) ==
NVGPU_INVALID_SM_CONFIG_ID) {
nvgpu_gr_ctx_set_sm_diversity_config(tsg->gr_ctx,
NVGPU_DEFAULT_SM_DIVERSITY_CONFIG);
}
}
#endif
err = nvgpu_ioctl_channel_alloc_obj_ctx(ch, args->class_num, args->flags);
gk20a_idle(ch->g);
break;

View File

@@ -509,7 +509,7 @@ static int test_gr_init_hal_sm_id_config(struct gk20a *g)
config->tpc_count = 2;
config->no_of_sm = 4;
err = g->ops.gr.init.sm_id_config(g, tpc_sm_id, config);
err = g->ops.gr.init.sm_id_config(g, tpc_sm_id, config, NULL, false);
if (err != 0) {
return UNIT_FAIL;
}