gpu: nvgpu: Remove TSG required flag

Remove nvgpu internal flag indicating that TSGs are required. We now
require TSGs always. This also fixes a regression where CE channels
were back to using bare channels on gp106.

Bug 1842197

Change-Id: Id359e5a455fb324278636bb8994b583936490ffd
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1628481
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Terje Bergstrom
2017-12-28 14:08:25 -08:00
committed by mobile promotions
parent 4f67a794dd
commit 14fa8207e2
9 changed files with 9 additions and 32 deletions

View File

@@ -182,8 +182,6 @@ static void nvgpu_init_mm_vars(struct gk20a *g)
 			    platform->unified_memory);
 	__nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
 			    platform->unify_address_spaces);
-	__nvgpu_set_enabled(g, NVGPU_MM_CE_TSG_REQUIRED,
-			    platform->tsg_required);
 	nvgpu_mutex_init(&g->mm.tlb_lock);
 	nvgpu_mutex_init(&g->mm.priv_lock);

View File

@@ -244,7 +244,6 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 		.vbios_min_version = 0x88001e00,
 		.hardcode_sw_threshold = false,
 		.run_preos = true,
-		.tsg_required = true,
 	},
{ /* DEVICE=PG503 SKU 200 ES */
/* ptimer src frequency in hz */
@@ -279,7 +278,6 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 		.vbios_min_version = 0x88001e00,
 		.hardcode_sw_threshold = false,
 		.run_preos = true,
-		.tsg_required = true,
 	},
{
/* ptimer src frequency in hz */
@@ -314,7 +312,6 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 		.vbios_min_version = 0x88000126,
 		.hardcode_sw_threshold = false,
 		.run_preos = true,
-		.tsg_required = true,
 	},
{ /* SKU250 */
/* ptimer src frequency in hz */
@@ -349,7 +346,6 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 		.vbios_min_version = 0x1,
 		.hardcode_sw_threshold = false,
 		.run_preos = true,
-		.tsg_required = true,
 	},
 };

View File

@@ -224,9 +224,6 @@ struct gk20a_platform {
 	/* unified or split memory with separate vidmem? */
 	bool unified_memory;
-	/* true if all channels must be in TSG */
-	bool tsg_required;
 	/* minimum supported VBIOS version */
 	u32 vbios_min_version;

View File

@@ -972,6 +972,4 @@ struct gk20a_platform gm20b_tegra_platform = {
 	.soc_name = "tegra21x",
 	.unified_memory = true,
-	.tsg_required = true,
 };

View File

@@ -417,8 +417,6 @@ struct gk20a_platform gp10b_tegra_platform = {
 	.soc_name = "tegra18x",
 	.unified_memory = true,
-	.tsg_required = true,
 };

View File

@@ -222,8 +222,6 @@ struct gk20a_platform t19x_gpu_tegra_platform = {
 	.honors_aperture = true,
 	.unified_memory = true,
-	.tsg_required = true,
 	.reset_assert = gp10b_tegra_reset_assert,
 	.reset_deassert = gp10b_tegra_reset_deassert,
 };

View File

@@ -66,6 +66,4 @@ struct gk20a_platform vgpu_tegra_platform = {
 	.devfreq_governor = "userspace",
 	.virtual_dev = true,
-	.tsg_required = true,
 };

View File

@@ -456,14 +456,12 @@ u32 gk20a_ce_create_context(struct gk20a *g,
 	ce_ctx->vm = g->mm.ce.vm;
-	if (nvgpu_is_enabled(g, NVGPU_MM_CE_TSG_REQUIRED)) {
-		/* allocate a tsg if needed */
-		ce_ctx->tsg = gk20a_tsg_open(g);
+	/* allocate a tsg if needed */
+	ce_ctx->tsg = gk20a_tsg_open(g);
-		if (!ce_ctx->tsg) {
-			nvgpu_err(g, "ce: gk20a tsg not available");
-			goto end;
-		}
+	if (!ce_ctx->tsg) {
+		nvgpu_err(g, "ce: gk20a tsg not available");
+		goto end;
+	}
 	/* always kernel client needs privileged channel */
@@ -481,12 +479,10 @@ u32 gk20a_ce_create_context(struct gk20a *g,
 		goto end;
 	}
-	if (nvgpu_is_enabled(g, NVGPU_MM_CE_TSG_REQUIRED)) {
-		err = gk20a_tsg_bind_channel(ce_ctx->tsg, ce_ctx->ch);
-		if (err) {
-			nvgpu_err(g, "ce: unable to bind to tsg");
-			goto end;
-		}
+	err = gk20a_tsg_bind_channel(ce_ctx->tsg, ce_ctx->ch);
+	if (err) {
+		nvgpu_err(g, "ce: unable to bind to tsg");
+		goto end;
+	}
 	/* allocate gpfifo (1024 should be more than enough) */

View File

@@ -63,8 +63,6 @@ struct gk20a;
 #define NVGPU_MM_HONORS_APERTURE 17
 /* unified or split memory with separate vidmem? */
 #define NVGPU_MM_UNIFIED_MEMORY 18
-/* kernel mode ce vidmem clearing channels need to be in a tsg */
-#define NVGPU_MM_CE_TSG_REQUIRED 19
 /* User-space managed address spaces support */
 #define NVGPU_SUPPORT_USERSPACE_MANAGED_AS 20
 /* IO coherence support is available */