Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Remove TSG required flag

Remove nvgpu internal flag indicating that TSGs are required. We now
require TSGs always. This also fixes a regression where CE channels
were back to using bare channels on gp106.

Bug 1842197

Change-Id: Id359e5a455fb324278636bb8994b583936490ffd
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1628481
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 4f67a794dd
Commit: 14fa8207e2
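For orientation before reading the diff: stitching together the new-side lines of the two gk20a_ce_create_context() hunks below gives roughly the flow sketched here, with the TSG opened and the CE channel bound to it unconditionally instead of behind the removed NVGPU_MM_CE_TSG_REQUIRED check. The step summarized in the middle comment is not changed by this commit and is only paraphrased; everything else is taken from the hunks themselves.

	ce_ctx->vm = g->mm.ce.vm;

	/* allocate a tsg if needed */
	ce_ctx->tsg = gk20a_tsg_open(g);

	if (!ce_ctx->tsg) {
		nvgpu_err(g, "ce: gk20a tsg not available");
		goto end;
	}

	/* ... the privileged kernel channel is opened into ce_ctx->ch
	 * here (unchanged by this commit) ... */

	err = gk20a_tsg_bind_channel(ce_ctx->tsg, ce_ctx->ch);
	if (err) {
		nvgpu_err(g, "ce: unable to bind to tsg");
		goto end;
	}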
@@ -182,8 +182,6 @@ static void nvgpu_init_mm_vars(struct gk20a *g)
 			platform->unified_memory);
 	__nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
 			platform->unify_address_spaces);
-	__nvgpu_set_enabled(g, NVGPU_MM_CE_TSG_REQUIRED,
-			platform->tsg_required);
 
 	nvgpu_mutex_init(&g->mm.tlb_lock);
 	nvgpu_mutex_init(&g->mm.priv_lock);
@@ -244,7 +244,6 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 		.vbios_min_version = 0x88001e00,
 		.hardcode_sw_threshold = false,
 		.run_preos = true,
-		.tsg_required = true,
 	},
 	{ /* DEVICE=PG503 SKU 200 ES */
 		/* ptimer src frequency in hz */
@@ -279,7 +278,6 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 		.vbios_min_version = 0x88001e00,
 		.hardcode_sw_threshold = false,
 		.run_preos = true,
-		.tsg_required = true,
 	},
 	{
 		/* ptimer src frequency in hz */
@@ -314,7 +312,6 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 		.vbios_min_version = 0x88000126,
 		.hardcode_sw_threshold = false,
 		.run_preos = true,
-		.tsg_required = true,
 	},
 	{ /* SKU250 */
 		/* ptimer src frequency in hz */
@@ -349,7 +346,6 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 		.vbios_min_version = 0x1,
 		.hardcode_sw_threshold = false,
 		.run_preos = true,
-		.tsg_required = true,
 	},
 };
 
@@ -224,9 +224,6 @@ struct gk20a_platform {
 	/* unified or split memory with separate vidmem? */
 	bool unified_memory;
 
-	/* true if all channels must be in TSG */
-	bool tsg_required;
-
 	/* minimum supported VBIOS version */
 	u32 vbios_min_version;
 
@@ -972,6 +972,4 @@ struct gk20a_platform gm20b_tegra_platform = {
 	.soc_name = "tegra21x",
 
 	.unified_memory = true,
-
-	.tsg_required = true,
 };
@@ -417,8 +417,6 @@ struct gk20a_platform gp10b_tegra_platform = {
 	.soc_name = "tegra18x",
 
 	.unified_memory = true,
-
-	.tsg_required = true,
 };
 
 
@@ -222,8 +222,6 @@ struct gk20a_platform t19x_gpu_tegra_platform = {
 	.honors_aperture = true,
 	.unified_memory = true,
 
-	.tsg_required = true,
-
 	.reset_assert = gp10b_tegra_reset_assert,
 	.reset_deassert = gp10b_tegra_reset_deassert,
 };
@@ -66,6 +66,4 @@ struct gk20a_platform vgpu_tegra_platform = {
 	.devfreq_governor = "userspace",
 
 	.virtual_dev = true,
-
-	.tsg_required = true,
 };
@@ -456,14 +456,12 @@ u32 gk20a_ce_create_context(struct gk20a *g,
 
 	ce_ctx->vm = g->mm.ce.vm;
 
-	if (nvgpu_is_enabled(g, NVGPU_MM_CE_TSG_REQUIRED)) {
-		/* allocate a tsg if needed */
-		ce_ctx->tsg = gk20a_tsg_open(g);
+	/* allocate a tsg if needed */
+	ce_ctx->tsg = gk20a_tsg_open(g);
 
-		if (!ce_ctx->tsg) {
-			nvgpu_err(g, "ce: gk20a tsg not available");
-			goto end;
-		}
+	if (!ce_ctx->tsg) {
+		nvgpu_err(g, "ce: gk20a tsg not available");
+		goto end;
 	}
 
 	/* always kernel client needs privileged channel */
@@ -481,12 +479,10 @@ u32 gk20a_ce_create_context(struct gk20a *g,
 		goto end;
 	}
 
-	if (nvgpu_is_enabled(g, NVGPU_MM_CE_TSG_REQUIRED)) {
-		err = gk20a_tsg_bind_channel(ce_ctx->tsg, ce_ctx->ch);
-		if (err) {
-			nvgpu_err(g, "ce: unable to bind to tsg");
-			goto end;
-		}
+	err = gk20a_tsg_bind_channel(ce_ctx->tsg, ce_ctx->ch);
+	if (err) {
+		nvgpu_err(g, "ce: unable to bind to tsg");
+		goto end;
 	}
 
 	/* allocate gpfifo (1024 should be more than enough) */
@@ -63,8 +63,6 @@ struct gk20a;
 #define NVGPU_MM_HONORS_APERTURE 17
 /* unified or split memory with separate vidmem? */
 #define NVGPU_MM_UNIFIED_MEMORY 18
-/* kernel mode ce vidmem clearing channels need to be in a tsg */
-#define NVGPU_MM_CE_TSG_REQUIRED 19
 /* User-space managed address spaces support */
 #define NVGPU_SUPPORT_USERSPACE_MANAGED_AS 20
 /* IO coherence support is available */