mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 10:34:43 +03:00
gpu: nvgpu: Force the PMU VM to use 128K large pages (gm20b)
Add a WAR for gm20b that allows us to force the PMU VM to use 128K large pages. For some reason setting the small page size to 64K breaks the PMU boot. Unclear why. Bug needs to be filed and fixed. Once fixed this patch can and should be reverted. Bug 200105199 Change-Id: I2b4c9e214e2a6dff33bea18bd2359c33364ba03f Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1782769 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
bcf83fab33
commit
652da81169
@@ -202,13 +202,21 @@ static int nvgpu_init_system_vm(struct mm_gk20a *mm)
 	struct gk20a *g = gk20a_from_mm(mm);
 	struct nvgpu_mem *inst_block = &mm->pmu.inst_block;
 	u32 big_page_size = g->ops.mm.get_default_big_page_size();
-	u32 low_hole, aperture_size;
+	u64 low_hole, aperture_size;
+
+	/*
+	 * For some reason the maxwell PMU code is dependent on the large page
+	 * size. No reason AFAICT for this. Probably a bug somewhere.
+	 */
+	if (nvgpu_is_enabled(g, NVGPU_MM_FORCE_128K_PMU_VM)) {
+		big_page_size = SZ_128K;
+	}
 
 	/*
 	 * No user region - so we will pass that as zero sized.
 	 */
-	low_hole = SZ_4K * 16;
-	aperture_size = GK20A_PMU_VA_SIZE * 2;
+	low_hole = SZ_4K * 16UL;
+	aperture_size = GK20A_PMU_VA_SIZE;
 
 	mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
 	nvgpu_log_info(g, "pmu vm size = 0x%x", mm->pmu.aperture_size);
||||
@@ -81,6 +81,8 @@ struct gk20a;
 #define NVGPU_USE_COHERENT_SYSMEM		26
 /* Use physical scatter tables instead of IOMMU */
 #define NVGPU_MM_USE_PHYSICAL_SG		27
+/* WAR for gm20b chips. */
+#define NVGPU_MM_FORCE_128K_PMU_VM		28
 
 /*
  * Host flags
||||
@@ -215,6 +215,8 @@ static void nvgpu_init_mm_vars(struct gk20a *g)
 			    platform->unified_memory);
 	__nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
 			    platform->unify_address_spaces);
+	__nvgpu_set_enabled(g, NVGPU_MM_FORCE_128K_PMU_VM,
+			    platform->force_128K_pmu_vm);
 
 	nvgpu_mutex_init(&g->mm.tlb_lock);
 	nvgpu_mutex_init(&g->mm.priv_lock);
||||
@@ -244,6 +244,8 @@ struct gk20a_platform {
 	bool honors_aperture;
 	/* unified or split memory with separate vidmem? */
 	bool unified_memory;
+	/* WAR for gm20b chips. */
+	bool force_128K_pmu_vm;
 
 	/*
 	 * DMA mask for Linux (both coh and non-coh). If not set defaults to
||||
@@ -952,6 +952,7 @@ struct gk20a_platform gm20b_tegra_platform = {
 
 	.unified_memory = true,
 	.dma_mask = DMA_BIT_MASK(34),
+	.force_128K_pmu_vm = true,
 
 	.secure_buffer_size = 335872,
 };
||||
Reference in New Issue
Block a user