gpu: nvgpu: Update gk20a pde bit coverage function

The mm_gk20a.c function that returns the number of bits a PDE covers
is very useful for determining PDE size on all chips. Copy it into
the common VM code, since it applies to all chips/platforms.

Bug 200105199

Change-Id: I437da4781be2fa7c540abe52b20f4c4321f6c649
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1639730
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 137006fe78 (parent c713934675)
Author:    Alex Waterman
Date:      2018-01-16 16:38:19 -08:00
Committed: mobile promotions

4 changed files with 22 additions and 6 deletions
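For intuition, the value this helper returns converts directly into a PDE
size: 1 << bits bytes. A standalone sketch (illustrative only, not part of
this change) using the gp10b figure cited in the comment added below:

#include <stdio.h>

int main(void)
{
	/* 21 bits is the gp10b last-level PDE coverage quoted in the
	 * new function's comment; any chip's value plugs in the same way. */
	int bits = 21;
	unsigned long long pde_size = 1ULL << bits;

	printf("each last-level PDE maps %llu bytes (%llu MB)\n",
	       pde_size, pde_size >> 20);
	return 0;
}

This prints 2097152 bytes, i.e. 2MB per last-level PDE.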

@@ -60,6 +60,26 @@ int vm_aspace_id(struct vm_gk20a *vm)
 	return vm->as_share ? vm->as_share->id : -1;
 }
 
+/*
+ * Determine how many bits of the address space each last level PDE covers.
+ * For example, on gp10b the last level PDE covers address bits 28 to 21, so
+ * each last level PDE addresses 21 bits of memory - i.e. 2MB.
+ */
+int nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm)
+{
+	int final_pde_level = 0;
+
+	/*
+	 * Find the second to last level of the page table programming
+	 * hierarchy: the last level is PTEs so we really want the level
+	 * before that which is the last level of PDEs.
+	 */
+	while (vm->mmu_levels[final_pde_level + 2].update_entry)
+		final_pde_level++;
+
+	return vm->mmu_levels[final_pde_level].lo_bit[0];
+}
+
 static void __nvgpu_vm_free_entries(struct vm_gk20a *vm,
 				    struct nvgpu_gmmu_pd *pd,
 				    int level)

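The loop above indexes final_pde_level + 2 because the mmu_levels array is
terminated by an entry with no update_entry callback: the entry just before
the terminator is the PTE level, and the one before that is the last PDE
level. A standalone sketch of the same walk (simplified types; the
five-level table is modeled on the gp10b layout implied by the comment, and
all lo_bit values other than 21 are made up for illustration):

#include <stdio.h>

struct mmu_level {
	int lo_bit;           /* lowest VA bit this level covers */
	int has_update_entry; /* stands in for the update_entry callback */
};

/* Four PDE levels, one PTE level, then the terminator. */
static const struct mmu_level levels[] = {
	{ .lo_bit = 47, .has_update_entry = 1 },
	{ .lo_bit = 38, .has_update_entry = 1 },
	{ .lo_bit = 29, .has_update_entry = 1 },
	{ .lo_bit = 21, .has_update_entry = 1 }, /* last PDE level */
	{ .lo_bit = 12, .has_update_entry = 1 }, /* PTE level */
	{ 0 },                                   /* terminator */
};

int main(void)
{
	int final_pde_level = 0;

	/* Stop once level + 2 is the terminator: final_pde_level then
	 * names the level just above the PTEs. */
	while (levels[final_pde_level + 2].has_update_entry)
		final_pde_level++;

	/* Prints: last PDE level = 3, covering 21 bits */
	printf("last PDE level = %d, covering %d bits\n",
	       final_pde_level, levels[final_pde_level].lo_bit);
	return 0;
}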
@@ -116,11 +116,6 @@ int gk20a_init_mm_setup_hw(struct gk20a *g)
 	return 0;
 }
 
-int gk20a_mm_pde_coverage_bit_count(struct vm_gk20a *vm)
-{
-	return vm->mmu_levels[0].lo_bit[0];
-}
-
 /* for gk20a the "video memory" apertures here are misnomers. */
 static inline u32 big_valid_pde0_bits(struct gk20a *g,
 				      struct nvgpu_gmmu_pd *pd, u64 addr)
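The removed gk20a helper could hard-code mmu_levels[0] because gk20a
programs a two-level page table, so level 0 is the only PDE level. Running
the common helper's walk over a two-level table (sketch below; the lo_bit
values are illustrative, not taken from the driver) shows it reduces to the
removed behavior:

#include <stdio.h>

struct mmu_level { int lo_bit; int has_update_entry; };

static const struct mmu_level gk20a_like[] = {
	{ .lo_bit = 26, .has_update_entry = 1 }, /* the single PDE level */
	{ .lo_bit = 12, .has_update_entry = 1 }, /* PTE level */
	{ 0 },                                   /* terminator */
};

int main(void)
{
	int final_pde_level = 0;

	/* gk20a_like[0 + 2] is already the terminator, so the loop body
	 * never runs: the common helper returns gk20a_like[0].lo_bit,
	 * exactly what the deleted gk20a function returned. */
	while (gk20a_like[final_pde_level + 2].has_update_entry)
		final_pde_level++;

	printf("last PDE level = %d\n", final_pde_level); /* prints 0 */
	return 0;
}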

@@ -172,7 +172,6 @@ int __gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch);
 void pde_range_from_vaddr_range(struct vm_gk20a *vm,
 				u64 addr_lo, u64 addr_hi,
 				u32 *pde_lo, u32 *pde_hi);
-int gk20a_mm_pde_coverage_bit_count(struct vm_gk20a *vm);
 u32 gk20a_mm_get_iommu_bit(struct gk20a *g);
 const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,

@@ -218,6 +218,8 @@ void nvgpu_vm_put(struct vm_gk20a *vm);
 int vm_aspace_id(struct vm_gk20a *vm);
 int nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size);
 
+int nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm);
+
 /* batching eliminates redundant cache flushes and invalidates */
 void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *batch);
 void nvgpu_vm_mapping_batch_finish(