gpu: nvgpu: Common VM initializer

Merge initialization code from gk20a_init_system_vm(),
gk20a_init_bar1_vm() and gk20a_vm_alloc_share() into gk20a_init_vm().

Remove the redundant per-chip page size tables, and make the page size
fields VM specific.
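
For reference, the consolidated initializer introduced below has the
following signature, and the three VM users call it as shown (signature
and calls copied from the diff):

    static int gk20a_init_vm(struct mm_gk20a *mm,
                             struct vm_gk20a *vm,
                             u64 low_hole,
                             u64 aperture_size,
                             bool big_pages,
                             char *name);

    /* BAR1 VM: one small-page low hole, no big pages */
    gk20a_init_vm(mm, vm, SZ_4K, mm->bar1.aperture_size, false, "bar1");
    /* system (PMU) VM: 128 MB low hole, no big pages */
    gk20a_init_vm(mm, vm, SZ_128K << 10, GK20A_PMU_VA_SIZE, false, "system");
    /* per-share address space: big pages enabled */
    gk20a_init_vm(mm, vm, SZ_128K << 10, mm->channel.size, true, name);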

Bug 1558739
Bug 1560370

Change-Id: I4557d9e04d65ccb48fe1f2b116dd1bfa74cae98e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Author: Terje Bergstrom
Date: 2014-10-03 07:32:19 +03:00
Committed by: Dan Willemsen
Parent: 5200902f57
Commit: ecc6f27fd1
5 changed files with 247 additions and 327 deletions

View File

@@ -1887,9 +1887,9 @@ int gk20a_init_gpu_characteristics(struct gk20a *g)
 	gpu->bus_type = NVGPU_GPU_BUS_TYPE_AXI; /* always AXI for now */
 
-	gpu->big_page_size = g->mm.big_page_size;
-	gpu->compression_page_size = g->mm.compression_page_size;
-	gpu->pde_coverage_bit_count = g->mm.pde_stride_shift;
+	gpu->big_page_size = g->mm.pmu.vm.big_page_size;
+	gpu->compression_page_size = g->mm.pmu.vm.compression_page_size;
+	gpu->pde_coverage_bit_count = g->mm.pmu.vm.pde_stride_shift;
 
 	gpu->flags = NVGPU_GPU_FLAGS_SUPPORT_PARTIAL_MAPPINGS
 		| NVGPU_GPU_FLAGS_SUPPORT_SPARSE_ALLOCS;

View File

@@ -98,15 +98,9 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 				   u64 first_vaddr, u64 last_vaddr,
 				   u8 kind_v, u32 ctag_offset, bool cacheable,
 				   int rw_flag);
-static int gk20a_init_system_vm(struct mm_gk20a *mm);
-static int gk20a_init_bar1_vm(struct mm_gk20a *mm);
-
-/* note: keep the page sizes sorted lowest to highest here */
-static const u32 gmmu_page_sizes[gmmu_nr_page_sizes] = { SZ_4K, SZ_128K };
-static const u32 gmmu_page_shifts[gmmu_nr_page_sizes] = { 12, 17 };
-static const u64 gmmu_page_offset_masks[gmmu_nr_page_sizes] = { 0xfffLL,
-						0x1ffffLL };
-static const u64 gmmu_page_masks[gmmu_nr_page_sizes] = { ~0xfffLL, ~0x1ffffLL };
+static int __must_check gk20a_init_system_vm(struct mm_gk20a *mm);
+static int __must_check gk20a_init_bar1_vm(struct mm_gk20a *mm);
 
 struct gk20a_dmabuf_priv {
 	struct mutex lock;
@@ -290,7 +284,7 @@ void gk20a_remove_mm_support(struct mm_gk20a *mm)
 int gk20a_init_mm_setup_sw(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
-	int i;
+	int err;
 
 	gk20a_dbg_fn("");
@@ -301,49 +295,19 @@ int gk20a_init_mm_setup_sw(struct gk20a *g)
 	mm->g = g;
 	mutex_init(&mm->l2_op_lock);
-	mm->big_page_size = gmmu_page_sizes[gmmu_page_size_big];
-	mm->compression_page_size = gmmu_page_sizes[gmmu_page_size_big];
-	mm->pde_stride = mm->big_page_size << 10;
-	mm->pde_stride_shift = ilog2(mm->pde_stride);
-	BUG_ON(mm->pde_stride_shift > 31); /* we have assumptions about this */
-
-	for (i = 0; i < ARRAY_SIZE(gmmu_page_sizes); i++) {
-		u32 num_ptes, pte_space, num_pages;
-
-		/* assuming "full" page tables */
-		num_ptes = mm->pde_stride / gmmu_page_sizes[i];
-		pte_space = num_ptes * gmmu_pte__size_v();
-		/* allocate whole pages */
-		pte_space = roundup(pte_space, PAGE_SIZE);
-		num_pages = pte_space / PAGE_SIZE;
-		/* make sure "order" is viable */
-		BUG_ON(!is_power_of_2(num_pages));
-
-		mm->page_table_sizing[i].num_ptes = num_ptes;
-		mm->page_table_sizing[i].order = ilog2(num_pages);
-	}
 
 	/*TBD: make channel vm size configurable */
 	mm->channel.size = 1ULL << NV_GMMU_VA_RANGE;
 
 	gk20a_dbg_info("channel vm size: %dMB", (int)(mm->channel.size >> 20));
 
-	gk20a_dbg_info("small page-size (%dKB) pte array: %dKB",
-		       gmmu_page_sizes[gmmu_page_size_small] >> 10,
-		       (mm->page_table_sizing[gmmu_page_size_small].num_ptes *
-			gmmu_pte__size_v()) >> 10);
-
-	gk20a_dbg_info("big page-size (%dKB) pte array: %dKB",
-		       gmmu_page_sizes[gmmu_page_size_big] >> 10,
-		       (mm->page_table_sizing[gmmu_page_size_big].num_ptes *
-			gmmu_pte__size_v()) >> 10);
-
-	gk20a_init_bar1_vm(mm);
-	gk20a_init_system_vm(mm);
+	err = gk20a_init_bar1_vm(mm);
+	if (err)
+		return err;
+
+	err = gk20a_init_system_vm(mm);
+	if (err)
+		return err;
 
 	/* set vm_alloc_share op here as gk20a_as_alloc_share needs it */
 	g->ops.mm.vm_alloc_share = gk20a_vm_alloc_share;
@@ -368,12 +332,9 @@ static int gk20a_init_mm_setup_hw(struct gk20a *g)
 	{
 		u32 fb_mmu_ctrl = gk20a_readl(g, fb_mmu_ctrl_r());
 
-		if (gmmu_page_sizes[gmmu_page_size_big] == SZ_128K)
-			fb_mmu_ctrl = (fb_mmu_ctrl &
-				       ~fb_mmu_ctrl_vm_pg_size_f(~0x0)) |
-				fb_mmu_ctrl_vm_pg_size_128kb_f();
-		else
-			BUG_ON(1); /* no support/testing for larger ones yet */
+		fb_mmu_ctrl = (fb_mmu_ctrl &
+			       ~fb_mmu_ctrl_vm_pg_size_f(~0x0)) |
+			fb_mmu_ctrl_vm_pg_size_128kb_f();
 
 		gk20a_writel(g, fb_mmu_ctrl_r(), fb_mmu_ctrl);
 	}
@@ -633,7 +594,7 @@ int zalloc_gmmu_page_table_gk20a(struct vm_gk20a *vm,
gk20a_dbg_fn(""); gk20a_dbg_fn("");
/* allocate enough pages for the table */ /* allocate enough pages for the table */
pte_order = vm->mm->page_table_sizing[gmmu_pgsz_idx].order; pte_order = vm->page_table_sizing[gmmu_pgsz_idx].order;
err = alloc_gmmu_pages(vm, pte_order, &handle, &sgt, &size); err = alloc_gmmu_pages(vm, pte_order, &handle, &sgt, &size);
if (err) if (err)
@@ -654,10 +615,10 @@ void pde_range_from_vaddr_range(struct vm_gk20a *vm,
 				u64 addr_lo, u64 addr_hi,
 				u32 *pde_lo, u32 *pde_hi)
 {
-	*pde_lo = (u32)(addr_lo >> vm->mm->pde_stride_shift);
-	*pde_hi = (u32)(addr_hi >> vm->mm->pde_stride_shift);
+	*pde_lo = (u32)(addr_lo >> vm->pde_stride_shift);
+	*pde_hi = (u32)(addr_hi >> vm->pde_stride_shift);
 	gk20a_dbg(gpu_dbg_pte, "addr_lo=0x%llx addr_hi=0x%llx pde_ss=%d",
-		   addr_lo, addr_hi, vm->mm->pde_stride_shift);
+		   addr_lo, addr_hi, vm->pde_stride_shift);
 	gk20a_dbg(gpu_dbg_pte, "pde_lo=%d pde_hi=%d",
 		   *pde_lo, *pde_hi);
 }
@@ -672,10 +633,11 @@ u32 pte_index_from_vaddr(struct vm_gk20a *vm,
 {
 	u32 ret;
 	/* mask off pde part */
-	addr = addr & ((((u64)1) << vm->mm->pde_stride_shift) - ((u64)1));
+	addr = addr & ((((u64)1) << vm->pde_stride_shift) - ((u64)1));
+
 	/* shift over to get pte index. note assumption that pte index
 	 * doesn't leak over into the high 32b */
-	ret = (u32)(addr >> gmmu_page_shifts[pgsz_idx]);
+	ret = (u32)(addr >> ilog2(vm->gmmu_page_sizes[pgsz_idx]));
 
 	gk20a_dbg(gpu_dbg_pte, "addr=0x%llx pte_i=0x%x", addr, ret);
 	return ret;
@@ -715,7 +677,7 @@ int validate_gmmu_page_table_gk20a_locked(struct vm_gk20a *vm,
 		return 0;
 
 	gk20a_dbg(gpu_dbg_pte, "alloc %dKB ptes for pde %d",
-		   gmmu_page_sizes[gmmu_pgsz_idx]/1024, i);
+		   vm->gmmu_page_sizes[gmmu_pgsz_idx]/1024, i);
 
 	err = zalloc_gmmu_page_table_gk20a(vm, gmmu_pgsz_idx, pte);
 	if (err)
@@ -854,18 +816,18 @@ u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
 	int err;
 	u64 offset;
 	u32 start_page_nr = 0, num_pages;
-	u64 gmmu_page_size = gmmu_page_sizes[gmmu_pgsz_idx];
+	u64 gmmu_page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx];
 
-	if (gmmu_pgsz_idx >= ARRAY_SIZE(gmmu_page_sizes)) {
+	if (gmmu_pgsz_idx >= gmmu_nr_page_sizes) {
 		dev_warn(dev_from_vm(vm),
 			 "invalid page size requested in gk20a vm alloc");
-		return -EINVAL;
+		return 0;
 	}
 
 	if ((gmmu_pgsz_idx == gmmu_page_size_big) && !vm->big_pages) {
 		dev_warn(dev_from_vm(vm),
 			 "unsupportd page size requested");
-		return -EINVAL;
+		return 0;
 	}
@@ -874,10 +836,10 @@ u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
 	size = (size + ((u64)gmmu_page_size - 1)) & ~((u64)gmmu_page_size - 1);
 	gk20a_dbg_info("size=0x%llx @ pgsz=%dKB", size,
-		       gmmu_page_sizes[gmmu_pgsz_idx]>>10);
+		       vm->gmmu_page_sizes[gmmu_pgsz_idx]>>10);
 
 	/* The vma allocator represents page accounting. */
-	num_pages = size >> gmmu_page_shifts[gmmu_pgsz_idx];
+	num_pages = size >> ilog2(vm->gmmu_page_sizes[gmmu_pgsz_idx]);
 
 	err = vma->alloc(vma, &start_page_nr, num_pages);
@@ -887,7 +849,8 @@ u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
 		return 0;
 	}
 
-	offset = (u64)start_page_nr << gmmu_page_shifts[gmmu_pgsz_idx];
+	offset = (u64)start_page_nr <<
+		 ilog2(vm->gmmu_page_sizes[gmmu_pgsz_idx]);
 	gk20a_dbg_fn("%s found addr: 0x%llx", vma->name, offset);
 
 	return offset;
@@ -898,8 +861,8 @@ int gk20a_vm_free_va(struct vm_gk20a *vm,
 			 enum gmmu_pgsz_gk20a pgsz_idx)
 {
 	struct gk20a_allocator *vma = &vm->vma[pgsz_idx];
-	u32 page_size = gmmu_page_sizes[pgsz_idx];
-	u32 page_shift = gmmu_page_shifts[pgsz_idx];
+	u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
+	u32 page_shift = ilog2(page_size);
 	u32 start_page_nr, num_pages;
 	int err;
@@ -1011,26 +974,25 @@ struct buffer_attrs {
 	u8 uc_kind_v;
 };
 
-static void gmmu_select_page_size(struct buffer_attrs *bfr)
+static void gmmu_select_page_size(struct vm_gk20a *vm,
+				  struct buffer_attrs *bfr)
 {
 	int i;
 	/* choose the biggest first (top->bottom) */
-	for (i = (gmmu_nr_page_sizes-1); i >= 0; i--)
-		if (!(gmmu_page_offset_masks[i] & bfr->align)) {
-			/* would like to add this too but nvmap returns the
-			 * original requested size not the allocated size.
-			 * (!(gmmu_page_offset_masks[i] & bfr->size)) */
+	for (i = gmmu_nr_page_sizes-1; i >= 0; i--)
+		if (!((vm->gmmu_page_sizes[i] - 1) & bfr->align)) {
 			bfr->pgsz_idx = i;
 			break;
 		}
 }
 
-static int setup_buffer_kind_and_compression(struct device *d,
+static int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
 					     u32 flags,
 					     struct buffer_attrs *bfr,
 					     enum gmmu_pgsz_gk20a pgsz_idx)
 {
 	bool kind_compressible;
+	struct device *d = dev_from_gk20a(vm->mm->g);
 
 	if (unlikely(bfr->kind_v == gmmu_pte_kind_invalid_v()))
 		bfr->kind_v = gmmu_pte_kind_pitch_v();
@@ -1055,7 +1017,7 @@ static int setup_buffer_kind_and_compression(struct device *d,
 	}
 	/* comptags only supported for suitable kinds, 128KB pagesize */
 	if (unlikely(kind_compressible &&
-		     (gmmu_page_sizes[pgsz_idx] != 128*1024))) {
+		     (vm->gmmu_page_sizes[pgsz_idx] != 128*1024))) {
 		/*
 		gk20a_warn(d, "comptags specified"
 		" but pagesize being used doesn't support it");*/
@@ -1088,7 +1050,7 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
 		return -EINVAL;
 	}
 
-	if (map_offset & gmmu_page_offset_masks[bfr->pgsz_idx]) {
+	if (map_offset & (vm->gmmu_page_sizes[bfr->pgsz_idx] - 1)) {
 		gk20a_err(dev, "map offset must be buffer page size aligned 0x%llx",
 			  map_offset);
 		return -EINVAL;
@@ -1381,7 +1343,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 			gmmu_page_size_big : gmmu_page_size_small;
 	} else {
 		if (vm->big_pages)
-			gmmu_select_page_size(&bfr);
+			gmmu_select_page_size(vm, &bfr);
 		else
 			bfr.pgsz_idx = gmmu_page_size_small;
 	}
@@ -1398,7 +1360,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 		err = -EINVAL;
 		goto clean_up;
 	}
-	gmmu_page_size = gmmu_page_sizes[bfr.pgsz_idx];
+	gmmu_page_size = vm->gmmu_page_sizes[bfr.pgsz_idx];
 
 	/* Check if we should use a fixed offset for mapping this buffer */
@@ -1416,7 +1378,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 	if (sgt)
 		*sgt = bfr.sgt;
 
-	err = setup_buffer_kind_and_compression(d, flags, &bfr, bfr.pgsz_idx);
+	err = setup_buffer_kind_and_compression(vm, flags, &bfr, bfr.pgsz_idx);
 	if (unlikely(err)) {
 		gk20a_err(d, "failure setting up kind and compression");
 		goto clean_up;
@@ -1729,7 +1691,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 	u32 pte_w[2] = {0, 0}; /* invalid pte */
 	u32 ctag = ctag_offset;
 	u32 ctag_incr;
-	u32 page_size = gmmu_page_sizes[pgsz_idx];
+	u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
 	u64 addr = 0;
 	u64 space_to_skip = buffer_offset;
@@ -1783,7 +1745,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 			pte_lo = 0;
 
 		if ((pde_i != pde_hi) && (pde_hi != pde_lo))
-			pte_hi = vm->mm->page_table_sizing[pgsz_idx].num_ptes-1;
+			pte_hi = vm->page_table_sizing[pgsz_idx].num_ptes-1;
 		else
 			pte_hi = pte_index_from_vaddr(vm, last_vaddr,
 						      pgsz_idx);
@@ -1863,7 +1825,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 			 * free/alloc/free/alloc.
 			 */
 			free_gmmu_pages(vm, pte->ref, pte->sgt,
-					vm->mm->page_table_sizing[pgsz_idx].order,
+					vm->page_table_sizing[pgsz_idx].order,
 					pte->size);
 			pte->ref = NULL;
@@ -1973,7 +1935,7 @@ static int gk20a_vm_put_empty(struct vm_gk20a *vm, u64 vaddr,
 {
 	struct mm_gk20a *mm = vm->mm;
 	struct gk20a *g = mm->g;
-	u32 pgsz = gmmu_page_sizes[pgsz_idx];
+	u32 pgsz = vm->gmmu_page_sizes[pgsz_idx];
 	u32 i;
 	dma_addr_t iova;
@@ -1981,7 +1943,7 @@ static int gk20a_vm_put_empty(struct vm_gk20a *vm, u64 vaddr,
 	if (!vm->zero_page_cpuva) {
 		int err = 0;
 		vm->zero_page_cpuva = dma_alloc_coherent(&g->dev->dev,
-							 mm->big_page_size,
+							 vm->big_page_size,
 							 &iova,
 							 GFP_KERNEL);
 		if (!vm->zero_page_cpuva) {
@@ -1992,9 +1954,9 @@ static int gk20a_vm_put_empty(struct vm_gk20a *vm, u64 vaddr,
 	vm->zero_page_iova = iova;
 	err = gk20a_get_sgtable(&g->dev->dev, &vm->zero_page_sgt,
 				vm->zero_page_cpuva, vm->zero_page_iova,
-				mm->big_page_size);
+				vm->big_page_size);
 	if (err) {
-		dma_free_coherent(&g->dev->dev, mm->big_page_size,
+		dma_free_coherent(&g->dev->dev, vm->big_page_size,
 				  vm->zero_page_cpuva,
 				  vm->zero_page_iova);
 		vm->zero_page_iova = 0;
@@ -2058,7 +2020,7 @@ void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer)
 	u64 vaddr = mapped_buffer->addr;
 	u32 pgsz_idx = mapped_buffer->pgsz_idx;
 	u32 num_pages = mapped_buffer->size >>
-		gmmu_page_shifts[pgsz_idx];
+		ilog2(vm->gmmu_page_sizes[pgsz_idx]);
 
 	/* there is little we can do if this fails... */
 	if (g->ops.mm.put_empty) {
@@ -2082,7 +2044,8 @@ void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer)
 			   gk20a_mem_flag_none);
 
 	gk20a_dbg(gpu_dbg_map, "as=%d pgsz=%d gv=0x%x,%08x own_mem_ref=%d",
-		   vm_aspace_id(vm), gmmu_page_sizes[mapped_buffer->pgsz_idx],
+		   vm_aspace_id(vm),
+		   vm->gmmu_page_sizes[mapped_buffer->pgsz_idx],
 		   hi32(mapped_buffer->addr), lo32(mapped_buffer->addr),
 		   mapped_buffer->own_mem_ref);
@@ -2159,14 +2122,14 @@ void gk20a_vm_remove_support(struct vm_gk20a *vm)
 			&vm->pdes.ptes[gmmu_page_size_small][i];
 		if (pte->ref) {
 			free_gmmu_pages(vm, pte->ref, pte->sgt,
-				vm->mm->page_table_sizing[gmmu_page_size_small].order,
+				vm->page_table_sizing[gmmu_page_size_small].order,
 				pte->size);
 			pte->ref = NULL;
 		}
 		pte = &vm->pdes.ptes[gmmu_page_size_big][i];
 		if (pte->ref) {
 			free_gmmu_pages(vm, pte->ref, pte->sgt,
-				vm->mm->page_table_sizing[gmmu_page_size_big].order,
+				vm->page_table_sizing[gmmu_page_size_big].order,
 				pte->size);
 			pte->ref = NULL;
 		}
@@ -2184,7 +2147,7 @@ void gk20a_vm_remove_support(struct vm_gk20a *vm)
 	/* release zero page if used */
 	if (vm->zero_page_cpuva)
-		dma_free_coherent(&g->dev->dev, vm->mm->big_page_size,
+		dma_free_coherent(&g->dev->dev, vm->big_page_size,
 				  vm->zero_page_cpuva, vm->zero_page_iova);
 
 	/* vm is not used anymore. release it. */
@@ -2208,34 +2171,62 @@ void gk20a_vm_put(struct vm_gk20a *vm)
 	kref_put(&vm->ref, gk20a_vm_remove_support_kref);
 }
 
-/* address space interfaces for the gk20a module */
-int gk20a_vm_alloc_share(struct gk20a_as_share *as_share)
+static int gk20a_init_vm(struct mm_gk20a *mm,
+		struct vm_gk20a *vm,
+		u64 low_hole,
+		u64 aperture_size,
+		bool big_pages,
+		char *name)
 {
-	struct gk20a_as *as = as_share->as;
-	struct gk20a *g = gk20a_from_as(as);
-	struct mm_gk20a *mm = &g->mm;
-	struct vm_gk20a *vm;
-	u64 vma_size;
+	int err, i;
 	u32 num_pages, low_hole_pages;
-	char name[32];
-	int err;
-
-	gk20a_dbg_fn("");
-
-	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
-	if (!vm)
-		return -ENOMEM;
-
-	as_share->vm = vm;
+	char alloc_name[32];
+	size_t vma_size;
+	/* note: keep the page sizes sorted lowest to highest here */
+	u32 gmmu_page_sizes[gmmu_nr_page_sizes] = { SZ_4K, SZ_128K };
 
 	vm->mm = mm;
-	vm->as_share = as_share;
 
-	vm->big_pages = true;
+	vm->va_start = low_hole;
+	vm->va_limit = aperture_size;
+	vm->big_pages = big_pages;
+
+	vm->big_page_size = gmmu_page_sizes[gmmu_page_size_big];
+	vm->compression_page_size = gmmu_page_sizes[gmmu_page_size_big];
+	vm->pde_stride = vm->big_page_size << 10;
+	vm->pde_stride_shift = ilog2(vm->pde_stride);
+
+	for (i = 0; i < gmmu_nr_page_sizes; i++) {
+		u32 num_ptes, pte_space, num_pages;
+
+		vm->gmmu_page_sizes[i] = gmmu_page_sizes[i];
+
+		/* assuming "full" page tables */
+		num_ptes = vm->pde_stride / gmmu_page_sizes[i];
+		pte_space = num_ptes * gmmu_pte__size_v();
+		/* allocate whole pages */
+		pte_space = roundup(pte_space, PAGE_SIZE);
+		num_pages = pte_space / PAGE_SIZE;
+		/* make sure "order" is viable */
+		BUG_ON(!is_power_of_2(num_pages));
+		vm->page_table_sizing[i].num_ptes = num_ptes;
+		vm->page_table_sizing[i].order = ilog2(num_pages);
+	}
+
+	gk20a_dbg_info("small page-size (%dKB) pte array: %dKB",
+		       vm->gmmu_page_sizes[gmmu_page_size_small] >> 10,
+		       (vm->page_table_sizing[gmmu_page_size_small].num_ptes *
+			gmmu_pte__size_v()) >> 10);
+
+	gk20a_dbg_info("big page-size (%dKB) pte array: %dKB",
+		       vm->gmmu_page_sizes[gmmu_page_size_big] >> 10,
+		       (vm->page_table_sizing[gmmu_page_size_big].num_ptes *
+			gmmu_pte__size_v()) >> 10);
 
-	vm->va_start = mm->pde_stride;   /* create a one pde hole */
-	vm->va_limit = mm->channel.size; /* note this means channel.size is
-					    really just the max */
 	{
 		u32 pde_lo, pde_hi;
 		pde_range_from_vaddr_range(vm,
@@ -2248,61 +2239,75 @@ int gk20a_vm_alloc_share(struct gk20a_as_share *as_share)
 		kzalloc(sizeof(struct page_table_gk20a) *
 			vm->pdes.num_pdes, GFP_KERNEL);
+	if (!vm->pdes.ptes[gmmu_page_size_small]) {
+		err = -ENOMEM;
+		goto clean_up_pdes;
+	}
 
 	vm->pdes.ptes[gmmu_page_size_big] =
 		kzalloc(sizeof(struct page_table_gk20a) *
 			vm->pdes.num_pdes, GFP_KERNEL);
-	if (!(vm->pdes.ptes[gmmu_page_size_small] &&
-	      vm->pdes.ptes[gmmu_page_size_big]))
-		return -ENOMEM;
+	if (!vm->pdes.ptes[gmmu_page_size_big]) {
+		err = -ENOMEM;
+		goto clean_up_pdes;
+	}
 
-	gk20a_dbg_info("init space for va_limit=0x%llx num_pdes=%d",
-		   vm->va_limit, vm->pdes.num_pdes);
+	gk20a_dbg_info("init space for %s va_limit=0x%llx num_pdes=%d",
+		   name, vm->va_limit, vm->pdes.num_pdes);
 
 	/* allocate the page table directory */
 	err = alloc_gmmu_pages(vm, 0, &vm->pdes.ref,
 			       &vm->pdes.sgt, &vm->pdes.size);
 	if (err)
-		return -ENOMEM;
+		goto clean_up_pdes;
 
 	err = map_gmmu_pages(vm->pdes.ref, vm->pdes.sgt, &vm->pdes.kv,
 			     vm->pdes.size);
 	if (err) {
-		free_gmmu_pages(vm, vm->pdes.ref, vm->pdes.sgt, 0,
-					vm->pdes.size);
-		return -ENOMEM;
+		goto clean_up_ptes;
 	}
-	gk20a_dbg(gpu_dbg_pte, "pdes.kv = 0x%p, pdes.phys = 0x%llx",
-			vm->pdes.kv,
-			gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
+	gk20a_dbg(gpu_dbg_pte, "bar 1 pdes.kv = 0x%p, pdes.phys = 0x%llx",
+			vm->pdes.kv, gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
 	/* we could release vm->pdes.kv but it's only one page... */
 
 	/* low-half: alloc small pages */
 	/* high-half: alloc big pages */
-	vma_size = mm->channel.size >> 1;
+	vma_size = vm->va_limit;
+	if (big_pages)
+		vma_size /= 2;
 
-	snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
-		 gmmu_page_sizes[gmmu_page_size_small]>>10);
-
-	num_pages = (u32)(vma_size >> gmmu_page_shifts[gmmu_page_size_small]);
+	num_pages = (u32)(vma_size >>
+		    ilog2(vm->gmmu_page_sizes[gmmu_page_size_small]));
 
 	/* num_pages above is without regard to the low-side hole. */
 	low_hole_pages = (vm->va_start >>
-			  gmmu_page_shifts[gmmu_page_size_small]);
-
-	gk20a_allocator_init(&vm->vma[gmmu_page_size_small], name,
-	      low_hole_pages,             /* start */
-	      num_pages - low_hole_pages, /* length */
-	      1);                         /* align */
+			  ilog2(vm->gmmu_page_sizes[gmmu_page_size_small]));
+	snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s-%dKB", name,
+		 vm->gmmu_page_sizes[gmmu_page_size_small]>>10);
+	err = gk20a_allocator_init(&vm->vma[gmmu_page_size_small],
+	      alloc_name,
+	      low_hole_pages,		 /*start*/
+	      num_pages - low_hole_pages,/* length*/
+	      1);			 /* align */
+	if (err)
+		goto clean_up_map_pde;
 
-	snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
-		 gmmu_page_sizes[gmmu_page_size_big]>>10);
-
-	num_pages = (u32)(vma_size >> gmmu_page_shifts[gmmu_page_size_big]);
-	gk20a_allocator_init(&vm->vma[gmmu_page_size_big], name,
-			      num_pages, /* start */
-			      num_pages, /* length */
-			      1); /* align */
+	if (big_pages) {
+		num_pages = (u32)((vm->va_limit / 2) >>
+			ilog2(vm->gmmu_page_sizes[gmmu_page_size_big]));
+
+		snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s-%dKB",
+			 name, vm->gmmu_page_sizes[gmmu_page_size_big]>>10);
+		err = gk20a_allocator_init(&vm->vma[gmmu_page_size_big],
+				      alloc_name,
+				      num_pages, /* start */
+				      num_pages, /* length */
+				      1); /* align */
+		if (err)
+			goto clean_up_small_allocator;
+	}
 
 	vm->mapped_buffers = RB_ROOT;
@@ -2310,12 +2315,48 @@ int gk20a_vm_alloc_share(struct gk20a_as_share *as_share)
 	kref_init(&vm->ref);
 	INIT_LIST_HEAD(&vm->reserved_va_list);
 
+	return 0;
+
+clean_up_small_allocator:
+	gk20a_allocator_destroy(&vm->vma[gmmu_page_size_small]);
+clean_up_map_pde:
+	unmap_gmmu_pages(vm->pdes.ref, vm->pdes.sgt, vm->pdes.kv);
+clean_up_ptes:
+	free_gmmu_pages(vm, vm->pdes.ref, vm->pdes.sgt, 0,
+			vm->pdes.size);
+clean_up_pdes:
+	kfree(vm->pdes.ptes[gmmu_page_size_small]);
+	kfree(vm->pdes.ptes[gmmu_page_size_big]);
+	return err;
+}
+
+/* address space interfaces for the gk20a module */
+int gk20a_vm_alloc_share(struct gk20a_as_share *as_share)
+{
+	struct gk20a_as *as = as_share->as;
+	struct gk20a *g = gk20a_from_as(as);
+	struct mm_gk20a *mm = &g->mm;
+	struct vm_gk20a *vm;
+	char name[32];
+	int err;
+
+	gk20a_dbg_fn("");
+
+	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+	if (!vm)
+		return -ENOMEM;
+
+	as_share->vm = vm;
+
+	vm->as_share = as_share;
 	vm->enable_ctag = true;
 
+	snprintf(name, sizeof(name), "gk20a_as_%d", as_share->id);
+
+	err = gk20a_init_vm(mm, vm,
+			    SZ_128K << 10, mm->channel.size, true, name);
+
 	return 0;
 }
 
 int gk20a_vm_release_share(struct gk20a_as_share *as_share)
 {
 	struct vm_gk20a *vm = as_share->vm;
@@ -2353,7 +2394,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 	for (pgsz_idx = gmmu_page_size_small;
 	     pgsz_idx < gmmu_nr_page_sizes;
 	     pgsz_idx++) {
-		if (gmmu_page_sizes[pgsz_idx] == args->page_size)
+		if (vm->gmmu_page_sizes[pgsz_idx] == args->page_size)
 			break;
 	}
@@ -2378,7 +2419,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 	start_page_nr = 0;
 	if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET)
 		start_page_nr = (u32)(args->o_a.offset >>
-				      gmmu_page_shifts[pgsz_idx]);
+				      ilog2(vm->gmmu_page_sizes[pgsz_idx]));
 
 	vma = &vm->vma[pgsz_idx];
 	err = vma->alloc(vma, &start_page_nr, args->pages);
@@ -2387,7 +2428,8 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 		goto clean_up;
 	}
 
-	vaddr_start = (u64)start_page_nr << gmmu_page_shifts[pgsz_idx];
+	vaddr_start = (u64)start_page_nr <<
+		      ilog2(vm->gmmu_page_sizes[pgsz_idx]);
 
 	va_node->vaddr_start = vaddr_start;
 	va_node->size = (u64)args->page_size * (u64)args->pages;
@@ -2438,7 +2480,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
 	for (pgsz_idx = gmmu_page_size_small;
 	     pgsz_idx < gmmu_nr_page_sizes;
 	     pgsz_idx++) {
-		if (gmmu_page_sizes[pgsz_idx] == args->page_size)
+		if (vm->gmmu_page_sizes[pgsz_idx] == args->page_size)
 			break;
 	}
@@ -2448,7 +2490,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
 	}
 
 	start_page_nr = (u32)(args->offset >>
-			      gmmu_page_shifts[pgsz_idx]);
+			      ilog2(vm->gmmu_page_sizes[pgsz_idx]));
 
 	vma = &vm->vma[pgsz_idx];
 	err = vma->free(vma, start_page_nr, args->pages);
@@ -2643,6 +2685,17 @@ int gk20a_vm_unmap_buffer(struct gk20a_as_share *as_share, u64 offset)
 	return 0;
 }
 
+static void gk20a_deinit_vm(struct vm_gk20a *vm)
+{
+	gk20a_allocator_destroy(&vm->vma[gmmu_page_size_big]);
+	gk20a_allocator_destroy(&vm->vma[gmmu_page_size_small]);
+	unmap_gmmu_pages(vm->pdes.ref, vm->pdes.sgt, vm->pdes.kv);
+	free_gmmu_pages(vm, vm->pdes.ref, vm->pdes.sgt, 0,
+			vm->pdes.size);
+	kfree(vm->pdes.ptes[gmmu_page_size_small]);
+	kfree(vm->pdes.ptes[gmmu_page_size_big]);
+}
+
 static int gk20a_init_bar1_vm(struct mm_gk20a *mm)
 {
 	int err;
@@ -2650,73 +2703,24 @@ static int gk20a_init_bar1_vm(struct mm_gk20a *mm)
 	void *inst_ptr;
 	struct vm_gk20a *vm = &mm->bar1.vm;
 	struct gk20a *g = gk20a_from_mm(mm);
+	struct device *d = dev_from_gk20a(g);
+	struct inst_desc *inst_block = &mm->bar1.inst_block;
 	u64 pde_addr;
 	u32 pde_addr_lo;
 	u32 pde_addr_hi;
-	struct device *d = dev_from_gk20a(g);
-	struct inst_desc *inst_block = &mm->bar1.inst_block;
 	dma_addr_t iova;
 
-	vm->mm = mm;
-
 	mm->bar1.aperture_size = bar1_aperture_size_mb_gk20a() << 20;
 	gk20a_dbg_info("bar1 vm size = 0x%x", mm->bar1.aperture_size);
+	gk20a_init_vm(mm, vm, SZ_4K, mm->bar1.aperture_size, false, "bar1");
 
-	vm->va_start = mm->pde_stride * 1;
-	vm->va_limit = mm->bar1.aperture_size;
-
-	{
-		u32 pde_lo, pde_hi;
-		pde_range_from_vaddr_range(vm,
-					   0, vm->va_limit-1,
-					   &pde_lo, &pde_hi);
-		vm->pdes.num_pdes = pde_hi + 1;
-	}
-
-	/* bar1 is likely only to ever use/need small page sizes. */
-	/* But just in case, for now... arrange for both.*/
-	vm->pdes.ptes[gmmu_page_size_small] =
-		kzalloc(sizeof(struct page_table_gk20a) *
-			vm->pdes.num_pdes, GFP_KERNEL);
-
-	vm->pdes.ptes[gmmu_page_size_big] =
-		kzalloc(sizeof(struct page_table_gk20a) *
-			vm->pdes.num_pdes, GFP_KERNEL);
-
-	if (!(vm->pdes.ptes[gmmu_page_size_small] &&
-	      vm->pdes.ptes[gmmu_page_size_big]))
-		return -ENOMEM;
-
-	gk20a_dbg_info("init space for bar1 va_limit=0x%llx num_pdes=%d",
-		   vm->va_limit, vm->pdes.num_pdes);
-
-	/* allocate the page table directory */
-	err = alloc_gmmu_pages(vm, 0, &vm->pdes.ref,
-			       &vm->pdes.sgt, &vm->pdes.size);
-	if (err)
-		goto clean_up;
-
-	err = map_gmmu_pages(vm->pdes.ref, vm->pdes.sgt, &vm->pdes.kv,
-			     vm->pdes.size);
-	if (err) {
-		free_gmmu_pages(vm, vm->pdes.ref, vm->pdes.sgt, 0,
-					vm->pdes.size);
-		goto clean_up;
-	}
-	gk20a_dbg(gpu_dbg_pte, "bar 1 pdes.kv = 0x%p, pdes.phys = 0x%llx",
-			vm->pdes.kv, gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
-	/* we could release vm->pdes.kv but it's only one page... */
+	gk20a_dbg_info("pde pa=0x%llx",
+		       (u64)gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
 
 	pde_addr = gk20a_mm_iova_addr(vm->pdes.sgt->sgl);
-	pde_addr_lo = u64_lo32(pde_addr >> 12);
+	pde_addr_lo = u64_lo32(pde_addr >> ram_in_base_shift_v());
 	pde_addr_hi = u64_hi32(pde_addr);
 
-	gk20a_dbg_info("pde pa=0x%llx pde_addr_lo=0x%x pde_addr_hi=0x%x",
-		(u64)gk20a_mm_iova_addr(vm->pdes.sgt->sgl),
-		pde_addr_lo, pde_addr_hi);
-
 	/* allocate instance mem for bar1 */
 	inst_block->size = ram_in_alloc_size_v();
 	inst_block->cpuva = dma_alloc_coherent(d, inst_block->size,
@@ -2724,7 +2728,7 @@ static int gk20a_init_bar1_vm(struct mm_gk20a *mm)
 	if (!inst_block->cpuva) {
 		gk20a_err(d, "%s: memory allocation failed\n", __func__);
 		err = -ENOMEM;
-		goto clean_up;
+		goto clean_up_va;
 	}
 
 	inst_block->iova = iova;
@@ -2732,7 +2736,7 @@ static int gk20a_init_bar1_vm(struct mm_gk20a *mm)
 	if (!inst_block->cpu_pa) {
 		gk20a_err(d, "%s: failed to get phys address\n", __func__);
 		err = -ENOMEM;
-		goto clean_up;
+		goto clean_up_inst_block;
 	}
 
 	inst_pa = inst_block->cpu_pa;
@@ -2741,7 +2745,7 @@ static int gk20a_init_bar1_vm(struct mm_gk20a *mm)
gk20a_dbg_info("bar1 inst block physical phys = 0x%llx, kv = 0x%p", gk20a_dbg_info("bar1 inst block physical phys = 0x%llx, kv = 0x%p",
(u64)inst_pa, inst_ptr); (u64)inst_pa, inst_ptr);
memset(inst_ptr, 0, ram_fc_size_val_v()); memset(inst_ptr, 0, inst_block->size);
gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(), gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
ram_in_page_dir_base_target_vid_mem_f() | ram_in_page_dir_base_target_vid_mem_f() |
@@ -2758,31 +2762,16 @@ static int gk20a_init_bar1_vm(struct mm_gk20a *mm)
 		ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit)));
 
 	gk20a_dbg_info("bar1 inst block ptr: %08llx", (u64)inst_pa);
-	gk20a_allocator_init(&vm->vma[gmmu_page_size_small], "gk20a_bar1",
-	      1,/*start*/
-	      (vm->va_limit >> 12) - 1 /* length*/,
-	      1); /* align */
-	/* initialize just in case we try to use it anyway */
-	gk20a_allocator_init(&vm->vma[gmmu_page_size_big], "gk20a_bar1-unused",
-	      0x0badc0de, /* start */
-	      1, /* length */
-	      1); /* align */
-
-	vm->mapped_buffers = RB_ROOT;
-
-	mutex_init(&vm->update_gmmu_lock);
-	kref_init(&vm->ref);
-	INIT_LIST_HEAD(&vm->reserved_va_list);
 
 	return 0;
 
-clean_up:
-	/* free, etc */
+clean_up_inst_block:
 	if (inst_block->cpuva)
 		dma_free_coherent(d, inst_block->size,
 			inst_block->cpuva, inst_block->iova);
 	inst_block->cpuva = NULL;
 	inst_block->iova = 0;
+clean_up_va:
+	gk20a_deinit_vm(vm);
 
 	return err;
 }
@@ -2794,79 +2783,34 @@ static int gk20a_init_system_vm(struct mm_gk20a *mm)
 	void *inst_ptr;
 	struct vm_gk20a *vm = &mm->pmu.vm;
 	struct gk20a *g = gk20a_from_mm(mm);
+	struct device *d = dev_from_gk20a(g);
+	struct inst_desc *inst_block = &mm->pmu.inst_block;
 	u64 pde_addr;
 	u32 pde_addr_lo;
 	u32 pde_addr_hi;
-	struct device *d = dev_from_gk20a(g);
-	struct inst_desc *inst_block = &mm->pmu.inst_block;
 	dma_addr_t iova;
 
-	vm->mm = mm;
-
 	mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
 	gk20a_dbg_info("pmu vm size = 0x%x", mm->pmu.aperture_size);
 
-	vm->va_start = GK20A_PMU_VA_START;
-	vm->va_limit = vm->va_start + mm->pmu.aperture_size;
-
-	{
-		u32 pde_lo, pde_hi;
-		pde_range_from_vaddr_range(vm,
-					   0, vm->va_limit-1,
-					   &pde_lo, &pde_hi);
-		vm->pdes.num_pdes = pde_hi + 1;
-	}
-
-	/* The pmu is likely only to ever use/need small page sizes. */
-	/* But just in case, for now... arrange for both.*/
-	vm->pdes.ptes[gmmu_page_size_small] =
-		kzalloc(sizeof(struct page_table_gk20a) *
-			vm->pdes.num_pdes, GFP_KERNEL);
-
-	vm->pdes.ptes[gmmu_page_size_big] =
-		kzalloc(sizeof(struct page_table_gk20a) *
-			vm->pdes.num_pdes, GFP_KERNEL);
-
-	if (!(vm->pdes.ptes[gmmu_page_size_small] &&
-	      vm->pdes.ptes[gmmu_page_size_big]))
-		return -ENOMEM;
-
-	gk20a_dbg_info("init space for pmu va_limit=0x%llx num_pdes=%d",
-		   vm->va_limit, vm->pdes.num_pdes);
-
-	/* allocate the page table directory */
-	err = alloc_gmmu_pages(vm, 0, &vm->pdes.ref,
-			       &vm->pdes.sgt, &vm->pdes.size);
-	if (err)
-		goto clean_up;
-
-	err = map_gmmu_pages(vm->pdes.ref, vm->pdes.sgt, &vm->pdes.kv,
-			     vm->pdes.size);
-	if (err) {
-		free_gmmu_pages(vm, vm->pdes.ref, vm->pdes.sgt, 0,
-					vm->pdes.size);
-		goto clean_up;
-	}
-	gk20a_dbg_info("pmu pdes phys @ 0x%llx",
-		       (u64)gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
-	/* we could release vm->pdes.kv but it's only one page... */
+	gk20a_init_vm(mm, vm,
+		      SZ_128K << 10, GK20A_PMU_VA_SIZE, false, "system");
+	gk20a_dbg_info("pde pa=0x%llx",
+		       (u64)gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
 
 	pde_addr = gk20a_mm_iova_addr(vm->pdes.sgt->sgl);
-	pde_addr_lo = u64_lo32(pde_addr >> 12);
+	pde_addr_lo = u64_lo32(pde_addr >> ram_in_base_shift_v());
 	pde_addr_hi = u64_hi32(pde_addr);
 
-	gk20a_dbg_info("pde pa=0x%llx pde_addr_lo=0x%x pde_addr_hi=0x%x",
-			(u64)pde_addr, pde_addr_lo, pde_addr_hi);
-
 	/* allocate instance mem for pmu */
-	inst_block->size = GK20A_PMU_INST_SIZE;
+	inst_block->size = ram_in_alloc_size_v();
 	inst_block->cpuva = dma_alloc_coherent(d, inst_block->size,
 					&iova, GFP_KERNEL);
 	if (!inst_block->cpuva) {
 		gk20a_err(d, "%s: memory allocation failed\n", __func__);
 		err = -ENOMEM;
-		goto clean_up;
+		goto clean_up_va;
 	}
 
 	inst_block->iova = iova;
@@ -2874,7 +2818,7 @@ static int gk20a_init_system_vm(struct mm_gk20a *mm)
 	if (!inst_block->cpu_pa) {
 		gk20a_err(d, "%s: failed to get phys address\n", __func__);
 		err = -ENOMEM;
-		goto clean_up;
+		goto clean_up_inst_block;
 	}
 
 	inst_pa = inst_block->cpu_pa;
@@ -2882,7 +2826,7 @@ static int gk20a_init_system_vm(struct mm_gk20a *mm)
gk20a_dbg_info("pmu inst block physical addr: 0x%llx", (u64)inst_pa); gk20a_dbg_info("pmu inst block physical addr: 0x%llx", (u64)inst_pa);
memset(inst_ptr, 0, GK20A_PMU_INST_SIZE); memset(inst_ptr, 0, inst_block->size);
gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(), gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
ram_in_page_dir_base_target_vid_mem_f() | ram_in_page_dir_base_target_vid_mem_f() |
@@ -2898,32 +2842,16 @@ static int gk20a_init_system_vm(struct mm_gk20a *mm)
 	gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
 		ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit)));
 
-	gk20a_allocator_init(&vm->vma[gmmu_page_size_small], "gk20a_pmu",
-	      (vm->va_start >> 12), /* start */
-	      (vm->va_limit - vm->va_start) >> 12, /*length*/
-	      1); /* align */
-	/* initialize just in case we try to use it anyway */
-	gk20a_allocator_init(&vm->vma[gmmu_page_size_big], "gk20a_pmu-unused",
-	      0x0badc0de, /* start */
-	      1, /* length */
-	      1); /* align */
-
-	vm->mapped_buffers = RB_ROOT;
-
-	mutex_init(&vm->update_gmmu_lock);
-	kref_init(&vm->ref);
-	INIT_LIST_HEAD(&vm->reserved_va_list);
-
 	return 0;
 
-clean_up:
-	/* free, etc */
+clean_up_inst_block:
 	if (inst_block->cpuva)
 		dma_free_coherent(d, inst_block->size,
 			inst_block->cpuva, inst_block->iova);
 	inst_block->cpuva = NULL;
 	inst_block->iova = 0;
+clean_up_va:
+	gk20a_deinit_vm(vm);
 
 	return err;
 }

View File

@@ -296,6 +296,16 @@ struct vm_gk20a {
 	bool tlb_dirty;
 	bool mapped;
 
+	u32 compression_page_size;
+	u32 big_page_size;
+	u32 pde_stride;
+	u32 pde_stride_shift;
+
+	struct {
+		u32 order;
+		u32 num_ptes;
+	} page_table_sizing[gmmu_nr_page_sizes];
+
 	struct kref ref;
 
 	struct mutex update_gmmu_lock;
@@ -314,6 +324,7 @@ struct vm_gk20a {
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
 	u64 handle;
 #endif
+	u32 gmmu_page_sizes[gmmu_nr_page_sizes];
 };
 
 struct gk20a;
@@ -329,17 +340,6 @@ void gk20a_mm_l2_invalidate(struct gk20a *g);
 struct mm_gk20a {
 	struct gk20a *g;
 
-	u32 compression_page_size;
-	u32 big_page_size;
-	u32 pde_stride;
-	u32 pde_stride_shift;
-
-	struct {
-		u32 order;
-		u32 num_ptes;
-	} page_table_sizing[gmmu_nr_page_sizes];
-
 	struct {
 		u64 size;
 	} channel;

View File

@@ -22,9 +22,7 @@
 #define __PMU_GK20A_H__
 
 /* defined by pmu hw spec */
-#define GK20A_PMU_VA_START		((128 * 1024) << 10)
 #define GK20A_PMU_VA_SIZE		(512 * 1024 * 1024)
-#define GK20A_PMU_INST_SIZE		(4 * 1024)
 #define GK20A_PMU_UCODE_SIZE_MAX	(256 * 1024)
 #define GK20A_PMU_SEQ_BUF_SIZE		4096
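
Note that the two deleted constants are absorbed by the common initializer: GK20A_PMU_VA_START, i.e. (128 * 1024) << 10 = 128 MB, reappears as the SZ_128K << 10 low hole that gk20a_init_system_vm() now passes to gk20a_init_vm(), and the 4 KB GK20A_PMU_INST_SIZE is replaced by ram_in_alloc_size_v() for the instance block size.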

View File

@@ -20,12 +20,6 @@
#include "hw_fb_gm20b.h" #include "hw_fb_gm20b.h"
#include "hw_gr_gm20b.h" #include "hw_gr_gm20b.h"
static const u32 gmmu_page_sizes[gmmu_nr_page_sizes] = { SZ_4K, SZ_128K };
static const u32 gmmu_page_shifts[gmmu_nr_page_sizes] = { 12, 17 };
static const u64 gmmu_page_offset_masks[gmmu_nr_page_sizes] = { 0xfffLL,
0x1ffffLL };
static const u64 gmmu_page_masks[gmmu_nr_page_sizes] = { ~0xfffLL, ~0x1ffffLL };
static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm, static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm,
enum gmmu_pgsz_gk20a pgsz_idx, enum gmmu_pgsz_gk20a pgsz_idx,
u64 first_vaddr, u64 last_vaddr, u64 first_vaddr, u64 last_vaddr,
@@ -97,9 +91,9 @@ static bool gm20b_vm_is_pde_in_range(struct vm_gk20a *vm, u64 vaddr_lo,
gk20a_dbg_fn(""); gk20a_dbg_fn("");
pde_vaddr_lo = (u64)pde << vm->mm->pde_stride_shift; pde_vaddr_lo = (u64)pde << vm->pde_stride_shift;
pde_vaddr_hi = pde_vaddr_lo | pde_vaddr_hi = pde_vaddr_lo |
((0x1UL << (vm->mm->pde_stride_shift)) - 1); ((0x1UL << (vm->pde_stride_shift)) - 1);
return ((vaddr_lo <= pde_vaddr_lo) && (vaddr_hi) >= pde_vaddr_hi); return ((vaddr_lo <= pde_vaddr_lo) && (vaddr_hi) >= pde_vaddr_hi);
} }
@@ -108,8 +102,8 @@ static int gm20b_vm_put_sparse(struct vm_gk20a *vm, u64 vaddr,
 		u32 num_pages, u32 pgsz_idx, bool refplus)
 {
 	struct mm_gk20a *mm = vm->mm;
-	u32 pgsz = gmmu_page_sizes[pgsz_idx];
-	u32 pde_shift = vm->mm->pde_stride_shift;
+	u32 pgsz = vm->gmmu_page_sizes[pgsz_idx];
+	u32 pde_shift = vm->pde_stride_shift;
 	u64 vaddr_hi;
 	u64 vaddr_pde_start;
 	u32 i;
@@ -127,7 +121,7 @@ static int gm20b_vm_put_sparse(struct vm_gk20a *vm, u64 vaddr,
gk20a_dbg_info("vaddr: 0x%llx, vaddr_hi: 0x%llx, pde_lo: 0x%x, " gk20a_dbg_info("vaddr: 0x%llx, vaddr_hi: 0x%llx, pde_lo: 0x%x, "
"pde_hi: 0x%x, pgsz: %d, pde_stride_shift: %d", "pde_hi: 0x%x, pgsz: %d, pde_stride_shift: %d",
vaddr, vaddr_hi, pde_lo, pde_hi, pgsz, vaddr, vaddr_hi, pde_lo, pde_hi, pgsz,
vm->mm->pde_stride_shift); vm->pde_stride_shift);
for (i = pde_lo; i <= pde_hi; i++) { for (i = pde_lo; i <= pde_hi; i++) {
/* Mark all ptes as sparse. */ /* Mark all ptes as sparse. */
@@ -240,7 +234,7 @@ void gm20b_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
gk20a_dbg_info("vaddr: 0x%llx, vaddr_hi: 0x%llx, pde_lo: 0x%x, " gk20a_dbg_info("vaddr: 0x%llx, vaddr_hi: 0x%llx, pde_lo: 0x%x, "
"pde_hi: 0x%x, pgsz_idx: %d, pde_stride_shift: %d", "pde_hi: 0x%x, pgsz_idx: %d, pde_stride_shift: %d",
vaddr, vaddr_hi, pde_lo, pde_hi, pgsz_idx, vaddr, vaddr_hi, pde_lo, pde_hi, pgsz_idx,
vm->mm->pde_stride_shift); vm->pde_stride_shift);
for (pde_i = pde_lo; pde_i <= pde_hi; pde_i++) { for (pde_i = pde_lo; pde_i <= pde_hi; pde_i++) {
struct page_table_gk20a *pte = vm->pdes.ptes[pgsz_idx] + pde_i; struct page_table_gk20a *pte = vm->pdes.ptes[pgsz_idx] + pde_i;
@@ -248,7 +242,7 @@ void gm20b_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
 		if (pte->ref_cnt == 0) {
 			free_gmmu_pages(vm, pte->ref, pte->sgt,
-				vm->mm->page_table_sizing[pgsz_idx].order,
+				vm->page_table_sizing[pgsz_idx].order,
 				pte->size);
 			pte->ref = NULL;
 			update_gmmu_pde_locked(vm, pde_i);