diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 94c678666..10018ca43 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -467,6 +467,45 @@ static int nvgpu_init_mm_setup_vm(struct gk20a *g)
 	return err;
 }
 
+static int nvgpu_init_mm_components(struct gk20a *g)
+{
+	int err = 0;
+	struct mm_gk20a *mm = &g->mm;
+
+	err = nvgpu_alloc_sysmem_flush(g);
+	if (err != 0) {
+		return err;
+	}
+
+	err = nvgpu_init_mm_setup_bar(g);
+	if (err != 0) {
+		return err;
+	}
+
+	err = nvgpu_init_mm_setup_vm(g);
+	if (err != 0) {
+		return err;
+	}
+
+	err = nvgpu_init_mmu_debug(mm);
+	if (err != 0) {
+		return err;
+	}
+
+	/*
+	 * Some chips support replayable MMU faults. For such chips make sure
+	 * SW is initialized.
+	 */
+	if (g->ops.mm.mmu_fault.setup_sw != NULL) {
+		err = g->ops.mm.mmu_fault.setup_sw(g);
+		if (err != 0) {
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 static int nvgpu_init_mm_setup_sw(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
@@ -512,37 +551,11 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
 	}
 #endif
 
-	err = nvgpu_alloc_sysmem_flush(g);
+	err = nvgpu_init_mm_components(g);
 	if (err != 0) {
 		return err;
 	}
 
-	err = nvgpu_init_mm_setup_bar(g);
-	if (err != 0) {
-		return err;
-	}
-
-	err = nvgpu_init_mm_setup_vm(g);
-	if (err != 0) {
-		return err;
-	}
-
-	err = nvgpu_init_mmu_debug(mm);
-	if (err != 0) {
-		return err;
-	}
-
-	/*
-	 * Some chips support replayable MMU faults. For such chips make sure
-	 * SW is initialized.
-	 */
-	if (g->ops.mm.mmu_fault.setup_sw != NULL) {
-		err = g->ops.mm.mmu_fault.setup_sw(g);
-		if (err != 0) {
-			return err;
-		}
-	}
-
 	if ((g->ops.fb.fb_ecc_init != NULL) && !g->ecc.initialized) {
 		err = g->ops.fb.fb_ecc_init(g);
 		if (err != 0) {
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 09a43199c..73dd37bef 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -389,21 +389,12 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
 }
 #endif
 
-static int nvgpu_vm_init_vma_allocators(struct gk20a *g, struct vm_gk20a *vm,
+static int nvgpu_vm_init_user_vma(struct gk20a *g, struct vm_gk20a *vm,
 		u64 user_vma_start, u64 user_vma_limit,
-		u64 user_lp_vma_start, u64 user_lp_vma_limit,
-		u64 kernel_vma_start, u64 kernel_vma_limit,
-		u64 kernel_vma_flags, const char *name)
+		const char *name)
 {
 	int err = 0;
 	char alloc_name[32];
-	size_t name_len;
-
-	name_len = strlen("gk20a_") + strlen(name);
-	if (name_len >= 32U) {
-		nvgpu_err(g, "Invalid MAX_NAME_SIZE %lu %u", name_len, 32U);
-		return -EINVAL;
-	}
 
 	/*
 	 * User VMA.
@@ -432,6 +423,15 @@ static int nvgpu_vm_init_vma_allocators(struct gk20a *g, struct vm_gk20a *vm,
 		vm->vma[0] = &vm->kernel;
 		vm->vma[1] = &vm->kernel;
 	}
+	return 0;
+}
+
+static int nvgpu_vm_init_user_lp_vma(struct gk20a *g, struct vm_gk20a *vm,
+		u64 user_lp_vma_start, u64 user_lp_vma_limit,
+		const char *name)
+{
+	int err = 0;
+	char alloc_name[32];
 
 	/*
 	 * User VMA for large pages when a split address range is used.
@@ -450,9 +450,18 @@
 				GPU_ALLOC_GVA_SPACE,
 				BUDDY_ALLOCATOR);
 		if (err != 0) {
-			goto clean_up_allocators;
+			return err;
 		}
 	}
+	return 0;
+}
+
+static int nvgpu_vm_init_kernel_vma(struct gk20a *g, struct vm_gk20a *vm,
+		u64 kernel_vma_start, u64 kernel_vma_limit,
+		u64 kernel_vma_flags, const char *name)
+{
+	int err = 0;
+	char alloc_name[32];
 
 	/*
 	 * Kernel VMA.
@@ -471,10 +480,46 @@ static int nvgpu_vm_init_vma_allocators(struct gk20a *g, struct vm_gk20a *vm,
 				kernel_vma_flags,
 				BUDDY_ALLOCATOR);
 		if (err != 0) {
-			goto clean_up_allocators;
+			return err;
 		}
 	}
 	return 0;
+}
+
+static int nvgpu_vm_init_vma_allocators(struct gk20a *g, struct vm_gk20a *vm,
+		u64 user_vma_start, u64 user_vma_limit,
+		u64 user_lp_vma_start, u64 user_lp_vma_limit,
+		u64 kernel_vma_start, u64 kernel_vma_limit,
+		u64 kernel_vma_flags, const char *name)
+{
+	int err = 0;
+	size_t name_len;
+
+	name_len = strlen("gk20a_") + strlen(name);
+	if (name_len >= 32U) {
+		nvgpu_err(g, "Invalid MAX_NAME_SIZE %lu %u", name_len, 32U);
+		return -EINVAL;
+	}
+
+	err = nvgpu_vm_init_user_vma(g, vm,
+			user_vma_start, user_vma_limit, name);
+	if (err != 0) {
+		return err;
+	}
+
+	err = nvgpu_vm_init_user_lp_vma(g, vm,
+			user_lp_vma_start, user_lp_vma_limit, name);
+	if (err != 0) {
+		goto clean_up_allocators;
+	}
+
+	err = nvgpu_vm_init_kernel_vma(g, vm, kernel_vma_start,
+			kernel_vma_limit, kernel_vma_flags, name);
+	if (err != 0) {
+		goto clean_up_allocators;
+	}
+
+	return 0;
 
 clean_up_allocators:
 	if (nvgpu_alloc_initialized(&vm->kernel)) {
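
Two recurring nvgpu patterns in this patch may be worth illustrating outside the diff. First, nvgpu_init_mm_components() keeps the HAL convention of gating chip-specific setup on an ops-table function pointer: chips without replayable MMU faults leave g->ops.mm.mmu_fault.setup_sw NULL, so the NULL check doubles as a capability check. A minimal standalone sketch of that shape follows (struct hal_ops, feature_setup_sw, and init_components are illustrative names, not nvgpu API):

#include <stddef.h>

/* Illustrative ops table: a chip's HAL fills in only the hooks it supports. */
struct hal_ops {
	int (*feature_setup_sw)(void *g);
};

static int init_components(const struct hal_ops *ops, void *g)
{
	int err;

	/*
	 * Optional op: a NULL pointer means the chip does not support
	 * the feature, so skipping it is not an error.
	 */
	if (ops->feature_setup_sw != NULL) {
		err = ops->feature_setup_sw(g);
		if (err != 0) {
			return err;
		}
	}

	return 0;
}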
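
Second, the vm.c split divides the error-handling labor: each new helper returns early without cleaning up, and only the coordinating nvgpu_vm_init_vma_allocators() unwinds partially initialized allocators through the single clean_up_allocators label. A sketch of that shape under the same caveat (struct vm_state, init_user_vma, init_kernel_vma, and destroy_allocators are illustrative stand-ins):

#include <stdbool.h>

/* Illustrative VM state: two allocators brought up in order. */
struct vm_state {
	bool user_ready;
	bool kernel_ready;
};

static int init_user_vma(struct vm_state *vm)
{
	vm->user_ready = true;	/* stands in for a buddy-allocator init */
	return 0;
}

static int init_kernel_vma(struct vm_state *vm)
{
	vm->kernel_ready = true;
	return 0;
}

static void destroy_allocators(struct vm_state *vm)
{
	/* Unwind only what was actually brought up. */
	if (vm->kernel_ready) {
		vm->kernel_ready = false;
	}
	if (vm->user_ready) {
		vm->user_ready = false;
	}
}

static int init_vma_allocators(struct vm_state *vm)
{
	int err;

	err = init_user_vma(vm);
	if (err != 0) {
		return err;	/* nothing initialized yet, no unwind */
	}

	err = init_kernel_vma(vm);
	if (err != 0) {
		goto clean_up_allocators;	/* user VMA must be rolled back */
	}

	return 0;

clean_up_allocators:
	destroy_allocators(vm);
	return err;
}

The same asymmetry is visible in the patch itself: a failure in the first helper returns directly because no allocator exists yet, while later failures funnel through clean_up_allocators because earlier allocators already do.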