Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: divide functions to reduce complexity
This patch divides nvgpu_init_mm_setup_sw() and
nvgpu_vm_init_vma_allocators() functions to reduce code complexity.

Jira NVGPU-4780

Change-Id: I3d94cf44aee2e5e43471b97055c51fa2b0f83d52
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2291817
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Nicolas Benech <nbenech@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by Alex Waterman
parent 6ed4f57972
commit 14ce94df43
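The change is a straightforward extract-function refactor: a long setup routine whose body is a chain of init steps keeps only its own work, while the chain moves into a helper that propagates the first failure. A minimal compilable sketch of that shape, using hypothetical names (step_a, step_b, init_components, init_setup_sw) rather than the driver's real ones:

#include <stdio.h>

/* Hypothetical init steps standing in for the real nvgpu setup calls. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }

/* Extracted helper: owns the step chain and propagates the first failure. */
static int init_components(void)
{
    int err;

    err = step_a();
    if (err != 0) {
        return err;
    }

    err = step_b();
    if (err != 0) {
        return err;
    }

    return 0;
}

/* The original entry point shrinks to a single call plus its own work. */
static int init_setup_sw(void)
{
    return init_components();
}

int main(void)
{
    printf("setup: %d\n", init_setup_sw());
    return 0;
}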
@@ -467,6 +467,45 @@ static int nvgpu_init_mm_setup_vm(struct gk20a *g)
     return err;
 }

+static int nvgpu_init_mm_components(struct gk20a *g)
+{
+    int err = 0;
+    struct mm_gk20a *mm = &g->mm;
+
+    err = nvgpu_alloc_sysmem_flush(g);
+    if (err != 0) {
+        return err;
+    }
+
+    err = nvgpu_init_mm_setup_bar(g);
+    if (err != 0) {
+        return err;
+    }
+
+    err = nvgpu_init_mm_setup_vm(g);
+    if (err != 0) {
+        return err;
+    }
+
+    err = nvgpu_init_mmu_debug(mm);
+    if (err != 0) {
+        return err;
+    }
+
+    /*
+     * Some chips support replayable MMU faults. For such chips make sure
+     * SW is initialized.
+     */
+    if (g->ops.mm.mmu_fault.setup_sw != NULL) {
+        err = g->ops.mm.mmu_fault.setup_sw(g);
+        if (err != 0) {
+            return err;
+        }
+    }
+
+    return 0;
+}
+
 static int nvgpu_init_mm_setup_sw(struct gk20a *g)
 {
     struct mm_gk20a *mm = &g->mm;
@@ -512,37 +551,11 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
     }
 #endif

-    err = nvgpu_alloc_sysmem_flush(g);
+    err = nvgpu_init_mm_components(g);
     if (err != 0) {
         return err;
     }

-    err = nvgpu_init_mm_setup_bar(g);
-    if (err != 0) {
-        return err;
-    }
-
-    err = nvgpu_init_mm_setup_vm(g);
-    if (err != 0) {
-        return err;
-    }
-
-    err = nvgpu_init_mmu_debug(mm);
-    if (err != 0) {
-        return err;
-    }
-
-    /*
-     * Some chips support replayable MMU faults. For such chips make sure
-     * SW is initialized.
-     */
-    if (g->ops.mm.mmu_fault.setup_sw != NULL) {
-        err = g->ops.mm.mmu_fault.setup_sw(g);
-        if (err != 0) {
-            return err;
-        }
-    }
-
     if ((g->ops.fb.fb_ecc_init != NULL) && !g->ecc.initialized) {
         err = g->ops.fb.fb_ecc_init(g);
         if (err != 0) {
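The `g->ops.mm.mmu_fault.setup_sw != NULL` guard carried over into nvgpu_init_mm_components() is the driver's optional-HAL-hook pattern: chips that support replayable MMU faults populate the function pointer, others leave it NULL and the step is skipped. A self-contained sketch of that pattern; the struct here (mmu_fault_ops) and the function names are illustrative, not the driver's real gpu_ops layout:

#include <stdio.h>

/* Illustrative ops struct; NULL hook means the chip lacks the feature. */
struct mmu_fault_ops {
    int (*setup_sw)(void);  /* NULL on chips without replayable faults */
};

static int chip_mmu_fault_setup_sw(void)
{
    printf("replayable MMU fault SW initialized\n");
    return 0;
}

static int init_mmu_fault(const struct mmu_fault_ops *ops)
{
    /* Only chips that populate the hook get the extra init step. */
    if (ops->setup_sw != NULL) {
        return ops->setup_sw();
    }
    return 0;
}

int main(void)
{
    struct mmu_fault_ops with_hook = { .setup_sw = chip_mmu_fault_setup_sw };
    struct mmu_fault_ops without_hook = { .setup_sw = NULL };

    init_mmu_fault(&with_hook);     /* runs the hook */
    init_mmu_fault(&without_hook);  /* silently skips it */
    return 0;
}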
@@ -389,21 +389,12 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
     }
 #endif

-static int nvgpu_vm_init_vma_allocators(struct gk20a *g, struct vm_gk20a *vm,
+static int nvgpu_vm_init_user_vma(struct gk20a *g, struct vm_gk20a *vm,
             u64 user_vma_start, u64 user_vma_limit,
-            u64 user_lp_vma_start, u64 user_lp_vma_limit,
-            u64 kernel_vma_start, u64 kernel_vma_limit,
-            u64 kernel_vma_flags, const char *name)
+            const char *name)
 {
     int err = 0;
     char alloc_name[32];
-    size_t name_len;
-
-    name_len = strlen("gk20a_") + strlen(name);
-    if (name_len >= 32U) {
-        nvgpu_err(g, "Invalid MAX_NAME_SIZE %lu %u", name_len, 32U);
-        return -EINVAL;
-    }

     /*
      * User VMA.
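Each split-out helper keeps its own local `char alloc_name[32]` scratch buffer, while the `strlen("gk20a_") + strlen(name)` length check moves to the reassembled wrapper (see the last hunk below) so it runs once before any helper formats a name into that fixed buffer. A hedged sketch of the validate-then-format idea; MAX_NAME_SIZE, check_vma_name, and the snprintf formatting are assumptions for illustration, not the driver's code:

#include <stdio.h>
#include <string.h>

#define MAX_NAME_SIZE 32U   /* mirrors the fixed alloc_name[32] buffer */

/* Assumed helper: validate once before any caller formats "gk20a_<name>"
 * into its local 32-byte buffer. */
static int check_vma_name(const char *name)
{
    size_t name_len = strlen("gk20a_") + strlen(name);

    if (name_len >= MAX_NAME_SIZE) {
        fprintf(stderr, "Invalid MAX_NAME_SIZE %zu %u\n",
                name_len, MAX_NAME_SIZE);
        return -1;
    }
    return 0;
}

int main(void)
{
    char alloc_name[MAX_NAME_SIZE];
    const char *name = "as";   /* e.g. an address-space VM name */

    if (check_vma_name(name) == 0) {
        snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s", name);
        printf("%s\n", alloc_name);
    }
    return 0;
}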
@@ -432,6 +423,15 @@ static int nvgpu_vm_init_vma_allocators(struct gk20a *g, struct vm_gk20a *vm,
         vm->vma[0] = &vm->kernel;
         vm->vma[1] = &vm->kernel;
     }
+    return 0;
+}
+
+static int nvgpu_vm_init_user_lp_vma(struct gk20a *g, struct vm_gk20a *vm,
+            u64 user_lp_vma_start, u64 user_lp_vma_limit,
+            const char *name)
+{
+    int err = 0;
+    char alloc_name[32];

     /*
      * User VMA for large pages when a split address range is used.
@@ -450,9 +450,18 @@ static int nvgpu_vm_init_vma_allocators(struct gk20a *g, struct vm_gk20a *vm,
             GPU_ALLOC_GVA_SPACE,
             BUDDY_ALLOCATOR);
         if (err != 0) {
-            goto clean_up_allocators;
+            return err;
         }
     }
+    return 0;
+}
+
+static int nvgpu_vm_init_kernel_vma(struct gk20a *g, struct vm_gk20a *vm,
+            u64 kernel_vma_start, u64 kernel_vma_limit,
+            u64 kernel_vma_flags, const char *name)
+{
+    int err = 0;
+    char alloc_name[32];

     /*
      * Kernel VMA.
@@ -471,10 +480,46 @@ static int nvgpu_vm_init_vma_allocators(struct gk20a *g, struct vm_gk20a *vm,
             kernel_vma_flags,
             BUDDY_ALLOCATOR);
         if (err != 0) {
-            goto clean_up_allocators;
+            return err;
         }
     }
     return 0;
+}
+
+static int nvgpu_vm_init_vma_allocators(struct gk20a *g, struct vm_gk20a *vm,
+            u64 user_vma_start, u64 user_vma_limit,
+            u64 user_lp_vma_start, u64 user_lp_vma_limit,
+            u64 kernel_vma_start, u64 kernel_vma_limit,
+            u64 kernel_vma_flags, const char *name)
+{
+    int err = 0;
+    size_t name_len;
+
+    name_len = strlen("gk20a_") + strlen(name);
+    if (name_len >= 32U) {
+        nvgpu_err(g, "Invalid MAX_NAME_SIZE %lu %u", name_len, 32U);
+        return -EINVAL;
+    }
+
+    err = nvgpu_vm_init_user_vma(g, vm,
+            user_vma_start, user_vma_limit, name);
+    if (err != 0) {
+        return err;
+    }
+
+    err = nvgpu_vm_init_user_lp_vma(g, vm,
+            user_lp_vma_start, user_lp_vma_limit, name);
+    if (err != 0) {
+        goto clean_up_allocators;
+    }
+
+    err = nvgpu_vm_init_kernel_vma(g, vm, kernel_vma_start,
+            kernel_vma_limit, kernel_vma_flags, name);
+    if (err != 0) {
+        goto clean_up_allocators;
+    }
+
+    return 0;

 clean_up_allocators:
     if (nvgpu_alloc_initialized(&vm->kernel)) {
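Note the asymmetry the refactor preserves in the reassembled nvgpu_vm_init_vma_allocators(): a user-VMA failure returns immediately (nothing is held yet), while later failures jump to clean_up_allocators, which tears down only the allocators that were actually initialized. A generic compilable sketch of this acquire-then-unwind shape, with hypothetical resources (res_a, res_b) standing in for the VM's allocators:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical resources standing in for the VM's buddy allocators. */
static bool res_a_init, res_b_init;

static int acquire_a(void) { res_a_init = true; return 0; }
static int acquire_b(void) { res_b_init = true; return 0; }
static int acquire_c(void) { return -1; /* simulate a late failure */ }

/* Release only what was initialized, like nvgpu_alloc_initialized() checks. */
static void release(bool *flag, const char *name)
{
    if (*flag) {
        printf("releasing %s\n", name);
        *flag = false;
    }
}

/* A failure before anything is held returns directly; once a resource is
 * held, failures fall through to the unwind label instead. */
static int init_all(void)
{
    int err = acquire_a();
    if (err != 0) {
        return err;             /* nothing to clean up yet */
    }

    err = acquire_b();
    if (err != 0) {
        goto clean_up;
    }

    err = acquire_c();
    if (err != 0) {
        goto clean_up;
    }

    return 0;

clean_up:
    release(&res_b_init, "b");
    release(&res_a_init, "a");
    return err;
}

int main(void)
{
    printf("init_all: %d\n", init_all());
    return 0;
}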