gpu: nvgpu: fix l2_flush errors during rmmod

The function gk20a_mm_l2_flush incorrectly returns an error value
when it skips the L2 flush because the hardware is powered off.
This causes the following prints to appear even though the behavior is expected:

gv11b_mm_l2_flush:43 [ERR] gk20a_mm_l2_flush failed
nvgpu_gmmu_unmap_locked:1043 [ERR] gk20a_mm_l2_flush[1] failed

The above errors are hit through the following call paths:
1) gk20a_remove -> gk20a_free_cb -> gk20a_remove_support ->
	nvgpu_pmu_remove_support -> nvgpu_pmu_pg_deinit ->
	nvgpu_dma_unmap_free

2) gk20a_remove -> gk20a_free_cb -> gk20a_remove_support ->
	nvgpu_remove_mm_support -> gv11b_mm_mmu_fault_info_mem_destroy ->
        nvgpu_dma_unmap_free

Since these paths are not part of the poweron/poweroff sequence, it is
okay to skip the flush once the hardware has powered off.

Fixed the userspace unit tests by allocating g->mm.bar1.vm to prevent a
NULL pointer access in gv11b_mm_l2_flush->tlb_invalidate.

Jira LS-77

Change-Id: I3ca71f5118daf4b2eeacfe5bf83d94317f29d446
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2523751
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    Debarshi Dutta
Date:      2021-05-03 16:33:00 +05:30
Committer: mobile promotions
Parent:    74deaae0bf
Commit:    096f4ef055
10 changed files with 116 additions and 14 deletions

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -174,7 +174,8 @@ int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
gk20a_busy_noresume(g);
if (nvgpu_is_powered_off(g)) {
- goto hw_was_off;
+ gk20a_idle_nosuspend(g);
+ return 0;
}
if (g->ops.mm.get_flush_retries != NULL) {
@@ -220,8 +221,6 @@ int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
}
nvgpu_mutex_release(&mm->l2_op_lock);
- hw_was_off:
gk20a_idle_nosuspend(g);
return err;
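
Taken together, the two hunks above make the powered-off case drop the busy
reference and report success instead of jumping to the old hw_was_off error
exit. A minimal sketch of the resulting flow (the flush body, retry handling
and exact error bookkeeping are elided and assumed from the surrounding nvgpu
code):

	int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
	{
		struct mm_gk20a *mm = &g->mm;
		int err = 0;

		gk20a_busy_noresume(g);
		if (nvgpu_is_powered_off(g)) {
			/* Hardware is already off (e.g. during rmmod): there is
			 * nothing to flush, so release the busy reference and
			 * report success rather than an error. */
			gk20a_idle_nosuspend(g);
			return 0;
		}

		/* ... take mm->l2_op_lock and issue the L2 flush (plus the
		 * optional invalidate); the elided body sets err on timeout ... */

		nvgpu_mutex_release(&mm->l2_op_lock);
		gk20a_idle_nosuspend(g);
		return err;
	}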

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -47,6 +47,7 @@ int test_gr_ctx_error_injection(struct unit_module *m,
struct gk20a *g, void *args)
{
int err;
struct mm_gk20a *mm = &g->mm;
struct vm_gk20a *vm;
struct nvgpu_gr_ctx_desc *desc;
struct nvgpu_gr_global_ctx_buffer_desc *global_desc;
@@ -55,6 +56,7 @@ int test_gr_ctx_error_injection(struct unit_module *m,
nvgpu_dma_alloc_get_fault_injection();
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
u64 low_hole = SZ_4K * 16UL;
desc = nvgpu_gr_ctx_desc_alloc(g);
if (!desc) {
@@ -69,6 +71,19 @@ int test_gr_ctx_error_injection(struct unit_module *m,
unit_return_fail(m, "failed to allocate VM");
}
mm->bar1.aperture_size = 16 << 20;
mm->bar1.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole,
0ULL,
nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
0ULL,
true, false, false,
"bar1");
if (mm->bar1.vm == NULL) {
unit_return_fail(m, "nvgpu_vm_init failed\n");
}
/* Try to free gr_ctx before it is allocated. */
nvgpu_gr_ctx_free(g, gr_ctx, NULL, NULL);
@@ -215,6 +230,7 @@ int test_gr_ctx_error_injection(struct unit_module *m,
nvgpu_free_gr_ctx_struct(g, gr_ctx);
nvgpu_gr_ctx_desc_free(g, desc);
nvgpu_vm_put(vm);
nvgpu_vm_put(g->mm.bar1.vm);
return UNIT_SUCCESS;
}
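
The same BAR1 setup/teardown pairing is added to each of the unit tests
touched below: the test init path now creates g->mm.bar1.vm so the flush and
TLB-invalidate path no longer dereferences a NULL vm, and the matching cleanup
releases it. A condensed sketch of the pattern (values mirror the hunk above;
error handling trimmed):

	/* test setup: give the test's gk20a instance a BAR1 VM */
	u64 low_hole = SZ_4K * 16UL;

	mm->bar1.aperture_size = 16 << 20;
	mm->bar1.vm = nvgpu_vm_init(g,
			g->ops.mm.gmmu.get_default_big_page_size(),
			low_hole, 0ULL,
			nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
			0ULL, true, false, false, "bar1");
	if (mm->bar1.vm == NULL) {
		unit_return_fail(m, "nvgpu_vm_init failed\n");
	}

	/* test teardown: drop the reference taken above */
	nvgpu_vm_put(g->mm.bar1.vm);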

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -39,6 +39,7 @@
#include <nvgpu/netlist.h>
#include <nvgpu/gr/gr.h>
#include <hal/ltc/intr/ltc_intr_gv11b.h>
#include <nvgpu/vm.h>
#include "nvgpu-ltc.h"
@@ -100,6 +101,8 @@ int test_ltc_init_support(struct unit_module *m,
struct nvgpu_ltc *save_ptr;
struct nvgpu_posix_fault_inj *kmem_fi =
nvgpu_kmem_get_fault_injection();
struct mm_gk20a *mm = &g->mm;
u64 low_hole;
if (nvgpu_posix_io_add_reg_space(g, mc_boot_0_r(), 0xfff) != 0) {
unit_err(m, "%s: failed to create register space\n", __func__);
@@ -239,6 +242,20 @@ int test_ltc_init_support(struct unit_module *m,
unit_return_fail(m, "g->ops.ltc.init_ltc_support failed\n");
}
low_hole = SZ_4K * 16UL;
mm->bar1.aperture_size = 16 << 20;
mm->bar1.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole,
0ULL,
nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
0ULL,
true, false, false,
"bar1");
if (mm->bar1.vm == NULL) {
unit_return_fail(m, "nvgpu_vm_init failed\n");
}
return UNIT_SUCCESS;
}
@@ -396,6 +413,7 @@ int test_ltc_remove_support(struct unit_module *m,
struct gk20a *g, void *args)
{
g->ops.ltc.ltc_remove_support(g);
nvgpu_vm_put(g->mm.bar1.vm);
return UNIT_SUCCESS;
}

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -205,6 +205,19 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
unit_return_fail(m, "nvgpu_vm_init failed\n");
}
mm->bar1.aperture_size = bar1_aperture_size_mb_gk20a() << 20;
mm->bar1.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole,
0ULL,
nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
0ULL,
true, false, false,
"bar1");
if (mm->bar1.vm == NULL) {
unit_return_fail(m, "nvgpu_vm_init failed\n");
}
if (nvgpu_pd_cache_init(g) != 0) {
unit_return_fail(m, "pd cache initialization failed\n");
}

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -350,6 +350,19 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
g->ops.mm.get_default_va_sizes(NULL, &mm->channel.user_size,
&mm->channel.kernel_size);
mm->bar1.aperture_size = bar1_aperture_size_mb_gk20a() << 20;
mm->bar1.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole,
0ULL,
nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
0ULL,
true, false, false,
"bar1");
if (mm->bar1.vm == NULL) {
unit_return_fail(m, "nvgpu_vm_init failed\n");
}
mm->pmu.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole,
@@ -399,6 +412,7 @@ int test_nvgpu_gmmu_clean(struct unit_module *m, struct gk20a *g, void *args)
{
g->log_mask = 0;
nvgpu_vm_put(g->mm.pmu.vm);
nvgpu_vm_put(g->mm.bar1.vm);
return UNIT_SUCCESS;
}

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -337,8 +337,7 @@ int test_gk20a_mm_l2_flush(struct unit_module *m, struct gk20a *g, void *args)
err = gk20a_mm_l2_flush(g, invalidate);
if ((branch == F_GK20A_L2_FLUSH_PENDING_TRUE) ||
- (branch == F_GK20A_L2_FLUSH_OUTSTANDING_TRUE) ||
- (branch == F_GK20A_L2_FLUSH_NVGPU_POWERED_OFF)) {
+ (branch == F_GK20A_L2_FLUSH_OUTSTANDING_TRUE)) {
unit_assert(err != 0, goto done);
} else {
unit_assert(err == 0, goto done);

View File

@@ -516,9 +516,19 @@ int test_mm_init_hal(struct unit_module *m, struct gk20a *g, void *args)
return UNIT_SUCCESS;
}
static int stub_mm_l2_flush(struct gk20a *g, bool invalidate)
{
return -ETIMEDOUT;
}
int test_mm_suspend(struct unit_module *m, struct gk20a *g, void *args)
{
int err;
int (*save_func)(struct gk20a *g, bool inv);
/* Allow l2_flush failure by stubbing the call. */
save_func = g->ops.mm.cache.l2_flush;
g->ops.mm.cache.l2_flush = stub_mm_l2_flush;
nvgpu_set_power_state(g, NVGPU_STATE_POWERED_OFF);
err = nvgpu_mm_suspend(g);
@@ -527,6 +537,9 @@ int test_mm_suspend(struct unit_module *m, struct gk20a *g, void *args)
err);
}
/* restore original l2_flush method */
g->ops.mm.cache.l2_flush = save_func;
nvgpu_set_power_state(g, NVGPU_STATE_POWERED_ON);
err = nvgpu_mm_suspend(g);
if (err != 0) {

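Presumably because the powered-off case no longer returns an error, the
suspend test can no longer rely on it to exercise the l2_flush failure branch,
so it temporarily swaps in a stub that always fails. The override/restore
pattern used above, in brief (names as in the hunk):

	int (*save_func)(struct gk20a *g, bool inv);

	save_func = g->ops.mm.cache.l2_flush;        /* remember the real HAL op */
	g->ops.mm.cache.l2_flush = stub_mm_l2_flush; /* stub returns -ETIMEDOUT */

	/* ... call nvgpu_mm_suspend(g) with the GPU marked powered off and
	 * check the returned value ... */

	g->ops.mm.cache.l2_flush = save_func;        /* restore for later tests */
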
View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -210,6 +210,20 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
unit_return_fail(m, "'system' nvgpu_vm_init failed\n");
}
mm->bar1.aperture_size = U32(16) << 20U;
mm->bar1.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
SZ_64K,
0ULL,
nvgpu_safe_sub_u64(mm->bar1.aperture_size, SZ_64K),
0ULL,
true, false, false,
"bar1");
if (mm->bar1.vm == NULL) {
return -ENOMEM;
}
/* BAR2 memory space */
mm->bar2.aperture_size = U32(32) << 20U;
mm->bar2.vm = nvgpu_vm_init(g,
@@ -342,6 +356,7 @@ int test_page_faults_clean(struct unit_module *m, struct gk20a *g, void *args)
g->ops.mm.mmu_fault.info_mem_destroy(g);
nvgpu_vm_put(g->mm.pmu.vm);
nvgpu_vm_put(g->mm.bar2.vm);
nvgpu_vm_put(g->mm.bar1.vm);
return UNIT_SUCCESS;
}

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -197,6 +197,7 @@ static int init_test_env(struct unit_module *m, struct gk20a *g)
g->ops.mm.init_inst_block = hal_mm_init_inst_block;
g->ops.mm.vm_as_free_share = hal_vm_as_free_share;
g->ops.mm.vm_bind_channel = nvgpu_vm_bind_channel;
g->ops.bus.bar1_bind = NULL;
if (nvgpu_pd_cache_init(g) != 0) {
unit_return_fail(m, "PD cache init failed.\n");

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -99,6 +99,19 @@ static int init_channel_vm(struct unit_module *m, struct nvgpu_channel *ch)
ch->vm = mm->pmu.vm;
mm->bar1.aperture_size = bar1_aperture_size_mb_gk20a() << 20;
mm->bar1.vm = nvgpu_vm_init(g,
g->ops.mm.gmmu.get_default_big_page_size(),
low_hole,
0ULL,
nvgpu_safe_sub_u64(mm->bar1.aperture_size, low_hole),
0ULL,
true, false, false,
"bar1");
if (mm->bar1.vm == NULL) {
unit_return_fail(m, "nvgpu_vm_init failed\n");
}
if (nvgpu_pd_cache_init(g) != 0) {
unit_return_fail(m, "pd cache initialization failed\n");
}
@@ -533,6 +546,7 @@ int test_sync_deinit(struct unit_module *m, struct gk20a *g, void *args)
{
nvgpu_vm_put(g->mm.pmu.vm);
nvgpu_vm_put(g->mm.bar1.vm);
if (ch != NULL) {
nvgpu_kfree(g, ch);