gpu: nvgpu: move local golden context memory allocation to poweron

- Separate out local golden context memory allocation from
  nvgpu_gr_global_ctx_init_local_golden_image() into a new function
  nvgpu_gr_global_ctx_alloc_local_golden_image().
- Add a new member local_golden_image_copy to struct
  nvgpu_gr_obj_ctx_golden_image to store the copy used for context
  verification.
- Allocate local golden context memory from nvgpu_gr_obj_ctx_init(),
  which is called during the poweron path (see the interface sketch
  below).
- Remove memory allocation from nvgpu_gr_obj_ctx_save_golden_ctx().
- Disable test test_gr_obj_ctx_error_injection since it needs rework
  to accommodate the new changes.
- Fix the following tests to allocate local golden context memory:
  test_gr_global_ctx_local_ctx_error_injection
  test_gr_setup_alloc_obj_ctx
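
A minimal sketch of the resulting interface, reconstructed from the bullets above and the diff hunks below. The parameter types, the return type of the init function, and the unrelated members of struct nvgpu_gr_obj_ctx_golden_image are assumptions, not copied from the nvgpu headers.

/* New: allocate the container and backing storage for a local golden
 * image. Called from nvgpu_gr_obj_ctx_init() on the poweron path. */
int nvgpu_gr_global_ctx_alloc_local_golden_image(struct gk20a *g,
    struct nvgpu_gr_global_ctx_local_golden_image **local_golden_image,
    size_t size);

/* Changed: now only copies the golden context from source memory into a
 * pre-allocated image; assumed to return void since the updated callers
 * in the hunks below ignore any return value. */
void nvgpu_gr_global_ctx_init_local_golden_image(struct gk20a *g,
    struct nvgpu_gr_global_ctx_local_golden_image *local_golden_image,
    struct nvgpu_mem *source_mem, size_t size);

/* New member: a second image kept only for golden context verification
 * (CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION). */
struct nvgpu_gr_obj_ctx_golden_image {
    /* ... existing members ... */
    size_t size;
    struct nvgpu_gr_global_ctx_local_golden_image *local_golden_image;
    struct nvgpu_gr_global_ctx_local_golden_image *local_golden_image_copy;
};

With this split, the only step that can fail (the allocation) happens once at poweron, and nvgpu_gr_obj_ctx_save_golden_ctx() is left as a plain copy, which appears to be the point of moving the allocation.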

Bug 3307637

Change-Id: I2f760d524881fd328346838ea9ce0234358f8e51
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2633713
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Deepak Nibade
Date:         2021-11-29 17:08:06 +05:30
Committed by: mobile promotions
Parent:       e1d6b8af8d
Commit:       9f55801a15
11 changed files with 102 additions and 127 deletions


@@ -658,7 +658,7 @@ test_gr_intr_without_channel.gr_intr_channel_free=0
[nvgpu_gr_obj_ctx]
test_gr_init_setup_cleanup.gr_obj_ctx_cleanup=0
test_gr_init_setup_ready.gr_obj_ctx_setup=0
-test_gr_obj_ctx_error_injection.gr_obj_ctx_alloc_errors=0
+test_gr_obj_ctx_error_injection.gr_obj_ctx_alloc_errors=2
[nvgpu_gr_setup]
test_gr_init_setup_cleanup.gr_setup_cleanup=0


@@ -204,8 +204,6 @@ int test_gr_global_ctx_local_ctx_error_injection(struct unit_module *m,
struct nvgpu_mem mem;
struct nvgpu_gr_global_ctx_local_golden_image *local_golden_image;
struct nvgpu_gr_global_ctx_local_golden_image *local_golden_image_bk;
-struct nvgpu_posix_fault_inj *kmem_fi =
-nvgpu_kmem_get_fault_injection();
/* Allocate dummy memory */
err = nvgpu_dma_alloc(g, DUMMY_SIZE, &mem);
@@ -213,41 +211,27 @@ int test_gr_global_ctx_local_ctx_error_injection(struct unit_module *m,
unit_return_fail(m, "failed to allocate dummy memory");
}
-/* Fail allocation of nvgpu_gr_global_ctx_local_golden_image struct */
-nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
-local_golden_image = nvgpu_gr_global_ctx_init_local_golden_image(g,
-&mem, DUMMY_SIZE);
-if (local_golden_image != NULL) {
-unit_return_fail(m, "unexpected success");
-}
-/* Fail allocation of local_golden_image->context */
-nvgpu_posix_enable_fault_injection(kmem_fi, true, 1);
-local_golden_image = nvgpu_gr_global_ctx_init_local_golden_image(g,
-&mem, DUMMY_SIZE);
-if (local_golden_image != NULL) {
-unit_return_fail(m, "unexpected success");
-}
/* Successful allocation of local golden context */
-nvgpu_posix_enable_fault_injection(kmem_fi, false, 0);
-local_golden_image = nvgpu_gr_global_ctx_init_local_golden_image(g,
-&mem, DUMMY_SIZE);
-if (local_golden_image == NULL) {
+err = nvgpu_gr_global_ctx_alloc_local_golden_image(g, &local_golden_image, DUMMY_SIZE);
+if (err != 0) {
unit_return_fail(m, "failed to initialize local golden image");
}
+nvgpu_gr_global_ctx_init_local_golden_image(g, local_golden_image, &mem, DUMMY_SIZE);
/* Trigger flush error during context load */
g->ops.mm.cache.l2_flush = dummy_l2_flush;
nvgpu_gr_global_ctx_load_local_golden_image(g,
local_golden_image, &mem);
/* Allocate dummy local golden context image */
-local_golden_image_bk = nvgpu_gr_global_ctx_init_local_golden_image(g,
-&mem, DUMMY_SIZE);
-if (local_golden_image_bk == NULL) {
+err = nvgpu_gr_global_ctx_alloc_local_golden_image(g, &local_golden_image_bk, DUMMY_SIZE);
+if (err != 0) {
unit_return_fail(m, "failed to initialize local golden image");
}
+nvgpu_gr_global_ctx_init_local_golden_image(g, local_golden_image_bk, &mem, DUMMY_SIZE);
#ifdef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
bool valid;
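
Since the hunk above interleaves the removed and added lines, this is roughly how the updated stretch of test_gr_global_ctx_local_ctx_error_injection reads after the change, reassembled from the added and unchanged lines; indentation, wrapping, and blank lines are approximated rather than taken from the source.

    /* Successful allocation of local golden context */
    err = nvgpu_gr_global_ctx_alloc_local_golden_image(g,
            &local_golden_image, DUMMY_SIZE);
    if (err != 0) {
        unit_return_fail(m, "failed to initialize local golden image");
    }
    nvgpu_gr_global_ctx_init_local_golden_image(g, local_golden_image,
            &mem, DUMMY_SIZE);

    /* Trigger flush error during context load */
    g->ops.mm.cache.l2_flush = dummy_l2_flush;
    nvgpu_gr_global_ctx_load_local_golden_image(g,
            local_golden_image, &mem);

    /* Allocate dummy local golden context image */
    err = nvgpu_gr_global_ctx_alloc_local_golden_image(g,
            &local_golden_image_bk, DUMMY_SIZE);
    if (err != 0) {
        unit_return_fail(m, "failed to initialize local golden image");
    }
    nvgpu_gr_global_ctx_init_local_golden_image(g, local_golden_image_bk,
            &mem, DUMMY_SIZE);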


@@ -81,7 +81,8 @@ int test_gr_global_ctx_alloc_error_injection(struct unit_module *m,
*
* Test Type: Feature, Error guessing
*
-* Targets: nvgpu_gr_global_ctx_init_local_golden_image,
+* Targets: nvgpu_gr_global_ctx_alloc_local_golden_image,
+* nvgpu_gr_global_ctx_init_local_golden_image,
* nvgpu_gr_global_ctx_load_local_golden_image,
* nvgpu_gr_global_ctx_compare_golden_images,
* nvgpu_gr_global_ctx_deinit_local_golden_image


@@ -314,42 +314,6 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
unit_return_fail(m, "unexpected success");
}
-/*
-* Fail first local golden image allocation in
-* nvgpu_gr_global_ctx_init_local_golden_image()
-*/
-nvgpu_posix_enable_fault_injection(local_golden_image_fi, true, 0);
-err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
-config, gr_ctx, subctx, vm, &inst_block,
-VOLTA_COMPUTE_A, 0, false, false);
-if (err == 0) {
-unit_return_fail(m, "unexpected success");
-}
-/*
-* Fail second local golden image allocation in
-* nvgpu_gr_global_ctx_init_local_golden_image()
-*/
-nvgpu_posix_enable_fault_injection(local_golden_image_fi, true, 1);
-err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
-config, gr_ctx, subctx, vm, &inst_block,
-VOLTA_COMPUTE_A, 0, false, false);
-if (err == 0) {
-unit_return_fail(m, "unexpected success");
-}
-/*
-* Fail third local golden image allocation in
-* nvgpu_gr_global_ctx_init_local_golden_image()
-*/
-nvgpu_posix_enable_fault_injection(local_golden_image_fi, true, 2);
-err = nvgpu_gr_obj_ctx_alloc(g, golden_image, global_desc, desc,
-config, gr_ctx, subctx, vm, &inst_block,
-VOLTA_COMPUTE_A, 0, false, false);
-if (err == 0) {
-unit_return_fail(m, "unexpected success");
-}
/* Disable error injection */
nvgpu_posix_enable_fault_injection(local_golden_image_fi, false, 0);
@@ -434,7 +398,7 @@ int test_gr_obj_ctx_error_injection(struct unit_module *m,
struct unit_module_test nvgpu_gr_obj_ctx_tests[] = {
UNIT_TEST(gr_obj_ctx_setup, test_gr_init_setup_ready, NULL, 0),
-UNIT_TEST(gr_obj_ctx_alloc_errors, test_gr_obj_ctx_error_injection, NULL, 0),
+UNIT_TEST(gr_obj_ctx_alloc_errors, test_gr_obj_ctx_error_injection, NULL, 2),
UNIT_TEST(gr_obj_ctx_cleanup, test_gr_init_setup_cleanup, NULL, 0),
};


@@ -720,6 +720,17 @@ int test_gr_setup_alloc_obj_ctx(struct unit_module *m,
/* Set a default size for golden image */
g->gr->golden_image->size = 0x800;
+err = nvgpu_gr_global_ctx_alloc_local_golden_image(g,
+&g->gr->golden_image->local_golden_image, 0x800);
+if (err != 0) {
+unit_return_fail(m, "local golden image alloc failed\n");
+}
+err = nvgpu_gr_global_ctx_alloc_local_golden_image(g,
+&g->gr->golden_image->local_golden_image_copy, 0x800);
+if (err != 0) {
+unit_return_fail(m, "local golden image copy alloc failed\n");
+}
/* Test with channel and tsg */
err = gr_test_setup_allocate_ch_tsg(m, g);
if (err != 0) {