gpu: nvgpu: add API to query page table memhandles

Add an API to query all memhandles used for pde and pte buffers:
- Direct pde/pte allocations should also add an entry to the pd-cache
  full list, so the query can find them.
- Add an OS API for querying the MemServ handle backing an nvgpu_mem.
- Traverse all pd-cache partial and full lists to collect the
  memhandles for every pde/pte buffer (see the sketch below).
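
A minimal sketch of the intended traversal, assuming hypothetical
names for the query function (nvgpu_pd_cache_get_mem_handles) and the
OS hook (nvgpu_mem_get_memserv_handle); the pd-cache list and field
names follow the existing pd_cache layout, but none of these
identifiers are confirmed by this change:

int nvgpu_pd_cache_get_mem_handles(struct gk20a *g, u64 *handles,
				   u32 max_handles, u32 *num_handles)
{
	struct nvgpu_pd_cache *cache = g->mm.pd_cache;
	struct nvgpu_pd_mem_entry *entry;
	u32 n = 0U;
	u32 i;

	for (i = 0U; i < NVGPU_PD_CACHE_COUNT; i++) {
		/* PD buffers with free slots remaining. */
		nvgpu_list_for_each_entry(entry, &cache->partial[i],
					  nvgpu_pd_mem_entry, list_entry) {
			if (n >= max_handles) {
				return -ENOMEM;
			}
			/* OS hook: MemServ handle backing this nvgpu_mem. */
			handles[n++] = nvgpu_mem_get_memserv_handle(g,
								&entry->mem);
		}
		/* Full PD buffers, now including direct pde/pte allocs. */
		nvgpu_list_for_each_entry(entry, &cache->full[i],
					  nvgpu_pd_mem_entry, list_entry) {
			if (n >= max_handles) {
				return -ENOMEM;
			}
			handles[n++] = nvgpu_mem_get_memserv_handle(g,
								&entry->mem);
		}
	}
	*num_handles = n;
	return 0;
}

Because direct allocations are tracked on the full list, one walk of
the partial and full lists covers every pde/pte buffer; a caller sizes
the handles array up front and retries (or fails) on -ENOMEM.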

Jira NVGPU-8284

Change-Id: I8e7adf1be1409264d24e17501eb7c32a81950728
Signed-off-by: Shashank Singh <shashsingh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2735657
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author: Shashank Singh
Date:   2022-06-27 06:23:48 +00:00
Committed by: mobile promotions
Parent: 63e8de5106
Commit: 7abaeda619
18 changed files with 264 additions and 139 deletions

@@ -357,7 +357,7 @@ test_mm_alloc_inst_block.alloc_inst_block=0
 test_mm_init_hal.init_hal=0
 test_mm_inst_block.inst_block=0
 test_mm_page_sizes.page_sizes=0
-test_mm_remove_mm_support.remove_support=0
+test_mm_remove_mm_support.remove_support=2
 test_mm_suspend.suspend=0
 test_nvgpu_init_mm.init_mm=0
 test_nvgpu_mm_setup_hw.init_mm_hw=0
@@ -838,38 +838,38 @@ test_page_faults_inst_block.inst_block_s2=0
 test_page_faults_pending.pending=0
 [pd_cache]
-test_gpu_address.gpu_address=0
-test_init_deinit.deinit=0
-test_init_deinit.init_deinit=0
-test_init_deinit.multi_init=0
-test_offset_computation.offset_comp=0
-test_pd_alloc_direct_fi.alloc_direct_oom=0
-test_pd_alloc_fi.alloc_oom=0
-test_pd_alloc_invalid_input.invalid_pd_alloc=0
-test_pd_cache_alloc_gen.alloc_1024x256B_x11x3=0
-test_pd_cache_alloc_gen.alloc_1024x256B_x16x15=0
-test_pd_cache_alloc_gen.alloc_1024x256B_x16x1=0
-test_pd_cache_alloc_gen.alloc_1024x256B_x32x1=0
-test_pd_cache_alloc_gen.alloc_1x1024B=0
-test_pd_cache_alloc_gen.alloc_1x2048B=0
-test_pd_cache_alloc_gen.alloc_1x256B=0
-test_pd_cache_alloc_gen.alloc_1x512B=0
-test_pd_cache_alloc_gen.alloc_direct_1024x16PAGE=0
-test_pd_cache_alloc_gen.alloc_direct_1024xPAGE=0
-test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x16x15=0
-test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x16x1=0
-test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x16x4=0
-test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x32x24=0
-test_pd_cache_alloc_gen.alloc_direct_1x16PAGE=0
-test_pd_cache_alloc_gen.alloc_direct_1xPAGE=0
-test_pd_cache_env_init.env_init=0
-test_pd_cache_fini.fini=0
-test_pd_cache_init.init=0
-test_pd_cache_valid_alloc.valid_alloc=0
-test_pd_free_empty_pd.free_empty=0
-test_pd_write.write=0
-test_per_pd_size.pd_packing=0
-test_per_pd_size.pd_reusability=0
+test_gpu_address.gpu_address=2
+test_init_deinit.deinit=2
+test_init_deinit.init_deinit=2
+test_init_deinit.multi_init=2
+test_offset_computation.offset_comp=2
+test_pd_alloc_direct_fi.alloc_direct_oom=2
+test_pd_alloc_fi.alloc_oom=2
+test_pd_alloc_invalid_input.invalid_pd_alloc=2
+test_pd_cache_alloc_gen.alloc_1024x256B_x11x3=2
+test_pd_cache_alloc_gen.alloc_1024x256B_x16x15=2
+test_pd_cache_alloc_gen.alloc_1024x256B_x16x1=2
+test_pd_cache_alloc_gen.alloc_1024x256B_x32x1=2
+test_pd_cache_alloc_gen.alloc_1x1024B=2
+test_pd_cache_alloc_gen.alloc_1x2048B=2
+test_pd_cache_alloc_gen.alloc_1x256B=2
+test_pd_cache_alloc_gen.alloc_1x512B=2
+test_pd_cache_alloc_gen.alloc_direct_1024x16PAGE=2
+test_pd_cache_alloc_gen.alloc_direct_1024xPAGE=2
+test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x16x15=2
+test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x16x1=2
+test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x16x4=2
+test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x32x24=2
+test_pd_cache_alloc_gen.alloc_direct_1x16PAGE=2
+test_pd_cache_alloc_gen.alloc_direct_1xPAGE=2
+test_pd_cache_env_init.env_init=2
+test_pd_cache_fini.fini=2
+test_pd_cache_init.init=2
+test_pd_cache_valid_alloc.valid_alloc=2
+test_pd_free_empty_pd.free_empty=2
+test_pd_write.write=2
+test_per_pd_size.pd_packing=2
+test_per_pd_size.pd_reusability=2
 [posix_bitops]
 test_bit_setclear.bit_clear=0

@@ -147,6 +147,10 @@ int test_ltc_init_support(struct unit_module *m,
unit_return_fail(m, "CIC Mon init failed\n");
}
if (nvgpu_pd_cache_init(g) != 0) {
unit_return_fail(m, "PD cache initialization failed\n");
}
/*
* Init dependent ECC unit
*/

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -96,7 +96,9 @@ static struct vm_gk20a *init_vm_env(struct unit_module *m, struct gk20a *g,
 	aperture_size = GK20A_PMU_VA_SIZE;
 	flags |= GPU_ALLOC_GVA_SPACE;
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_err(m, "PD cache initialization failed\n");
+	}
 	/* Init vm with big_pages disabled */
 	test_vm = nvgpu_vm_init(g, g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -267,6 +267,10 @@ int test_mm_dma_init(struct unit_module *m, struct gk20a *g, void *args)
 		return UNIT_FAIL;
 	}
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
 	if (init_mm(m, g) != 0) {
 		unit_return_fail(m, "nvgpu_init_mm_support failed\n");
 	}

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -937,63 +937,63 @@ static int test_pd_cache_env_init(struct unit_module *m,
 }
 struct unit_module_test pd_cache_tests[] = {
-	UNIT_TEST(env_init, test_pd_cache_env_init, NULL, 0),
-	UNIT_TEST(init, test_pd_cache_init, NULL, 0),
-	UNIT_TEST(fini, test_pd_cache_fini, NULL, 0),
+	UNIT_TEST(env_init, test_pd_cache_env_init, NULL, 2),
+	UNIT_TEST(init, test_pd_cache_init, NULL, 2),
+	UNIT_TEST(fini, test_pd_cache_fini, NULL, 2),
 	/*
 	 * Requirement verification tests.
 	 */
 	UNIT_TEST_REQ("NVGPU-RQCD-68.C1,2", PD_CACHE_REQ1_UID, "V4",
-		      valid_alloc, test_pd_cache_valid_alloc, NULL, 0),
+		      valid_alloc, test_pd_cache_valid_alloc, NULL, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-68.C3", PD_CACHE_REQ1_UID, "V4",
-		      pd_packing, test_per_pd_size, do_test_pd_cache_packing_size, 0),
+		      pd_packing, test_per_pd_size, do_test_pd_cache_packing_size, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-118.C1", PD_CACHE_REQ2_UID, "V3",
-		      pd_reusability, test_per_pd_size, do_test_pd_reusability, 0),
+		      pd_reusability, test_per_pd_size, do_test_pd_reusability, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-122.C1", PD_CACHE_REQ3_UID, "V3",
-		      write, test_pd_write, NULL, 0),
+		      write, test_pd_write, NULL, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-123.C1", PD_CACHE_REQ4_UID, "V2",
-		      gpu_address, test_gpu_address, NULL, 0),
+		      gpu_address, test_gpu_address, NULL, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-126.C1,2", PD_CACHE_REQ5_UID, "V1",
-		      offset_comp, test_offset_computation, NULL, 0),
+		      offset_comp, test_offset_computation, NULL, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-124.C1", PD_CACHE_REQ6_UID, "V3",
-		      init_deinit, test_init_deinit, NULL, 0),
+		      init_deinit, test_init_deinit, NULL, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-155.C1", PD_CACHE_REQ7_UID, "V2",
-		      multi_init, test_init_deinit, NULL, 0),
+		      multi_init, test_init_deinit, NULL, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-125.C1", PD_CACHE_REQ8_UID, "V2",
-		      deinit, test_init_deinit, NULL, 0),
+		      deinit, test_init_deinit, NULL, 2),
 	/*
 	 * Direct allocs.
 	 */
-	UNIT_TEST(alloc_direct_1xPAGE, test_pd_cache_alloc_gen, &alloc_direct_1xPAGE, 0),
-	UNIT_TEST(alloc_direct_1024xPAGE, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE, 0),
-	UNIT_TEST(alloc_direct_1x16PAGE, test_pd_cache_alloc_gen, &alloc_direct_1x16PAGE, 0),
-	UNIT_TEST(alloc_direct_1024x16PAGE, test_pd_cache_alloc_gen, &alloc_direct_1024x16PAGE, 0),
-	UNIT_TEST(alloc_direct_1024xPAGE_x32x24, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x32x24, 0),
-	UNIT_TEST(alloc_direct_1024xPAGE_x16x4, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x4, 0),
-	UNIT_TEST(alloc_direct_1024xPAGE_x16x15, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x15, 0),
-	UNIT_TEST(alloc_direct_1024xPAGE_x16x1, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x1, 0),
+	UNIT_TEST(alloc_direct_1xPAGE, test_pd_cache_alloc_gen, &alloc_direct_1xPAGE, 2),
+	UNIT_TEST(alloc_direct_1024xPAGE, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE, 2),
+	UNIT_TEST(alloc_direct_1x16PAGE, test_pd_cache_alloc_gen, &alloc_direct_1x16PAGE, 2),
+	UNIT_TEST(alloc_direct_1024x16PAGE, test_pd_cache_alloc_gen, &alloc_direct_1024x16PAGE, 2),
+	UNIT_TEST(alloc_direct_1024xPAGE_x32x24, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x32x24, 2),
+	UNIT_TEST(alloc_direct_1024xPAGE_x16x4, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x4, 2),
+	UNIT_TEST(alloc_direct_1024xPAGE_x16x15, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x15, 2),
+	UNIT_TEST(alloc_direct_1024xPAGE_x16x1, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x1, 2),
 	/*
 	 * Cached allocs.
 	 */
-	UNIT_TEST(alloc_1x256B, test_pd_cache_alloc_gen, &alloc_1x256B, 0),
-	UNIT_TEST(alloc_1x512B, test_pd_cache_alloc_gen, &alloc_1x512B, 0),
-	UNIT_TEST(alloc_1x1024B, test_pd_cache_alloc_gen, &alloc_1x1024B, 0),
-	UNIT_TEST(alloc_1x2048B, test_pd_cache_alloc_gen, &alloc_1x2048B, 0),
-	UNIT_TEST(alloc_1024x256B_x16x15, test_pd_cache_alloc_gen, &alloc_1024x256B_x16x15, 0),
-	UNIT_TEST(alloc_1024x256B_x16x1, test_pd_cache_alloc_gen, &alloc_1024x256B_x16x1, 0),
-	UNIT_TEST(alloc_1024x256B_x32x1, test_pd_cache_alloc_gen, &alloc_1024x256B_x32x1, 0),
-	UNIT_TEST(alloc_1024x256B_x11x3, test_pd_cache_alloc_gen, &alloc_1024x256B_x11x3, 0),
+	UNIT_TEST(alloc_1x256B, test_pd_cache_alloc_gen, &alloc_1x256B, 2),
+	UNIT_TEST(alloc_1x512B, test_pd_cache_alloc_gen, &alloc_1x512B, 2),
+	UNIT_TEST(alloc_1x1024B, test_pd_cache_alloc_gen, &alloc_1x1024B, 2),
+	UNIT_TEST(alloc_1x2048B, test_pd_cache_alloc_gen, &alloc_1x2048B, 2),
+	UNIT_TEST(alloc_1024x256B_x16x15, test_pd_cache_alloc_gen, &alloc_1024x256B_x16x15, 2),
+	UNIT_TEST(alloc_1024x256B_x16x1, test_pd_cache_alloc_gen, &alloc_1024x256B_x16x1, 2),
+	UNIT_TEST(alloc_1024x256B_x32x1, test_pd_cache_alloc_gen, &alloc_1024x256B_x32x1, 2),
+	UNIT_TEST(alloc_1024x256B_x11x3, test_pd_cache_alloc_gen, &alloc_1024x256B_x11x3, 2),
 	/*
 	 * Error path testing.
 	 */
-	UNIT_TEST(free_empty, test_pd_free_empty_pd, NULL, 0),
-	UNIT_TEST(invalid_pd_alloc, test_pd_alloc_invalid_input, NULL, 0),
-	UNIT_TEST(alloc_direct_oom, test_pd_alloc_direct_fi, NULL, 0),
-	UNIT_TEST(alloc_oom, test_pd_alloc_fi, NULL, 0),
+	UNIT_TEST(free_empty, test_pd_free_empty_pd, NULL, 2),
+	UNIT_TEST(invalid_pd_alloc, test_pd_alloc_invalid_input, NULL, 2),
+	UNIT_TEST(alloc_direct_oom, test_pd_alloc_direct_fi, NULL, 2),
+	UNIT_TEST(alloc_oom, test_pd_alloc_fi, NULL, 2),
 };
 UNIT_MODULE(pd_cache, pd_cache_tests, UNIT_PRIO_NVGPU_TEST);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -186,6 +186,10 @@ int test_env_init_flush_gk20a_fusa(struct unit_module *m, struct gk20a *g,
 	init_platform(m, g, true);
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
 	if (init_mm(m, g) != 0) {
 		unit_return_fail(m, "nvgpu_init_mm_support failed\n");
 	}

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -184,6 +184,9 @@ int test_env_init_flush_gv11b_fusa(struct unit_module *m, struct gk20a *g,
 	g->log_mask = 0;
 	init_platform(m, g, true);
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
 	if (init_mm(m, g) != 0) {
 		unit_return_fail(m, "nvgpu_init_mm_support failed\n");

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -171,6 +171,9 @@ int test_env_init_mm_gp10b_fusa(struct unit_module *m, struct gk20a *g,
 	g->log_mask = 0;
 	init_platform(m, g, true);
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
 	if (init_mm(m, g) != 0) {
 		unit_return_fail(m, "nvgpu_init_mm_support failed\n");

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -176,6 +176,9 @@ int test_env_init_mm_gv11b_fusa(struct unit_module *m, struct gk20a *g,
 	g->log_mask = 0;
 	init_platform(m, g, true);
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
 	if (init_mm(m, g) != 0) {
 		unit_return_fail(m, "nvgpu_init_mm_support failed\n");

@@ -217,6 +217,9 @@ int test_env_init_mm_mmu_fault_gv11b_fusa(struct unit_module *m,
 	g->log_mask = 0;
 	init_platform(m, g, true);
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
 	if (init_mm(m, g) != 0) {
 		unit_return_fail(m, "nvgpu_init_mm_support failed\n");

@@ -453,6 +453,9 @@ int test_mm_init_hal(struct unit_module *m, struct gk20a *g, void *args)
 	current_module = m;
 	init_platform(m, g, true);
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
 	struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
@@ -741,7 +744,7 @@ struct unit_module_test nvgpu_mm_mm_tests[] = {
 	UNIT_TEST(init_mm, test_nvgpu_init_mm, NULL, 0),
 	UNIT_TEST(init_mm_hw, test_nvgpu_mm_setup_hw, NULL, 0),
 	UNIT_TEST(suspend, test_mm_suspend, NULL, 0),
-	UNIT_TEST(remove_support, test_mm_remove_mm_support, NULL, 0),
+	UNIT_TEST(remove_support, test_mm_remove_mm_support, NULL, 2),
 	UNIT_TEST(page_sizes, test_mm_page_sizes, NULL, 0),
 	UNIT_TEST(inst_block, test_mm_inst_block, NULL, 0),
 	UNIT_TEST(alloc_inst_block, test_mm_alloc_inst_block, NULL, 0),

@@ -274,7 +274,9 @@ int test_page_faults_init(struct unit_module *m, struct gk20a *g, void *args)
 	}
 	init_platform(m, g, true);
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
 	if (init_mm(m, g) != 0) {
 		unit_return_fail(m, "nvgpu_init_mm_support failed\n");
 	}

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -144,6 +144,10 @@ int test_sync_init(struct unit_module *m, struct gk20a *g, void *args)
unit_return_fail(m, "nvgpu_sync_early_init failed\n");
}
if (nvgpu_pd_cache_init(g) != 0) {
unit_return_fail(m, "PD cache initialization failed\n");
}
/*
* Alloc memory for g->syncpt_mem
*/