gpu: nvgpu: add API to query page table memhandles

Add an API to query all memhandles used for PDEs and PTEs.
- Some direct pde/pte allocations should also add an entry to the
  pd-cache full list.
- Add an OS API for querying the MemServ handle from an nvgpu_mem.
- Traverse all pd-cache partial and full lists to get the memhandles
  for all pde/pte buffers.

Jira NVGPU-8284

Change-Id: I8e7adf1be1409264d24e17501eb7c32a81950728
Signed-off-by: Shashank Singh <shashsingh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2735657
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author:       Shashank Singh
Authored:     2022-06-27 06:23:48 +00:00
Committed by: mobile promotions
Parent:       63e8de5106
Commit:       7abaeda619
18 changed files with 264 additions and 139 deletions
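
For context, here is a minimal sketch (not part of this change) of how a caller might consume the query API introduced below. The helper name example_dump_pd_mems and the assumption that the returned array is released with nvgpu_kfree() are illustrative only; the nvgpu_pd_cache_get_nvgpu_mems() signature itself comes from this commit.

/*
 * Hypothetical caller: walk every PDE/PTE backing buffer tracked by the
 * pd-cache. The array is allocated with nvgpu_kzalloc() inside
 * nvgpu_pd_cache_get_nvgpu_mems(), so the caller is assumed to release
 * it with nvgpu_kfree() when done.
 */
static void example_dump_pd_mems(struct gk20a *g)
{
	struct nvgpu_mem **mems;
	uint32_t count = 0U;
	uint32_t i;

	mems = nvgpu_pd_cache_get_nvgpu_mems(g, &count);
	if (mems == NULL) {
		return;
	}

	for (i = 0U; i < count; i++) {
		/*
		 * Hand mems[i] to the OS layer, e.g. to look up the memory
		 * handle backing this PDE/PTE buffer.
		 */
		(void)mems[i];
	}

	nvgpu_kfree(g, mems);
}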


@@ -125,6 +125,7 @@ int nvgpu_pd_cache_init(struct gk20a *g)
 		nvgpu_init_list_node(&cache->full[i]);
 		nvgpu_init_list_node(&cache->partial[i]);
 	}
+	nvgpu_init_list_node(&cache->direct);
 
 	cache->mem_tree = NULL;
@@ -137,6 +138,44 @@ int nvgpu_pd_cache_init(struct gk20a *g)
 	return 0;
 }
 
+struct nvgpu_mem **nvgpu_pd_cache_get_nvgpu_mems(struct gk20a *g, uint32_t *no_of_mems)
+{
+	struct nvgpu_mem **mem_arr;
+	uint32_t count = 0U;
+	u32 i;
+	struct nvgpu_pd_mem_entry *nvgpu_pdmem_entry;
+	struct nvgpu_pd_cache *cache = g->mm.pd_cache;
+
+	for (i = 0U; i < NVGPU_PD_CACHE_COUNT; i++) {
+		count = count + nvgpu_list_length(&cache->full[i]) +
+			nvgpu_list_length(&cache->partial[i]);
+	}
+	count = count + nvgpu_list_length(&cache->direct);
+
+	mem_arr = nvgpu_kzalloc(g, sizeof(*mem_arr) * count);
+	if (mem_arr == NULL) {
+		nvgpu_err(g, "Failed to alloc mem array");
+		return NULL;
+	}
+	*no_of_mems = count;
+
+	count = 0U;
+	for (i = 0U; i < NVGPU_PD_CACHE_COUNT; i++) {
+		nvgpu_list_for_each_entry(nvgpu_pdmem_entry, &cache->full[i],
+				nvgpu_pd_mem_entry, list_entry) {
+			mem_arr[count++] = &nvgpu_pdmem_entry->mem;
+		}
+		nvgpu_list_for_each_entry(nvgpu_pdmem_entry, &cache->partial[i],
+				nvgpu_pd_mem_entry, list_entry) {
+			mem_arr[count++] = &nvgpu_pdmem_entry->mem;
+		}
+	}
+	nvgpu_list_for_each_entry(nvgpu_pdmem_entry, &cache->direct,
+			nvgpu_pd_mem_entry, list_entry) {
+		mem_arr[count++] = &nvgpu_pdmem_entry->mem;
+	}
+
+	return mem_arr;
+}
+
 void nvgpu_pd_cache_fini(struct gk20a *g)
 {
 	u32 i;
@@ -150,31 +189,28 @@ void nvgpu_pd_cache_fini(struct gk20a *g)
 		nvgpu_assert(nvgpu_list_empty(&cache->full[i]));
 		nvgpu_assert(nvgpu_list_empty(&cache->partial[i]));
 	}
+	nvgpu_assert(nvgpu_list_empty(&cache->direct));
 
 	nvgpu_kfree(g, g->mm.pd_cache);
 	g->mm.pd_cache = NULL;
 }
 
-/*
- * This is the simple pass-through for greater than page or page sized PDs.
- *
- * Note: this does not need the cache lock since it does not modify any of the
- * PD cache data structures.
- */
-int nvgpu_pd_cache_alloc_direct(struct gk20a *g,
+static int nvgpu_pd_cache_alloc_direct_locked(struct gk20a *g,
 		struct nvgpu_gmmu_pd *pd, u32 bytes)
 {
 	int err;
 	unsigned long flags = 0;
+	struct nvgpu_pd_mem_entry *pentry;
+	struct nvgpu_pd_cache *cache = g->mm.pd_cache;
 
-	pd_dbg(g, "PD-Alloc [D] %u bytes", bytes);
-
-	pd->mem = nvgpu_kzalloc(g, sizeof(*pd->mem));
-	if (pd->mem == NULL) {
-		nvgpu_err(g, "OOM allocating nvgpu_mem struct!");
+	pentry = nvgpu_kzalloc(g, sizeof(*pentry));
+	if (pentry == NULL) {
+		nvgpu_err(g, "OOM allocating pentry!");
 		return -ENOMEM;
 	}
 
+	pd_dbg(g, "PD-Alloc [D] %u bytes", bytes);
+
 	/*
 	 * If bytes == NVGPU_CPU_PAGE_SIZE then it's impossible to get a discontiguous DMA
 	 * allocation. Some DMA implementations may, despite this fact, still
@@ -189,19 +225,43 @@ int nvgpu_pd_cache_alloc_direct(struct gk20a *g,
 		flags = NVGPU_DMA_PHYSICALLY_ADDRESSED;
 	}
 
-	err = nvgpu_dma_alloc_flags(g, flags, bytes, pd->mem);
+	err = nvgpu_dma_alloc_flags(g, flags, bytes, &(pentry->mem));
 	if (err != 0) {
 		nvgpu_err(g, "OOM allocating page directory!");
 		nvgpu_kfree(g, pd->mem);
 		return -ENOMEM;
 	}
 
-	pd->cached = false;
+	nvgpu_list_add(&pentry->list_entry,
+			&cache->direct);
+	pd->mem = &pentry->mem;
 	pd->mem_offs = 0;
+	pentry->pd_size = bytes;
+	pentry->allocs = 1;
+	pd->cached = true;
+	pentry->tree_entry.key_start = (u64)(uintptr_t)&pentry->mem;
+	nvgpu_rbtree_insert(&pentry->tree_entry, &cache->mem_tree);
 
 	return 0;
 }
 
+/*
+ * This is the simple pass-through for greater than page or page sized PDs.
+ *
+ * Note: this does not need the cache lock since it does not modify any of the
+ * PD cache data structures.
+ */
+int nvgpu_pd_cache_alloc_direct(struct gk20a *g,
+		struct nvgpu_gmmu_pd *pd, u32 bytes)
+{
+	int ret;
+
+	nvgpu_mutex_acquire(&g->mm.pd_cache->lock);
+	ret = nvgpu_pd_cache_alloc_direct_locked(g, pd, bytes);
+	nvgpu_mutex_release(&g->mm.pd_cache->lock);
+
+	return ret;
+}
+
 /*
  * Make a new nvgpu_pd_cache_entry and allocate a PD from it. Update the passed
  * pd to reflect this allocation.
@@ -236,7 +296,7 @@ static int nvgpu_pd_cache_alloc_new(struct gk20a *g,
 	 * allocation may work
 	 */
 	if (err == -ENOMEM) {
-		return nvgpu_pd_cache_alloc_direct(g, pd, bytes);
+		return nvgpu_pd_cache_alloc_direct_locked(g, pd, bytes);
 	}
 
 	nvgpu_err(g, "Unable to DMA alloc!");
 	return -ENOMEM;
@@ -377,49 +437,37 @@ int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
 	struct gk20a *g = gk20a_from_vm(vm);
 	int err;
 
+	nvgpu_mutex_acquire(&g->mm.pd_cache->lock);
+
 	/*
 	 * Simple case: PD is bigger than a page so just do a regular DMA
 	 * alloc.
 	 */
 	if (bytes >= NVGPU_PD_CACHE_SIZE) {
-		err = nvgpu_pd_cache_alloc_direct(g, pd, bytes);
+		err = nvgpu_pd_cache_alloc_direct_locked(g, pd, bytes);
 		if (err != 0) {
-			return err;
+			goto release_lock;
 		}
 		pd->pd_size = bytes;
-		return 0;
+		goto release_lock;
 	}
 
 	if (g->mm.pd_cache == NULL) {
 		nvgpu_do_assert();
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto release_lock;
 	}
 
-	nvgpu_mutex_acquire(&g->mm.pd_cache->lock);
 	err = nvgpu_pd_cache_alloc(g, g->mm.pd_cache, pd, bytes);
 	if (err == 0) {
 		pd->pd_size = bytes;
 	}
+
+release_lock:
 	nvgpu_mutex_release(&g->mm.pd_cache->lock);
 	return err;
 }
 
-static void nvgpu_pd_cache_free_direct(struct gk20a *g,
-		struct nvgpu_gmmu_pd *pd)
-{
-	pd_dbg(g, "PD-Free [D] 0x%p", pd->mem);
-
-	if (pd->mem == NULL) {
-		return;
-	}
-
-	nvgpu_dma_free(g, pd->mem);
-	nvgpu_kfree(g, pd->mem);
-	pd->mem = NULL;
-}
-
 static void nvgpu_pd_cache_free_mem_entry(struct gk20a *g,
 		struct nvgpu_pd_cache *cache,
 		struct nvgpu_pd_mem_entry *pentry)
@@ -435,7 +483,13 @@ static void nvgpu_pd_cache_do_free(struct gk20a *g,
 		struct nvgpu_pd_mem_entry *pentry,
 		struct nvgpu_gmmu_pd *pd)
 {
-	u32 bit = pd->mem_offs / pentry->pd_size;
+	u32 bit;
+
+	if (pd->mem == NULL) {
+		return;
+	}
+
+	bit = pd->mem_offs / pentry->pd_size;
 
 	/* Mark entry as free. */
 	nvgpu_clear_bit(bit, pentry->alloc_map);
@@ -479,34 +533,20 @@ static struct nvgpu_pd_mem_entry *nvgpu_pd_cache_look_up(
 	return nvgpu_pd_mem_entry_from_tree_entry(node);
 }
 
-static void nvgpu_pd_cache_free(struct gk20a *g, struct nvgpu_pd_cache *cache,
-		struct nvgpu_gmmu_pd *pd)
+void nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
 {
+	struct gk20a *g = gk20a_from_vm(vm);
 	struct nvgpu_pd_mem_entry *pentry;
 
-	pd_dbg(g, "PD-Free [C] 0x%p", pd->mem);
-
-	pentry = nvgpu_pd_cache_look_up(cache, pd);
+	nvgpu_mutex_acquire(&g->mm.pd_cache->lock);
+	pentry = nvgpu_pd_cache_look_up(g->mm.pd_cache, pd);
 	if (pentry == NULL) {
+		nvgpu_mutex_release(&g->mm.pd_cache->lock);
 		nvgpu_do_assert_print(g, "Attempting to free non-existent pd");
 		return;
 	}
 
-	nvgpu_pd_cache_do_free(g, cache, pentry, pd);
-}
-
-void nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
-{
-	struct gk20a *g = gk20a_from_vm(vm);
-
-	/*
-	 * Simple case: just DMA free.
-	 */
-	if (!pd->cached) {
-		return nvgpu_pd_cache_free_direct(g, pd);
-	}
-
-	nvgpu_mutex_acquire(&g->mm.pd_cache->lock);
-	nvgpu_pd_cache_free(g, g->mm.pd_cache, pd);
+	nvgpu_pd_cache_do_free(g, g->mm.pd_cache, pentry, pd);
 	nvgpu_mutex_release(&g->mm.pd_cache->lock);
 }


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -162,6 +162,12 @@ struct nvgpu_pd_cache {
 	 */
 	struct nvgpu_list_node partial[NVGPU_PD_CACHE_COUNT];
 
+	/**
+	 * List of nvgpu_pd_mem_entries which are directly allocated by
+	 * dma_alloc APIs.
+	 */
+	struct nvgpu_list_node direct;
+
 	/**
 	 * Tree of all allocated struct nvgpu_mem's for fast look up.
 	 */


@@ -807,11 +807,12 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 	/* Initialize the page table data structures. */
 	(void) strncpy(vm->name, name,
 		min(strlen(name), (size_t)(sizeof(vm->name)-1ULL)));
+	if (!g->is_virtual) {
 		err = nvgpu_gmmu_init_page_table(vm);
 		if (err != 0) {
 			goto clean_up_gpu_vm;
 		}
+	}
 
 	err = nvgpu_vm_init_vma(g, vm, user_reserved, kernel_reserved,
 		small_big_split, big_pages, unified_va, name);
 	if (err != 0) {
@@ -984,7 +985,9 @@ static void nvgpu_vm_remove(struct vm_gk20a *vm)
 		nvgpu_alloc_destroy(&vm->user_lp);
 	}
 
+	if (!g->is_virtual) {
 		nvgpu_vm_free_entries(vm, &vm->pdb);
+	}
 
 	if (g->ops.mm.vm_as_free_share != NULL) {
 		g->ops.mm.vm_as_free_share(vm);


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -125,6 +125,36 @@ static inline bool nvgpu_list_empty(struct nvgpu_list_node *head)
 	return head->next == head;
 }
 
+/**
+ * @brief Get the list length.
+ *
+ * Computes the number of nodes in the list pointed to by \a head.
+ *
+ * @param head [in]	Head node of the list to be checked. This
+ *			performs validation of this parameter.
+ *
+ * @return Length of the list.
+ *
+ * @retval 0 if list is empty.
+ * @retval count if list is not empty.
+ */
+static inline unsigned int nvgpu_list_length(struct nvgpu_list_node *head)
+{
+	unsigned int count = 0U;
+	struct nvgpu_list_node *curr_node = head;
+
+	if (curr_node == NULL) {
+		return 0U;
+	}
+
+	do {
+		curr_node = curr_node->next;
+		count++;
+	} while (curr_node != head);
+
+	return count;
+}
+
 /**
  * @brief Move a node from the list to head.
  *


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -131,6 +131,17 @@ void nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd);
  */
 int nvgpu_pd_cache_init(struct gk20a *g);
 
+/**
+ * @brief Returns list of all page table memory buffers.
+ *
+ * @param g [in]		The GPU.
+ * @param no_of_mems [out]	No. of page table memory buffers.
+ *
+ * @return Pointer to array of struct nvgpu_mem pointers in case of success.
+ * @retval NULL in case of failure.
+ */
+struct nvgpu_mem **nvgpu_pd_cache_get_nvgpu_mems(struct gk20a *g, uint32_t *no_of_mems);
+
 /**
  * @brief Free the pd_cache tracking stuff allocated by nvgpu_pd_cache_init().
  *


@@ -357,7 +357,7 @@ test_mm_alloc_inst_block.alloc_inst_block=0
 test_mm_init_hal.init_hal=0
 test_mm_inst_block.inst_block=0
 test_mm_page_sizes.page_sizes=0
-test_mm_remove_mm_support.remove_support=0
+test_mm_remove_mm_support.remove_support=2
 test_mm_suspend.suspend=0
 test_nvgpu_init_mm.init_mm=0
 test_nvgpu_mm_setup_hw.init_mm_hw=0
@@ -838,38 +838,38 @@ test_page_faults_inst_block.inst_block_s2=0
 test_page_faults_pending.pending=0
 
 [pd_cache]
-test_gpu_address.gpu_address=0
+test_gpu_address.gpu_address=2
-test_init_deinit.deinit=0
+test_init_deinit.deinit=2
-test_init_deinit.init_deinit=0
+test_init_deinit.init_deinit=2
-test_init_deinit.multi_init=0
+test_init_deinit.multi_init=2
-test_offset_computation.offset_comp=0
+test_offset_computation.offset_comp=2
-test_pd_alloc_direct_fi.alloc_direct_oom=0
+test_pd_alloc_direct_fi.alloc_direct_oom=2
-test_pd_alloc_fi.alloc_oom=0
+test_pd_alloc_fi.alloc_oom=2
-test_pd_alloc_invalid_input.invalid_pd_alloc=0
+test_pd_alloc_invalid_input.invalid_pd_alloc=2
-test_pd_cache_alloc_gen.alloc_1024x256B_x11x3=0
+test_pd_cache_alloc_gen.alloc_1024x256B_x11x3=2
-test_pd_cache_alloc_gen.alloc_1024x256B_x16x15=0
+test_pd_cache_alloc_gen.alloc_1024x256B_x16x15=2
-test_pd_cache_alloc_gen.alloc_1024x256B_x16x1=0
+test_pd_cache_alloc_gen.alloc_1024x256B_x16x1=2
-test_pd_cache_alloc_gen.alloc_1024x256B_x32x1=0
+test_pd_cache_alloc_gen.alloc_1024x256B_x32x1=2
-test_pd_cache_alloc_gen.alloc_1x1024B=0
+test_pd_cache_alloc_gen.alloc_1x1024B=2
-test_pd_cache_alloc_gen.alloc_1x2048B=0
+test_pd_cache_alloc_gen.alloc_1x2048B=2
-test_pd_cache_alloc_gen.alloc_1x256B=0
+test_pd_cache_alloc_gen.alloc_1x256B=2
-test_pd_cache_alloc_gen.alloc_1x512B=0
+test_pd_cache_alloc_gen.alloc_1x512B=2
-test_pd_cache_alloc_gen.alloc_direct_1024x16PAGE=0
+test_pd_cache_alloc_gen.alloc_direct_1024x16PAGE=2
-test_pd_cache_alloc_gen.alloc_direct_1024xPAGE=0
+test_pd_cache_alloc_gen.alloc_direct_1024xPAGE=2
-test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x16x15=0
+test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x16x15=2
-test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x16x1=0
+test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x16x1=2
-test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x16x4=0
+test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x16x4=2
-test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x32x24=0
+test_pd_cache_alloc_gen.alloc_direct_1024xPAGE_x32x24=2
-test_pd_cache_alloc_gen.alloc_direct_1x16PAGE=0
+test_pd_cache_alloc_gen.alloc_direct_1x16PAGE=2
-test_pd_cache_alloc_gen.alloc_direct_1xPAGE=0
+test_pd_cache_alloc_gen.alloc_direct_1xPAGE=2
-test_pd_cache_env_init.env_init=0
+test_pd_cache_env_init.env_init=2
-test_pd_cache_fini.fini=0
+test_pd_cache_fini.fini=2
-test_pd_cache_init.init=0
+test_pd_cache_init.init=2
-test_pd_cache_valid_alloc.valid_alloc=0
+test_pd_cache_valid_alloc.valid_alloc=2
-test_pd_free_empty_pd.free_empty=0
+test_pd_free_empty_pd.free_empty=2
-test_pd_write.write=0
+test_pd_write.write=2
-test_per_pd_size.pd_packing=0
+test_per_pd_size.pd_packing=2
-test_per_pd_size.pd_reusability=0
+test_per_pd_size.pd_reusability=2
 
 [posix_bitops]
 test_bit_setclear.bit_clear=0


@@ -147,6 +147,10 @@ int test_ltc_init_support(struct unit_module *m,
 		unit_return_fail(m, "CIC Mon init failed\n");
 	}
 
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
+
 	/*
 	 * Init dependent ECC unit
 	 */


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -96,7 +96,9 @@ static struct vm_gk20a *init_vm_env(struct unit_module *m, struct gk20a *g,
 	aperture_size = GK20A_PMU_VA_SIZE;
 	flags |= GPU_ALLOC_GVA_SPACE;
 
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_err(m, "PD cache initialization failed\n");
+	}
+
 	/* Init vm with big_pages disabled */
 	test_vm = nvgpu_vm_init(g, g->ops.mm.gmmu.get_default_big_page_size(),
 			low_hole,


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -267,6 +267,10 @@ int test_mm_dma_init(struct unit_module *m, struct gk20a *g, void *args)
 		return UNIT_FAIL;
 	}
 
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
+
 	if (init_mm(m, g) != 0) {
 		unit_return_fail(m, "nvgpu_init_mm_support failed\n");
 	}


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -937,63 +937,63 @@ static int test_pd_cache_env_init(struct unit_module *m,
 }
 
 struct unit_module_test pd_cache_tests[] = {
-	UNIT_TEST(env_init, test_pd_cache_env_init, NULL, 0),
+	UNIT_TEST(env_init, test_pd_cache_env_init, NULL, 2),
-	UNIT_TEST(init, test_pd_cache_init, NULL, 0),
+	UNIT_TEST(init, test_pd_cache_init, NULL, 2),
-	UNIT_TEST(fini, test_pd_cache_fini, NULL, 0),
+	UNIT_TEST(fini, test_pd_cache_fini, NULL, 2),
 
 	/*
 	 * Requirement verification tests.
	 */
 	UNIT_TEST_REQ("NVGPU-RQCD-68.C1,2", PD_CACHE_REQ1_UID, "V4",
-		valid_alloc, test_pd_cache_valid_alloc, NULL, 0),
+		valid_alloc, test_pd_cache_valid_alloc, NULL, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-68.C3", PD_CACHE_REQ1_UID, "V4",
-		pd_packing, test_per_pd_size, do_test_pd_cache_packing_size, 0),
+		pd_packing, test_per_pd_size, do_test_pd_cache_packing_size, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-118.C1", PD_CACHE_REQ2_UID, "V3",
-		pd_reusability, test_per_pd_size, do_test_pd_reusability, 0),
+		pd_reusability, test_per_pd_size, do_test_pd_reusability, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-122.C1", PD_CACHE_REQ3_UID, "V3",
-		write, test_pd_write, NULL, 0),
+		write, test_pd_write, NULL, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-123.C1", PD_CACHE_REQ4_UID, "V2",
-		gpu_address, test_gpu_address, NULL, 0),
+		gpu_address, test_gpu_address, NULL, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-126.C1,2", PD_CACHE_REQ5_UID, "V1",
-		offset_comp, test_offset_computation, NULL, 0),
+		offset_comp, test_offset_computation, NULL, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-124.C1", PD_CACHE_REQ6_UID, "V3",
-		init_deinit, test_init_deinit, NULL, 0),
+		init_deinit, test_init_deinit, NULL, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-155.C1", PD_CACHE_REQ7_UID, "V2",
-		multi_init, test_init_deinit, NULL, 0),
+		multi_init, test_init_deinit, NULL, 2),
 	UNIT_TEST_REQ("NVGPU-RQCD-125.C1", PD_CACHE_REQ8_UID, "V2",
-		deinit, test_init_deinit, NULL, 0),
+		deinit, test_init_deinit, NULL, 2),
 
 	/*
 	 * Direct allocs.
	 */
-	UNIT_TEST(alloc_direct_1xPAGE, test_pd_cache_alloc_gen, &alloc_direct_1xPAGE, 0),
+	UNIT_TEST(alloc_direct_1xPAGE, test_pd_cache_alloc_gen, &alloc_direct_1xPAGE, 2),
-	UNIT_TEST(alloc_direct_1024xPAGE, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE, 0),
+	UNIT_TEST(alloc_direct_1024xPAGE, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE, 2),
-	UNIT_TEST(alloc_direct_1x16PAGE, test_pd_cache_alloc_gen, &alloc_direct_1x16PAGE, 0),
+	UNIT_TEST(alloc_direct_1x16PAGE, test_pd_cache_alloc_gen, &alloc_direct_1x16PAGE, 2),
-	UNIT_TEST(alloc_direct_1024x16PAGE, test_pd_cache_alloc_gen, &alloc_direct_1024x16PAGE, 0),
+	UNIT_TEST(alloc_direct_1024x16PAGE, test_pd_cache_alloc_gen, &alloc_direct_1024x16PAGE, 2),
-	UNIT_TEST(alloc_direct_1024xPAGE_x32x24, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x32x24, 0),
+	UNIT_TEST(alloc_direct_1024xPAGE_x32x24, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x32x24, 2),
-	UNIT_TEST(alloc_direct_1024xPAGE_x16x4, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x4, 0),
+	UNIT_TEST(alloc_direct_1024xPAGE_x16x4, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x4, 2),
-	UNIT_TEST(alloc_direct_1024xPAGE_x16x15, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x15, 0),
+	UNIT_TEST(alloc_direct_1024xPAGE_x16x15, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x15, 2),
-	UNIT_TEST(alloc_direct_1024xPAGE_x16x1, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x1, 0),
+	UNIT_TEST(alloc_direct_1024xPAGE_x16x1, test_pd_cache_alloc_gen, &alloc_direct_1024xPAGE_x16x1, 2),
 
 	/*
 	 * Cached allocs.
	 */
-	UNIT_TEST(alloc_1x256B, test_pd_cache_alloc_gen, &alloc_1x256B, 0),
+	UNIT_TEST(alloc_1x256B, test_pd_cache_alloc_gen, &alloc_1x256B, 2),
-	UNIT_TEST(alloc_1x512B, test_pd_cache_alloc_gen, &alloc_1x512B, 0),
+	UNIT_TEST(alloc_1x512B, test_pd_cache_alloc_gen, &alloc_1x512B, 2),
-	UNIT_TEST(alloc_1x1024B, test_pd_cache_alloc_gen, &alloc_1x1024B, 0),
+	UNIT_TEST(alloc_1x1024B, test_pd_cache_alloc_gen, &alloc_1x1024B, 2),
-	UNIT_TEST(alloc_1x2048B, test_pd_cache_alloc_gen, &alloc_1x2048B, 0),
+	UNIT_TEST(alloc_1x2048B, test_pd_cache_alloc_gen, &alloc_1x2048B, 2),
-	UNIT_TEST(alloc_1024x256B_x16x15, test_pd_cache_alloc_gen, &alloc_1024x256B_x16x15, 0),
+	UNIT_TEST(alloc_1024x256B_x16x15, test_pd_cache_alloc_gen, &alloc_1024x256B_x16x15, 2),
-	UNIT_TEST(alloc_1024x256B_x16x1, test_pd_cache_alloc_gen, &alloc_1024x256B_x16x1, 0),
+	UNIT_TEST(alloc_1024x256B_x16x1, test_pd_cache_alloc_gen, &alloc_1024x256B_x16x1, 2),
-	UNIT_TEST(alloc_1024x256B_x32x1, test_pd_cache_alloc_gen, &alloc_1024x256B_x32x1, 0),
+	UNIT_TEST(alloc_1024x256B_x32x1, test_pd_cache_alloc_gen, &alloc_1024x256B_x32x1, 2),
-	UNIT_TEST(alloc_1024x256B_x11x3, test_pd_cache_alloc_gen, &alloc_1024x256B_x11x3, 0),
+	UNIT_TEST(alloc_1024x256B_x11x3, test_pd_cache_alloc_gen, &alloc_1024x256B_x11x3, 2),
 
 	/*
 	 * Error path testing.
	 */
-	UNIT_TEST(free_empty, test_pd_free_empty_pd, NULL, 0),
+	UNIT_TEST(free_empty, test_pd_free_empty_pd, NULL, 2),
-	UNIT_TEST(invalid_pd_alloc, test_pd_alloc_invalid_input, NULL, 0),
+	UNIT_TEST(invalid_pd_alloc, test_pd_alloc_invalid_input, NULL, 2),
-	UNIT_TEST(alloc_direct_oom, test_pd_alloc_direct_fi, NULL, 0),
+	UNIT_TEST(alloc_direct_oom, test_pd_alloc_direct_fi, NULL, 2),
-	UNIT_TEST(alloc_oom, test_pd_alloc_fi, NULL, 0),
+	UNIT_TEST(alloc_oom, test_pd_alloc_fi, NULL, 2),
 };
 
 UNIT_MODULE(pd_cache, pd_cache_tests, UNIT_PRIO_NVGPU_TEST);


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -186,6 +186,10 @@ int test_env_init_flush_gk20a_fusa(struct unit_module *m, struct gk20a *g,
 	init_platform(m, g, true);
 
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
+
 	if (init_mm(m, g) != 0) {
 		unit_return_fail(m, "nvgpu_init_mm_support failed\n");
 	}


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -184,6 +184,9 @@ int test_env_init_flush_gv11b_fusa(struct unit_module *m, struct gk20a *g,
 	g->log_mask = 0;
 	init_platform(m, g, true);
 
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
+
 	if (init_mm(m, g) != 0) {
 		unit_return_fail(m, "nvgpu_init_mm_support failed\n");


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -171,6 +171,9 @@ int test_env_init_mm_gp10b_fusa(struct unit_module *m, struct gk20a *g,
 	g->log_mask = 0;
 	init_platform(m, g, true);
 
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
+
 	if (init_mm(m, g) != 0) {
 		unit_return_fail(m, "nvgpu_init_mm_support failed\n");


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -176,6 +176,9 @@ int test_env_init_mm_gv11b_fusa(struct unit_module *m, struct gk20a *g,
 	g->log_mask = 0;
 	init_platform(m, g, true);
 
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
+
 	if (init_mm(m, g) != 0) {
 		unit_return_fail(m, "nvgpu_init_mm_support failed\n");


@@ -217,6 +217,9 @@ int test_env_init_mm_mmu_fault_gv11b_fusa(struct unit_module *m,
 	g->log_mask = 0;
 	init_platform(m, g, true);
 
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
+
 	if (init_mm(m, g) != 0) {
 		unit_return_fail(m, "nvgpu_init_mm_support failed\n");


@@ -453,6 +453,9 @@ int test_mm_init_hal(struct unit_module *m, struct gk20a *g, void *args)
 	current_module = m;
 	init_platform(m, g, true);
 
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
+
 	struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
@@ -741,7 +744,7 @@ struct unit_module_test nvgpu_mm_mm_tests[] = {
 	UNIT_TEST(init_mm, test_nvgpu_init_mm, NULL, 0),
 	UNIT_TEST(init_mm_hw, test_nvgpu_mm_setup_hw, NULL, 0),
 	UNIT_TEST(suspend, test_mm_suspend, NULL, 0),
-	UNIT_TEST(remove_support, test_mm_remove_mm_support, NULL, 0),
+	UNIT_TEST(remove_support, test_mm_remove_mm_support, NULL, 2),
 	UNIT_TEST(page_sizes, test_mm_page_sizes, NULL, 0),
 	UNIT_TEST(inst_block, test_mm_inst_block, NULL, 0),
 	UNIT_TEST(alloc_inst_block, test_mm_alloc_inst_block, NULL, 0),


@@ -274,7 +274,9 @@ int test_page_faults_init(struct unit_module *m, struct gk20a *g, void *args)
 	}
 
 	init_platform(m, g, true);
 
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
+
 	if (init_mm(m, g) != 0) {
 		unit_return_fail(m, "nvgpu_init_mm_support failed\n");
 	}


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -144,6 +144,10 @@ int test_sync_init(struct unit_module *m, struct gk20a *g, void *args)
 		unit_return_fail(m, "nvgpu_sync_early_init failed\n");
 	}
 
+	if (nvgpu_pd_cache_init(g) != 0) {
+		unit_return_fail(m, "PD cache initialization failed\n");
+	}
+
 	/*
 	 * Alloc memory for g->syncpt_mem
 	 */