Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Move pd_cache declarations to new header
The pd_cache header declarations were originally part of the gmmu.h header. This is not good from a unit-isolation perspective, so this patch moves all the pd_cache specifics over to a new header file: <nvgpu/pd_cache.h>.

A couple of static inlines that were possible when the code was part of gmmu.h have been turned into real, first-class functions. This allows the pd_cache.h header to avoid including the gmmu.h header file.

Also fix an issue in the nvgpu_pd_write() function where the data was being passed as a size_t for some reason. This has now been changed to a u32.

JIRA NVGPU-1444

Change-Id: Ib9e9e5a54544de403bfcd8e11c30de05721ddbcc
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1966352
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 5bdffee1a8
commit ba85fc999b
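The new <nvgpu/pd_cache.h> header itself is not part of this diff. Judging only from the call sites changed below, it presumably declares something along these lines (a sketch; the guard name, includes, and exact prototypes are assumptions, not taken from the patch):

/* Hypothetical sketch of <nvgpu/pd_cache.h>, inferred from call sites below. */
#ifndef NVGPU_PD_CACHE_H
#define NVGPU_PD_CACHE_H

#include <nvgpu/types.h>

struct gk20a;
struct gk20a_mmu_level;
struct nvgpu_gmmu_pd;

/* Physical (or IOMMU) address of a page directory. */
u64 nvgpu_pd_gpu_addr(struct gk20a *g, struct nvgpu_gmmu_pd *pd);

/* Word offset of entry pd_idx within a PD at MMU level l. */
u32 nvgpu_pd_offset_from_index(const struct gk20a_mmu_level *l, u32 pd_idx);

/* Write one 32-bit word into a PD; data is the u32 from the fix above. */
void nvgpu_pd_write(struct gk20a *g, struct nvgpu_gmmu_pd *pd,
		    size_t w, u32 data);

#endif /* NVGPU_PD_CACHE_H */

Forward-declaring struct nvgpu_gmmu_pd instead of pulling in gmmu.h is what would let pd_cache.h stand alone, which is the unit-isolation point of the commit message.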
@@ -25,6 +25,7 @@
 #include <nvgpu/list.h>
 #include <nvgpu/dma.h>
 #include <nvgpu/gmmu.h>
+#include <nvgpu/pd_cache.h>
 #include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/nvgpu_sgt.h>
 #include <nvgpu/enabled.h>
@@ -218,22 +219,6 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
 	return 0;
 }
 
-/*
- * Return the _physical_ address of a page directory.
- */
-u64 nvgpu_pde_gpu_addr(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
-{
-	u64 page_addr;
-
-	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_NVLINK)) {
-		page_addr = nvgpu_mem_get_phys_addr(g, pd->mem);
-	} else {
-		page_addr = nvgpu_mem_get_addr(g, pd->mem);
-	}
-
-	return page_addr + pd->mem_offs;
-}
-
 /*
  * Return the aligned length based on the page size in attrs.
  */
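The helper removed above is not gone: the next hunk switches its caller to nvgpu_pd_gpu_addr(), so the body has presumably moved into the pd_cache unit under the new name. Assuming the logic is carried over verbatim, it would look like this on the pd_cache side (a sketch; the destination file is an assumption, not shown in this patch):

/* Assumed new home of the helper, renamed nvgpu_pd_gpu_addr(). */
u64 nvgpu_pd_gpu_addr(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
{
	u64 page_addr;

	/*
	 * With NVLINK the GPU bypasses any IOMMU, so the raw physical
	 * address is needed; otherwise the DMA address is the right one.
	 */
	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_NVLINK)) {
		page_addr = nvgpu_mem_get_phys_addr(g, pd->mem);
	} else {
		page_addr = nvgpu_mem_get_addr(g, pd->mem);
	}

	/* PDs are packed, so this PD may start at an offset into pd->mem. */
	return page_addr + pd->mem_offs;
}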
@@ -477,7 +462,7 @@ static int __set_pd_level(struct vm_gk20a *vm,
 	 * target addr is the real physical address we are aiming for.
 	 */
 	target_addr = (next_pd != NULL) ?
-		nvgpu_pde_gpu_addr(g, next_pd) :
+		nvgpu_pd_gpu_addr(g, next_pd) :
 		phys_addr;
 
 	l->update_entry(vm, l,
@@ -987,8 +972,8 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 	 * Take into account the real offset into the nvgpu_mem since the PD
 	 * may be located at an offset other than 0 (due to PD packing).
 	 */
-	pte_base = (pd->mem_offs / sizeof(u32)) +
-		pd_offset_from_index(l, pd_idx);
+	pte_base = (u32)(pd->mem_offs / sizeof(u32)) +
+		nvgpu_pd_offset_from_index(l, pd_idx);
 	pte_size = (u32)(l->entry_size / sizeof(u32));
 
 	if (data != NULL) {
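The new (u32) cast keeps the word-offset arithmetic in 32 bits. As a concrete illustration, assuming nvgpu_pd_offset_from_index() computes pd_idx * l->entry_size / sizeof(u32) (this patch does not show its body):

/*
 * Worked example (values chosen for illustration only):
 *
 *   pd->mem_offs  = 0x1000  -> PD packed 4 KiB into its nvgpu_mem
 *   l->entry_size = 8       -> two 32-bit words per entry
 *   pd_idx        = 3
 *
 *   pte_base = 0x1000 / 4 + 3 * (8 / 4) = 1024 + 6 = 1030
 *   pte_size = 8 / 4 = 2    -> words read or written per PTE
 */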
@@ -1006,7 +991,7 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 	}
 
 	if (pd_offs_out != NULL) {
-		*pd_offs_out = pd_offset_from_index(l, pd_idx);
+		*pd_offs_out = nvgpu_pd_offset_from_index(l, pd_idx);
 	}
 
 	return 0;
@@ -1043,7 +1028,7 @@ int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte)
 	pte_size = __nvgpu_pte_words(g);
 
 	for (i = 0; i < pte_size; i++) {
-		pd_write(g, pd, (size_t)pd_offs + (size_t)i, pte[i]);
+		nvgpu_pd_write(g, pd, (size_t)pd_offs + (size_t)i, pte[i]);
 		pte_dbg(g, attrs_ptr,
 			"PTE: idx=%-4u (%d) 0x%08x", pd_idx, i, pte[i]);
 	}
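The size_t-to-u32 fix from the commit message is only indirectly visible here: pte[i] is a u32, so the old data parameter forced a silent widening at every call. The implied before/after prototypes (assumptions; the header itself is not in this diff):

/* Before (assumed): the 32-bit PTE word was widened to a size_t. */
void nvgpu_pd_write(struct gk20a *g, struct nvgpu_gmmu_pd *pd,
		    size_t w, size_t data);

/* After (assumed): data now matches the 32-bit words being written. */
void nvgpu_pd_write(struct gk20a *g, struct nvgpu_gmmu_pd *pd,
		    size_t w, u32 data);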