gpu: nvgpu: posix: expose nvgpu_mem operations

The nvgpu_mem operations were all static. This patch makes
them public so that they can be reused by other modules.

JIRA NVGPU-907

Change-Id: I17cd3934480bcd85d42c2bafbecc23194434ba79
Signed-off-by: Nicolas Benech <nbenech@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1972429
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Nicolas Benech
2018-12-13 12:56:02 -05:00
committed by mobile promotions
parent 48c0a239e7
commit 76e5d6ab27
2 changed files with 19 additions and 8 deletions

View File

@@ -137,4 +137,15 @@ void nvgpu_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt);
/*
 * Public scatter-gather (SGT/SGL) API declarations.
 *
 * NOTE(review): this hunk adds the nvgpu_mem_sgl_* / nvgpu_mem_sgt_*
 * prototypes so the formerly-static posix implementations (see the
 * second hunk of this commit) become callable from other modules.
 */
bool nvgpu_sgt_iommuable(struct gk20a *g, struct nvgpu_sgt *sgt);
u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt);
/* Walk to the next entry of a scatter-gather list. */
struct nvgpu_sgl *nvgpu_mem_sgl_next(struct nvgpu_sgl *sgl);
/* Physical address of an SGL entry. */
u64 nvgpu_mem_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl);
/* IPA -> PA translation for an SGL entry (posix impl just returns phys). */
u64 nvgpu_mem_sgl_ipa_to_pa(struct gk20a *g, struct nvgpu_sgl *sgl, u64 ipa,
u64 *pa_len);
/* DMA address of an SGL entry. */
u64 nvgpu_mem_sgl_dma(struct nvgpu_sgl *sgl);
/* Byte length of an SGL entry. */
u64 nvgpu_mem_sgl_length(struct nvgpu_sgl *sgl);
/* GPU virtual/physical address of an SGL entry under GMMU attrs. */
u64 nvgpu_mem_sgl_gpu_addr(struct gk20a *g, struct nvgpu_sgl *sgl,
struct nvgpu_gmmu_attrs *attrs);
/* True when the SGT is behind an IOMMU — TODO confirm exact semantics. */
bool nvgpu_mem_sgt_iommuable(struct gk20a *g, struct nvgpu_sgt *sgt);
/* Free an SGT and its SGL chain. */
void nvgpu_mem_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt);
#endif /* NVGPU_SGT_H */

View File

@@ -45,41 +45,41 @@ u64 nvgpu_mem_get_phys_addr(struct gk20a *g, struct nvgpu_mem *mem)
return (u64)(uintptr_t)mem->cpu_va;
}
/*
 * Return the next entry in the scatter-gather list after @sgl
 * (reads mem->next; NULL presumably terminates the chain — confirm).
 *
 * NOTE(review): the two signature lines below are the diff's
 * before/after pair — the "static" qualifier is dropped to make
 * the function public, per the commit message.
 */
static struct nvgpu_sgl *nvgpu_mem_sgl_next(struct nvgpu_sgl *sgl)
struct nvgpu_sgl *nvgpu_mem_sgl_next(struct nvgpu_sgl *sgl)
{
struct nvgpu_mem_sgl *mem = (struct nvgpu_mem_sgl *)sgl;
return (struct nvgpu_sgl *) mem->next;
}
/*
 * Return the physical address recorded in @sgl (mem->phys),
 * cast through uintptr_t to u64. @g is unused in this posix impl.
 *
 * NOTE(review): two signature lines below are the diff's
 * static -> public before/after pair.
 */
static u64 nvgpu_mem_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
u64 nvgpu_mem_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
{
struct nvgpu_mem_sgl *mem = (struct nvgpu_mem_sgl *)sgl;
return (u64)(uintptr_t)mem->phys;
}
/*
 * IPA -> PA translation for an SGL entry. In this posix implementation
 * the @ipa and @pa_len arguments are ignored and the entry's physical
 * address is returned directly (identity mapping presumed — the posix
 * layer has no real intermediate-physical addressing; confirm).
 *
 * NOTE(review): two signature lines below are the diff's
 * static -> public before/after pair.
 */
static u64 nvgpu_mem_sgl_ipa_to_pa(struct gk20a *g, struct nvgpu_sgl *sgl,
u64 nvgpu_mem_sgl_ipa_to_pa(struct gk20a *g, struct nvgpu_sgl *sgl,
u64 ipa, u64 *pa_len)
{
return nvgpu_mem_sgl_phys(g, sgl);
}
/*
 * Return the DMA address recorded in @sgl (mem->dma),
 * cast through uintptr_t to u64.
 *
 * NOTE(review): two signature lines below are the diff's
 * static -> public before/after pair.
 */
static u64 nvgpu_mem_sgl_dma(struct nvgpu_sgl *sgl)
u64 nvgpu_mem_sgl_dma(struct nvgpu_sgl *sgl)
{
struct nvgpu_mem_sgl *mem = (struct nvgpu_mem_sgl *)sgl;
return (u64)(uintptr_t)mem->dma;
}
/*
 * Return the byte length of the SGL entry (mem->length), widened to u64.
 *
 * NOTE(review): two signature lines below are the diff's
 * static -> public before/after pair.
 */
static u64 nvgpu_mem_sgl_length(struct nvgpu_sgl *sgl)
u64 nvgpu_mem_sgl_length(struct nvgpu_sgl *sgl)
{
struct nvgpu_mem_sgl *mem = (struct nvgpu_mem_sgl *)sgl;
return (u64)mem->length;
}
static u64 nvgpu_mem_sgl_gpu_addr(struct gk20a *g, struct nvgpu_sgl *sgl,
u64 nvgpu_mem_sgl_gpu_addr(struct gk20a *g, struct nvgpu_sgl *sgl,
struct nvgpu_gmmu_attrs *attrs)
{
struct nvgpu_mem_sgl *mem = (struct nvgpu_mem_sgl *)sgl;
@@ -95,7 +95,7 @@ static u64 nvgpu_mem_sgl_gpu_addr(struct gk20a *g, struct nvgpu_sgl *sgl,
return nvgpu_mem_iommu_translate(g, mem->dma);
}
static bool nvgpu_mem_sgt_iommuable(struct gk20a *g, struct nvgpu_sgt *sgt)
bool nvgpu_mem_sgt_iommuable(struct gk20a *g, struct nvgpu_sgt *sgt)
{
struct nvgpu_os_posix *p = nvgpu_os_posix_from_gk20a(g);
@@ -113,7 +113,7 @@ void nvgpu_mem_sgl_free(struct gk20a *g, struct nvgpu_mem_sgl *sgl)
}
}
static void nvgpu_mem_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt)
void nvgpu_mem_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt)
{
nvgpu_mem_sgl_free(g, (struct nvgpu_mem_sgl *)sgt->sgl);
nvgpu_kfree(g, sgt);