Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: SGL passthrough implementation
The basic nvgpu_mem_sgl implementation provides support for OS-specific scatter-gather list implementations by simply copying them node by node. This is inefficient, taking extra time and memory.

This patch implements an nvgpu_sgt struct to act as a header which is inserted at the front of any scatter-gather list implementation. The header labels the attached list with a set of ops which can be used to interact with it. Since nvgpu common code only has to interact with these function pointers, any SGL implementation can be used. Initialization only requires the allocation of a single struct, removing the need to copy or iterate through the SGL being converted.

Jira NVGPU-186

Change-Id: I2994f804a4a4cc141b702e987e9081d8560ba2e8
Signed-off-by: Sunny He <suhe@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1541426
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
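For orientation, the header-plus-ops pattern amounts to roughly the following (a minimal sketch inferred from the diff below; the exact definitions live in the nvgpu headers and may differ in detail):

    /* Forward declaration: the free op takes the header itself. */
    struct nvgpu_sgt;

    /* One ops table per SGL implementation (Linux scatterlist, vidmem, ...). */
    struct nvgpu_sgt_ops {
    	void *(*sgl_next)(void *sgl);
    	u64   (*sgl_phys)(void *sgl);
    	u64   (*sgl_dma)(void *sgl);
    	u64   (*sgl_length)(void *sgl);
    	u64   (*sgl_gpu_addr)(struct gk20a *g, void *sgl,
    			      struct nvgpu_gmmu_attrs *attrs);
    	void  (*sgt_free)(struct gk20a *g, struct nvgpu_sgt *sgt);
    };

    /* The header inserted at the front of any SGL implementation. */
    struct nvgpu_sgt {
    	const struct nvgpu_sgt_ops *ops; /* how to walk/interpret sgl */
    	void *sgl;                       /* opaque OS-specific list head */
    };

Common code never dereferences sgl directly; it only calls through ops, so wrapping an existing list is a single allocation no matter how many nodes the list has.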
Committed by: mobile promotions
Parent: 0090ee5aca
Commit: 17c581d755
@@ -397,42 +397,59 @@ int __nvgpu_mem_create_from_pages(struct gk20a *g, struct nvgpu_mem *dest,
 	return 0;
 }
 
-static struct nvgpu_mem_sgl *__nvgpu_mem_sgl_dup(struct gk20a *g,
-						 struct nvgpu_mem_sgl *sgl)
+static void *nvgpu_mem_linux_sgl_next(void *sgl)
 {
-	struct nvgpu_mem_sgl *head, *next;
-
-	head = nvgpu_kzalloc(g, sizeof(*sgl));
-	if (!head)
-		return NULL;
-
-	next = head;
-	while (true) {
-		nvgpu_log(g, gpu_dbg_sgl,
-			  "  phys: 0x%-12llx dma: 0x%-12llx len: 0x%llx",
-			  sgl->phys, sgl->dma, sgl->length);
-
-		next->dma    = sgl->dma;
-		next->phys   = sgl->phys;
-		next->length = sgl->length;
-		next->next   = NULL;
-
-		sgl = nvgpu_mem_sgl_next(sgl);
-		if (!sgl)
-			break;
-
-		next->next = nvgpu_kzalloc(g, sizeof(*sgl));
-		if (!next->next) {
-			nvgpu_mem_sgl_free(g, head);
-			return NULL;
-		}
-		next = next->next;
-	}
-
-	return head;
+	return sg_next((struct scatterlist *)sgl);
 }
 
-static struct nvgpu_mem_sgl *__nvgpu_mem_sgl_create_from_vidmem(
+static u64 nvgpu_mem_linux_sgl_phys(void *sgl)
+{
+	return (u64)sg_phys((struct scatterlist *)sgl);
+}
+
+static u64 nvgpu_mem_linux_sgl_dma(void *sgl)
+{
+	return (u64)sg_dma_address((struct scatterlist *)sgl);
+}
+
+static u64 nvgpu_mem_linux_sgl_length(void *sgl)
+{
+	return (u64)((struct scatterlist *)sgl)->length;
+}
+
+static u64 nvgpu_mem_linux_sgl_gpu_addr(struct gk20a *g, void *sgl,
+					struct nvgpu_gmmu_attrs *attrs)
+{
+	if (sg_dma_address((struct scatterlist *)sgl) == 0)
+		return g->ops.mm.gpu_phys_addr(g, attrs,
+				sg_phys((struct scatterlist *)sgl));
+
+	if (sg_dma_address((struct scatterlist *)sgl) == DMA_ERROR_CODE)
+		return 0;
+
+	return gk20a_mm_smmu_vaddr_translate(g,
+			sg_dma_address((struct scatterlist *)sgl));
+}
+
+static void nvgpu_mem_linux_sgl_free(struct gk20a *g, struct nvgpu_sgt *sgt)
+{
+	/*
+	 * Free this SGT. All we do is free the passed SGT. The actual Linux
+	 * SGT/SGL needs to be freed separately.
+	 */
+	nvgpu_kfree(g, sgt);
+}
+
+static const struct nvgpu_sgt_ops nvgpu_linux_sgt_ops = {
+	.sgl_next	= nvgpu_mem_linux_sgl_next,
+	.sgl_phys	= nvgpu_mem_linux_sgl_phys,
+	.sgl_dma	= nvgpu_mem_linux_sgl_dma,
+	.sgl_length	= nvgpu_mem_linux_sgl_length,
+	.sgl_gpu_addr	= nvgpu_mem_linux_sgl_gpu_addr,
+	.sgt_free	= nvgpu_mem_linux_sgl_free,
+};
+
+static struct nvgpu_sgt *__nvgpu_mem_get_sgl_from_vidmem(
 	struct gk20a *g,
 	struct scatterlist *linux_sgl)
 {
@@ -442,70 +459,31 @@ static struct nvgpu_mem_sgl *__nvgpu_mem_sgl_create_from_vidmem(
 	if (!vidmem_alloc)
 		return NULL;
 
-	nvgpu_log(g, gpu_dbg_sgl, "Vidmem sgl:");
-
-	return __nvgpu_mem_sgl_dup(g, vidmem_alloc->sgl);
+	return &vidmem_alloc->sgt;
 }
 
-struct nvgpu_mem_sgl *nvgpu_mem_sgl_create(struct gk20a *g,
-					   struct sg_table *sgt)
+struct nvgpu_sgt *nvgpu_linux_sgt_create(struct gk20a *g, struct sg_table *sgt)
 {
-	struct nvgpu_mem_sgl *head, *sgl, *next;
+	struct nvgpu_sgt *nvgpu_sgt;
 	struct scatterlist *linux_sgl = sgt->sgl;
 
 	if (is_vidmem_page_alloc(sg_dma_address(linux_sgl)))
-		return __nvgpu_mem_sgl_create_from_vidmem(g, linux_sgl);
+		return __nvgpu_mem_get_sgl_from_vidmem(g, linux_sgl);
 
-	head = nvgpu_kzalloc(g, sizeof(*sgl));
-	if (!head)
+	nvgpu_sgt = nvgpu_kzalloc(g, sizeof(*nvgpu_sgt));
+	if (!nvgpu_sgt)
 		return NULL;
 
-	nvgpu_log(g, gpu_dbg_sgl, "Making sgl:");
+	nvgpu_log(g, gpu_dbg_sgl, "Making Linux SGL!");
 
-	sgl = head;
-	while (true) {
-		sgl->dma = sg_dma_address(linux_sgl);
-		sgl->phys = sg_phys(linux_sgl);
-		sgl->length = linux_sgl->length;
-
-		/*
-		 * We don't like offsets in the pages here. This will cause
-		 * problems.
-		 */
-		if (WARN_ON(linux_sgl->offset)) {
-			nvgpu_mem_sgl_free(g, head);
-			return NULL;
-		}
-
-		nvgpu_log(g, gpu_dbg_sgl,
-			  "  phys: 0x%-12llx dma: 0x%-12llx len: 0x%llx",
-			  sgl->phys, sgl->dma, sgl->length);
-
-		/*
-		 * When there's no more SGL ents for the Linux SGL we are
-		 * done. Don't bother making any more SGL ents for the nvgpu
-		 * SGL.
-		 */
-		linux_sgl = sg_next(linux_sgl);
-		if (!linux_sgl)
-			break;
-
-		next = nvgpu_kzalloc(g, sizeof(*sgl));
-		if (!next) {
-			nvgpu_mem_sgl_free(g, head);
-			return NULL;
-		}
-
-		sgl->next = next;
-		sgl = next;
-	}
-
-	nvgpu_log(g, gpu_dbg_sgl, "Done!");
-	return head;
+	nvgpu_sgt->sgl = sgt->sgl;
+	nvgpu_sgt->ops = &nvgpu_linux_sgt_ops;
+
+	return nvgpu_sgt;
 }
 
-struct nvgpu_mem_sgl *nvgpu_mem_sgl_create_from_mem(struct gk20a *g,
-						    struct nvgpu_mem *mem)
+struct nvgpu_sgt *nvgpu_sgt_create_from_mem(struct gk20a *g,
+					    struct nvgpu_mem *mem)
 {
-	return nvgpu_mem_sgl_create(g, mem->priv.sgt);
+	return nvgpu_linux_sgt_create(g, mem->priv.sgt);
 }
 
@@ -69,19 +69,20 @@ static u64 nvgpu_get_buffer_alignment(struct gk20a *g, struct scatterlist *sgl,
 
 	if (aperture == APERTURE_VIDMEM) {
 		struct nvgpu_page_alloc *alloc = get_vidmem_page_alloc(sgl);
-		struct nvgpu_mem_sgl *sgl_vid = alloc->sgl;
+		struct nvgpu_sgt *sgt = &alloc->sgt;
+		void *sgl_vid = sgt->sgl;
 
 		while (sgl_vid) {
 			chunk_align = 1ULL <<
-				__ffs(nvgpu_mem_sgl_phys(sgl_vid) |
-				      nvgpu_mem_sgl_length(sgl_vid));
+				__ffs(nvgpu_sgt_get_phys(sgt, sgl_vid) |
+				      nvgpu_sgt_get_length(sgt, sgl_vid));
 
 			if (align)
 				align = min(align, chunk_align);
 			else
 				align = chunk_align;
 
-			sgl_vid = nvgpu_mem_sgl_next(sgl_vid);
+			sgl_vid = nvgpu_sgt_get_next(sgt, sgl_vid);
 		}
 
 		return align;
@@ -242,7 +243,7 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 	struct nvgpu_vm_area *vm_area = NULL;
 	u32 ctag_offset;
 	enum nvgpu_aperture aperture;
-	struct nvgpu_mem_sgl *nvgpu_sgl;
+	struct nvgpu_sgt *nvgpu_sgt;
 
 	/*
 	 * The kind used as part of the key for map caching. HW may
@@ -399,12 +400,12 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 		ctag_offset += buffer_offset >>
 			       ilog2(g->ops.fb.compression_page_size(g));
 
-	nvgpu_sgl = nvgpu_mem_sgl_create(g, bfr.sgt);
+	nvgpu_sgt = nvgpu_linux_sgt_create(g, bfr.sgt);
 
 	/* update gmmu ptes */
 	map_offset = g->ops.mm.gmmu_map(vm,
 					map_offset,
-					nvgpu_sgl,
+					nvgpu_sgt,
 					buffer_offset, /* sg offset */
 					mapping_size,
 					bfr.pgsz_idx,
@@ -419,7 +420,7 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 	if (!map_offset)
 		goto clean_up;
 
-	nvgpu_mem_sgl_free(g, nvgpu_sgl);
+	nvgpu_sgt_free(nvgpu_sgt, g);
 
 	mapped_buffer = nvgpu_kzalloc(g, sizeof(*mapped_buffer));
 	if (!mapped_buffer) {
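The last four hunks show the consumer side: common code reaches every SGL through accessors (nvgpu_sgt_get_next, nvgpu_sgt_get_phys, nvgpu_sgt_get_length, nvgpu_sgt_free) instead of touching a concrete struct. Those accessors presumably reduce to one-line dispatches through the ops table; a sketch, assuming the nvgpu_sgt layout outlined above:

    static inline void *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt, void *sgl)
    {
    	return sgt->ops->sgl_next(sgl);
    }

    static inline u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, void *sgl)
    {
    	return sgt->ops->sgl_phys(sgl);
    }

    static inline u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, void *sgl)
    {
    	return sgt->ops->sgl_length(sgl);
    }

    /* Argument order matches the call site in the diff: nvgpu_sgt_free(sgt, g). */
    static inline void nvgpu_sgt_free(struct nvgpu_sgt *sgt, struct gk20a *g)
    {
    	if (sgt && sgt->ops->sgt_free)
    		sgt->ops->sgt_free(g, sgt);
    }

Because the vidmem allocator embeds its own nvgpu_sgt (see __nvgpu_mem_get_sgl_from_vidmem returning &vidmem_alloc->sgt), the vidmem path needs no allocation at all; only the Linux scatterlist path allocates the one header struct.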