Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-23)
gpu: nvgpu: Move and rename gk20a_sgtable*
Move and rename the functions that build sgtables for nvgpu_mems into the
Linux-specific DMA code. One place outside of the Linux code does include the
Linux DMA header; that will be fixed in a subsequent patch.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: Ie43c752b8f998f122af70f7c7eb727af0b0d98df
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1464078
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 6a14d980cf
Commit: 126c735d30
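To make the rename concrete before the diff: the helpers keep their behavior, but they move from taking a Linux struct device pointer (gk20a_* prefix) to taking a struct gk20a pointer (nvgpu_* prefix). A minimal before/after sketch of one call site, condensed from the allocation hunk below; everything surrounding the call is omitted:

    /* Old: the helper takes the Linux struct device pointer d. */
    err = gk20a_get_sgtable(d, &mem->priv.sgt, mem->cpu_va, iova, size);

    /* New: the helper takes struct gk20a *g; the Linux device is looked up
     * inside the Linux-specific DMA code via dev_from_gk20a(g). */
    err = nvgpu_get_sgtable(g, &mem->priv.sgt, mem->cpu_va, iova, size);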
@@ -21,6 +21,8 @@
 #include <nvgpu/lock.h>
 #include <nvgpu/bug.h>
 
+#include <nvgpu/linux/dma.h>
+
 #include "gk20a/gk20a.h"
 
 #if defined(CONFIG_GK20A_VIDMEM)
@@ -126,11 +128,11 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
         }
 
         if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
-                err = gk20a_get_sgtable_from_pages(d, &mem->priv.sgt,
+                err = nvgpu_get_sgtable_from_pages(g, &mem->priv.sgt,
                                 mem->priv.pages,
                                 iova, size);
         else {
-                err = gk20a_get_sgtable(d, &mem->priv.sgt, mem->cpu_va,
+                err = nvgpu_get_sgtable(g, &mem->priv.sgt, mem->cpu_va,
                                 iova, size);
                 memset(mem->cpu_va, 0, size);
         }
@@ -359,7 +361,7 @@ static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
         }
 
         if (mem->priv.sgt)
-                gk20a_free_sgtable(g, &mem->priv.sgt);
+                nvgpu_free_sgtable(g, &mem->priv.sgt);
 
         mem->size = 0;
         mem->aperture = APERTURE_INVALID;
@@ -389,7 +391,7 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
                 nvgpu_memset(g, mem, 0, 0, mem->size);
         nvgpu_free(mem->allocator,
                    (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
-        gk20a_free_sgtable(g, &mem->priv.sgt);
+        nvgpu_free_sgtable(g, &mem->priv.sgt);
 
         mem->size = 0;
         mem->aperture = APERTURE_INVALID;
@@ -412,9 +414,74 @@ void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem)
 void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
 {
         if (mem->gpu_va)
-                gk20a_gmmu_unmap(vm, mem->gpu_va, mem->size,
-                                gk20a_mem_flag_none);
+                gk20a_gmmu_unmap(vm, mem->gpu_va,
+                                mem->size, gk20a_mem_flag_none);
         mem->gpu_va = 0;
 
         nvgpu_dma_free(vm->mm->g, mem);
 }
+
+int nvgpu_get_sgtable(struct gk20a *g, struct sg_table **sgt,
+                      void *cpuva, u64 iova, size_t size)
+{
+        int err = 0;
+        struct sg_table *tbl;
+
+        tbl = nvgpu_kzalloc(g, sizeof(struct sg_table));
+        if (!tbl) {
+                err = -ENOMEM;
+                goto fail;
+        }
+
+        err = dma_get_sgtable(dev_from_gk20a(g), tbl, cpuva, iova, size);
+        if (err)
+                goto fail;
+
+        sg_dma_address(tbl->sgl) = iova;
+        *sgt = tbl;
+
+        return 0;
+
+fail:
+        if (tbl)
+                nvgpu_kfree(g, tbl);
+
+        return err;
+}
+
+int nvgpu_get_sgtable_from_pages(struct gk20a *g, struct sg_table **sgt,
+                                 struct page **pages, u64 iova, size_t size)
+{
+        int err = 0;
+        struct sg_table *tbl;
+
+        tbl = nvgpu_kzalloc(g, sizeof(struct sg_table));
+        if (!tbl) {
+                err = -ENOMEM;
+                goto fail;
+        }
+
+        err = sg_alloc_table_from_pages(tbl, pages,
+                                        DIV_ROUND_UP(size, PAGE_SIZE),
+                                        0, size, GFP_KERNEL);
+        if (err)
+                goto fail;
+
+        sg_dma_address(tbl->sgl) = iova;
+        *sgt = tbl;
+
+        return 0;
+
+fail:
+        if (tbl)
+                nvgpu_kfree(g, tbl);
+
+        return err;
+}
+
+void nvgpu_free_sgtable(struct gk20a *g, struct sg_table **sgt)
+{
+        sg_free_table(*sgt);
+        nvgpu_kfree(g, *sgt);
+        *sgt = NULL;
+}
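As a hedged reading of the helpers added above (the variable names below are illustrative, not from the patch): the table is built into a local pointer and only stored through *sgt once it is fully set up, and nvgpu_free_sgtable() both releases the table and NULLs the caller's pointer, so a caller allocates and frees it exactly once:

    struct sg_table *sgt = NULL;
    int err;

    err = nvgpu_get_sgtable(g, &sgt, cpu_va, iova, size);
    if (err)
            return err;             /* nothing published, nothing to clean up */

    /* ... hand sgt to the GMMU mapping code ... */

    nvgpu_free_sgtable(g, &sgt);    /* frees the table and sets sgt = NULL */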
@@ -20,6 +20,8 @@
 #include <nvgpu/log.h>
 #include <nvgpu/bug.h>
 
+#include <nvgpu/linux/dma.h>
+
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
 
@@ -286,11 +288,11 @@ int nvgpu_mem_create_from_mem(struct gk20a *g,
          * is passed to us. This table gets freed by the dma free routines.
          */
         if (src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING)
-                ret = gk20a_get_sgtable_from_pages(g->dev, &dest->priv.sgt,
+                ret = nvgpu_get_sgtable_from_pages(g, &dest->priv.sgt,
                                 src->priv.pages + start_page,
                                 new_iova, size);
         else
-                ret = gk20a_get_sgtable(g->dev, &dest->priv.sgt, dest->cpu_va,
+                ret = nvgpu_get_sgtable(g, &dest->priv.sgt, dest->cpu_va,
                                 new_iova, size);
 
         return ret;
@@ -38,6 +38,8 @@
 #include <nvgpu/bug.h>
 #include <nvgpu/log2.h>
 
+#include <nvgpu/linux/dma.h>
+
 #include "gk20a.h"
 #include "mm_gk20a.h"
 #include "fence_gk20a.h"
@@ -2621,7 +2623,7 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
         gk20a_gmmu_clear_vidmem_mem(g, mem);
         nvgpu_free(mem->allocator,
                    (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
-        gk20a_free_sgtable(g, &mem->priv.sgt);
+        nvgpu_free_sgtable(g, &mem->priv.sgt);
 
         WARN_ON(atomic64_sub_return(mem->size,
                         &g->mm.vidmem.bytes_pending) < 0);
@@ -2668,75 +2670,6 @@ void gk20a_gmmu_unmap(struct vm_gk20a *vm,
         nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
 
-/* get sg_table from already allocated buffer */
-int gk20a_get_sgtable(struct device *d, struct sg_table **sgt,
-                        void *cpuva, u64 iova,
-                        size_t size)
-{
-        struct gk20a *g = get_gk20a(d);
-
-        int err = 0;
-        *sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
-        if (!(*sgt)) {
-                nvgpu_err(g, "failed to allocate memory\n");
-                err = -ENOMEM;
-                goto fail;
-        }
-        err = dma_get_sgtable(d, *sgt,
-                        cpuva, iova,
-                        size);
-        if (err) {
-                nvgpu_err(g, "failed to create sg table\n");
-                goto fail;
-        }
-        sg_dma_address((*sgt)->sgl) = iova;
-
-        return 0;
- fail:
-        if (*sgt) {
-                nvgpu_kfree(g, *sgt);
-                *sgt = NULL;
-        }
-        return err;
-}
-
-int gk20a_get_sgtable_from_pages(struct device *d, struct sg_table **sgt,
-                        struct page **pages, u64 iova,
-                        size_t size)
-{
-        int err = 0;
-        struct gk20a *g = get_gk20a(d);
-
-        *sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
-        if (!(*sgt)) {
-                nvgpu_err(g, "failed to allocate memory\n");
-                err = -ENOMEM;
-                goto fail;
-        }
-        err = sg_alloc_table_from_pages(*sgt, pages,
-                        DIV_ROUND_UP(size, PAGE_SIZE), 0, size, GFP_KERNEL);
-        if (err) {
-                nvgpu_err(g, "failed to allocate sg_table\n");
-                goto fail;
-        }
-        sg_dma_address((*sgt)->sgl) = iova;
-
-        return 0;
- fail:
-        if (*sgt) {
-                nvgpu_kfree(get_gk20a(d), *sgt);
-                *sgt = NULL;
-        }
-        return err;
-}
-
-void gk20a_free_sgtable(struct gk20a *g, struct sg_table **sgt)
-{
-        sg_free_table(*sgt);
-        nvgpu_kfree(g, *sgt);
-        *sgt = NULL;
-}
-
 u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova)
 {
         /* ensure it is not vidmem allocation */
@@ -497,16 +497,6 @@ void gk20a_mm_dump_vm(struct vm_gk20a *vm,
 
 int gk20a_mm_suspend(struct gk20a *g);
 
-int gk20a_get_sgtable(struct device *d, struct sg_table **sgt,
-                        void *cpuva, u64 iova,
-                        size_t size);
-
-int gk20a_get_sgtable_from_pages(struct device *d, struct sg_table **sgt,
-                        struct page **pages, u64 iova,
-                        size_t size);
-
-void gk20a_free_sgtable(struct gk20a *g, struct sg_table **sgt);
-
 u64 gk20a_mm_iova_addr(struct gk20a *g, struct scatterlist *sgl,
                 u32 flags);
 u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova);
@@ -25,6 +25,8 @@
 #include <nvgpu/acr/nvgpu_acr.h>
 #include <nvgpu/firmware.h>
 
+#include <nvgpu/linux/dma.h>
+
 #include "gk20a/gk20a.h"
 #include "gk20a/pmu_gk20a.h"
 #include "mm_gm20b.h"
@@ -456,7 +458,7 @@ int prepare_ucode_blob(struct gk20a *g)
         gm20b_dbg_pmu("prepare ucode blob return 0\n");
         free_acr_resources(g, plsfm);
 free_sgt:
-        gk20a_free_sgtable(g, &sgt);
+        nvgpu_free_sgtable(g, &sgt);
         return err;
 }
 
drivers/gpu/nvgpu/include/nvgpu/linux/dma.h (new file, 34 lines)
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVGPU_LINUX_DMA_H__
+#define __NVGPU_LINUX_DMA_H__
+
+/**
+ * Functions used internally for building the backing SGTs for nvgpu_mems.
+ */
+
+int nvgpu_get_sgtable(struct gk20a *g, struct sg_table **sgt,
+                      void *cpuva, u64 iova,
+                      size_t size);
+
+int nvgpu_get_sgtable_from_pages(struct gk20a *g, struct sg_table **sgt,
+                                 struct page **pages, u64 iova,
+                                 size_t size);
+
+void nvgpu_free_sgtable(struct gk20a *g, struct sg_table **sgt);
+
+#endif
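A hedged sketch of how the two builders declared above divide the work, mirroring the NVGPU_DMA_NO_KERNEL_MAPPING branch of nvgpu_dma_alloc_flags_sys() earlier in this change; mem, iova and size are assumed to come from that allocation path:

    #include <nvgpu/linux/dma.h>

    if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
            /* No kernel VA exists, so build the table from the page array. */
            err = nvgpu_get_sgtable_from_pages(g, &mem->priv.sgt,
                                               mem->priv.pages, iova, size);
    else
            /* A coherent kernel mapping exists; build it from the CPU VA. */
            err = nvgpu_get_sgtable(g, &mem->priv.sgt, mem->cpu_va, iova, size);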
@@ -51,6 +51,8 @@
 #include <nvgpu/kmem.h>
 #include <nvgpu/bug.h>
 
+#include <nvgpu/linux/dma.h>
+
 #include "gk20a/gk20a.h"
 #include "gk20a/hal_gk20a.h"
 #include "gk20a/platform_gk20a.h"
@@ -143,7 +145,7 @@ static void gk20a_tegra_secure_destroy(struct gk20a *g,
         dma_free_attrs(&tegra_vpr_dev, desc->mem.size,
                         (void *)(uintptr_t)pa,
                         pa, __DMA_ATTR(attrs));
-        gk20a_free_sgtable(g, &desc->mem.priv.sgt);
+        nvgpu_free_sgtable(g, &desc->mem.priv.sgt);
         desc->mem.priv.sgt = NULL;
         }
 }