Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Delete NVGPU_DMA_FORCE_CONTIGUOUS
The flag NVGPU_DMA_FORCE_CONTIGUOUS simply means that the backing pages must
be physically contiguous. Meanwhile, the flag NVGPU_DMA_PHYSICALLY_ADDRESSED
means that the memory must be contiguous from the GPU's perspective: physically
contiguous when no IOMMU is in use, or virtually contiguous through the IOMMU
otherwise. That makes NVGPU_DMA_FORCE_CONTIGUOUS redundant, so this patch
removes it.

Bug 200444660

Change-Id: I63bb06fea728b34ec2c6f831504392d42c426d55
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2035403
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
parent ac3c3e2b69
commit a8e6d13652
committed by mobile promotions
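The core of the change is visible in the nvgpu_dma_flags_to_attrs() hunk
below: a request for GPU-contiguous memory (NVGPU_DMA_PHYSICALLY_ADDRESSED)
only needs DMA_ATTR_FORCE_CONTIGUOUS when the GPU is not behind a usable
IOMMU. The following standalone sketch models that decision; the struct gk20a
fields and helpers here are simplified stand-ins for the real
nvgpu_iommuable() / NVGPU_MM_USE_PHYSICAL_SG checks, not driver code:

/* sketch.c -- illustrative only; stubs replace the real nvgpu helpers */
#include <stdbool.h>
#include <stdio.h>

#define BIT64(i)                       (1ULL << (i))
#define NVGPU_DMA_NO_KERNEL_MAPPING    BIT64(0)
#define NVGPU_DMA_READ_ONLY            BIT64(1)
#define NVGPU_DMA_PHYSICALLY_ADDRESSED BIT64(2)

#define DMA_ATTR_FORCE_CONTIGUOUS      BIT64(3) /* stand-in for the kernel attr */

struct gk20a {
	bool iommuable;        /* stub: nvgpu_iommuable(g) */
	bool use_physical_sg;  /* stub: nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) */
};

/* Same shape as the nvgpu_uses_iommu(g) macro added by this patch. */
static bool nvgpu_uses_iommu(const struct gk20a *g)
{
	return g->iommuable && !g->use_physical_sg;
}

/*
 * GPU-contiguous memory only has to be physically contiguous when the
 * GPU cannot rely on an IOMMU to stitch scattered pages together.
 */
static unsigned long long flags_to_attrs(const struct gk20a *g,
					 unsigned long long flags)
{
	unsigned long long attrs = 0;

	if ((flags & NVGPU_DMA_PHYSICALLY_ADDRESSED) != 0 &&
	    !nvgpu_uses_iommu(g))
		attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
	return attrs;
}

int main(void)
{
	struct gk20a with_iommu = { .iommuable = true,  .use_physical_sg = false };
	struct gk20a no_iommu   = { .iommuable = false, .use_physical_sg = false };

	printf("with IOMMU: force contiguous = %d\n",
	       flags_to_attrs(&with_iommu, NVGPU_DMA_PHYSICALLY_ADDRESSED) != 0);
	printf("no IOMMU:   force contiguous = %d\n",
	       flags_to_attrs(&no_iommu, NVGPU_DMA_PHYSICALLY_ADDRESSED) != 0);
	return 0;
}

With an IOMMU present (and NVGPU_MM_USE_PHYSICAL_SG off) the allocation can be
built from individual pages; without one, the sketch, like the patched driver,
falls back to forcing physical contiguity.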
@@ -181,7 +181,7 @@ static int nvgpu_pd_cache_alloc_direct(struct gk20a *g,
 	 * underlying allocations to be physically contiguous as well.
 	 */
 	if (!nvgpu_iommuable(g) && bytes > PAGE_SIZE) {
-		flags = NVGPU_DMA_FORCE_CONTIGUOUS;
+		flags = NVGPU_DMA_PHYSICALLY_ADDRESSED;
 	}
 
 	err = nvgpu_dma_alloc_flags(g, flags, bytes, pd->mem);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -43,16 +43,10 @@ struct nvgpu_mem;
  */
 #define NVGPU_DMA_NO_KERNEL_MAPPING		BIT64(0)
 
-/*
- * Don't allow building the buffer from individual pages but require a
- * physically contiguous block.
- */
-#define NVGPU_DMA_FORCE_CONTIGUOUS		BIT64(1)
-
 /*
  * Make the mapping read-only.
  */
-#define NVGPU_DMA_READ_ONLY			BIT64(2)
+#define NVGPU_DMA_READ_ONLY			BIT64(1)
 
 /*
  * Buffer is physically addressed from the GPU.
@@ -60,7 +54,7 @@ struct nvgpu_mem;
  * the buffer from individual pages, but require a physically contiguous
  * block.
  */
-#define NVGPU_DMA_PHYSICALLY_ADDRESSED		BIT64(3)
+#define NVGPU_DMA_PHYSICALLY_ADDRESSED		BIT64(2)
 
 
 /**
@@ -107,7 +101,6 @@ int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
  * The following flags are accepted:
  *
  *   %NVGPU_DMA_NO_KERNEL_MAPPING
- *   %NVGPU_DMA_FORCE_CONTIGUOUS
  *   %NVGPU_DMA_READ_ONLY
  */
 int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
@@ -141,7 +134,6 @@ int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
  * The following flags are accepted:
  *
  *   %NVGPU_DMA_NO_KERNEL_MAPPING
- *   %NVGPU_DMA_FORCE_CONTIGUOUS
  *   %NVGPU_DMA_READ_ONLY
  */
 int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
@@ -274,7 +266,6 @@ int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
  * flags are:
  *
  *   %NVGPU_DMA_NO_KERNEL_MAPPING
- *   %NVGPU_DMA_FORCE_CONTIGUOUS
  *   %NVGPU_DMA_READ_ONLY
  */
 int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
@@ -308,7 +299,6 @@ int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size,
  * flags are:
  *
  *   %NVGPU_DMA_NO_KERNEL_MAPPING
- *   %NVGPU_DMA_FORCE_CONTIGUOUS
  *   %NVGPU_DMA_READ_ONLY
  */
 int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
@@ -342,7 +332,6 @@ int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size,
  * flags are:
  *
  *   %NVGPU_DMA_NO_KERNEL_MAPPING
- *   %NVGPU_DMA_FORCE_CONTIGUOUS
  *   %NVGPU_DMA_READ_ONLY
  */
 int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
@@ -39,7 +39,7 @@
  * added it must be added here as well!!
  */
 #define NVGPU_DMA_STR_SIZE \
-	sizeof("NO_KERNEL_MAPPING FORCE_CONTIGUOUS PHYSICALLY_ADDRESSED")
+	sizeof("NO_KERNEL_MAPPING PHYSICALLY_ADDRESSED")
 
 /*
  * This function can't fail. It will always at minimum memset() the buf which
@@ -60,7 +60,6 @@ void nvgpu_dma_flags_to_str(struct gk20a *g, unsigned long flags, char *buf)
 	} while (false)
 
 	APPEND_FLAG(NVGPU_DMA_NO_KERNEL_MAPPING,    "NO_KERNEL_MAPPING ");
-	APPEND_FLAG(NVGPU_DMA_FORCE_CONTIGUOUS,     "FORCE_CONTIGUOUS ");
 	APPEND_FLAG(NVGPU_DMA_PHYSICALLY_ADDRESSED, "PHYSICALLY_ADDRESSED");
 #undef APPEND_FLAG
 }
@@ -151,12 +150,16 @@ static u64 __nvgpu_dma_alloc(struct nvgpu_allocator *allocator, u64 at,
 }
 #endif
 
-static void nvgpu_dma_flags_to_attrs(unsigned long *attrs,
-				     unsigned long flags)
+/* Check if IOMMU is available and if GPU uses it */
+#define nvgpu_uses_iommu(g) \
+	(nvgpu_iommuable(g) && !nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG))
+
+static void nvgpu_dma_flags_to_attrs(struct gk20a *g, unsigned long *attrs,
+				     unsigned long flags)
 {
 	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
 		*attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
-	if (flags & NVGPU_DMA_FORCE_CONTIGUOUS)
+	if (flags & NVGPU_DMA_PHYSICALLY_ADDRESSED && !nvgpu_uses_iommu(g))
 		*attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
 }
 
@@ -174,12 +177,6 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 		WARN_ON(1);
 	}
 
-	if ((flags & NVGPU_DMA_PHYSICALLY_ADDRESSED) &&
-	    (!nvgpu_iommuable(g) ||
-	     nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG))) {
-		flags |= NVGPU_DMA_FORCE_CONTIGUOUS;
-	}
-
 	/*
 	 * Before the debug print so we see this in the total. But during
 	 * cleanup in the fail path this has to be subtracted.
@@ -195,7 +192,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	mem->size = size;
 	size = PAGE_ALIGN(size);
 
-	nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
+	nvgpu_dma_flags_to_attrs(g, &dma_attrs, flags);
 	alloc_ret = dma_alloc_attrs(d, size, &iova,
 				    GFP_KERNEL|__GFP_ZERO, dma_attrs);
 	if (!alloc_ret) {
@@ -343,7 +340,7 @@ void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 	if (mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING)
 		cpu_addr = mem->priv.pages;
 
-	nvgpu_dma_flags_to_attrs(&dma_attrs, mem->priv.flags);
+	nvgpu_dma_flags_to_attrs(g, &dma_attrs, mem->priv.flags);
 	dma_free_attrs(d, mem->aligned_size, cpu_addr,
 		       sg_dma_address(mem->priv.sgt->sgl), dma_attrs);
 
@@ -424,7 +421,7 @@ int nvgpu_get_sgtable_attrs(struct gk20a *g, struct sg_table **sgt,
 		goto fail;
 	}
 
-	nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
+	nvgpu_dma_flags_to_attrs(g, &dma_attrs, flags);
 	err = dma_get_sgtable_attrs(dev_from_gk20a(g), tbl, cpuva, iova,
 				    size, dma_attrs);
 	if (err)