mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Clean up dma_attrs handling code
The dma_attrs type was changed from "struct dma_attrs" to "unsigned long" after kernel 4.4. Remove all such dma_attrs handling instances.

Bug 2485656

Change-Id: I07052df763d9d77b0be824a9303da2240d17c701
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2002701
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
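For context, the kernel API difference being cleaned up: before upstream commit 00085f1efa38 ("dma-mapping: use unsigned long for dma_attrs", merged for v4.8), DMA attributes were carried in a struct dma_attrs and set one at a time with dma_set_attr(); afterwards they are a plain unsigned long bitmask passed directly. A minimal sketch of both call styles follows — the helper name alloc_no_kernel_mapping() is hypothetical, while the dma_* calls and DMA_ATTR_NO_KERNEL_MAPPING are real kernel APIs:

#include <linux/version.h>
#include <linux/dma-mapping.h>	/* pre-4.8 kernels pull in dma-attrs.h here */

/* Illustrative only: allocate DMA memory with no kernel mapping,
 * before and after dma_attrs became a plain bitmask. */
static void *alloc_no_kernel_mapping(struct device *dev, size_t size,
				     dma_addr_t *iova)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
	/* Old API: attributes live in a struct and are set one by one. */
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	return dma_alloc_attrs(dev, size, iova, GFP_KERNEL, &attrs);
#else
	/* New API: attributes are an unsigned long bitmask. */
	unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	return dma_alloc_attrs(dev, size, iova, GFP_KERNEL, attrs);
#endif
}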
committed by mobile promotions
parent ba9bbacdfd
commit 4af6d70713
@@ -33,17 +33,6 @@
 #include "os_linux.h"
 #include "dmabuf_vidmem.h"
 
-#ifdef __DMA_ATTRS_LONGS
-#define NVGPU_DEFINE_DMA_ATTRS(x) \
-	struct dma_attrs x = { \
-		.flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 }, \
-	}
-#define NVGPU_DMA_ATTR(attrs) &attrs
-#else
-#define NVGPU_DEFINE_DMA_ATTRS(attrs) unsigned long attrs = 0
-#define NVGPU_DMA_ATTR(attrs) attrs
-#endif
-
 /*
  * Enough to hold all the possible flags in string form. When a new flag is
  * added it must be added here as well!!
@@ -157,21 +146,13 @@ static u64 __nvgpu_dma_alloc(struct nvgpu_allocator *allocator, u64 at,
 }
 #endif
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
 static void nvgpu_dma_flags_to_attrs(unsigned long *attrs,
 		unsigned long flags)
-#define ATTR_ARG(x) *x
-#else
-static void nvgpu_dma_flags_to_attrs(struct dma_attrs *attrs,
-		unsigned long flags)
-#define ATTR_ARG(x) x
-#endif
 {
 	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
-		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, ATTR_ARG(attrs));
+		*attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
 	if (flags & NVGPU_DMA_FORCE_CONTIGUOUS)
-		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, ATTR_ARG(attrs));
+		*attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
-#undef ATTR_ARG
 }
 
 int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
@@ -180,7 +161,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	struct device *d = dev_from_gk20a(g);
 	int err;
 	dma_addr_t iova;
-	NVGPU_DEFINE_DMA_ATTRS(dma_attrs);
+	unsigned long dma_attrs = 0;
 	void *alloc_ret;
 
 	if (nvgpu_mem_is_valid(mem)) {
@@ -212,8 +193,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
 
 	alloc_ret = dma_alloc_attrs(d, size, &iova,
-				    GFP_KERNEL|__GFP_ZERO,
-				    NVGPU_DMA_ATTR(dma_attrs));
+				    GFP_KERNEL|__GFP_ZERO, dma_attrs);
 	if (!alloc_ret)
 		return -ENOMEM;
 
@@ -239,7 +219,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	return 0;
 
 fail_free_dma:
-	dma_free_attrs(d, size, alloc_ret, iova, NVGPU_DMA_ATTR(dma_attrs));
+	dma_free_attrs(d, size, alloc_ret, iova, dma_attrs);
 	mem->cpu_va = NULL;
 	mem->priv.sgt = NULL;
 	mem->size = 0;
@@ -334,6 +314,7 @@ fail_physfree:
 void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	struct device *d = dev_from_gk20a(g);
+	unsigned long dma_attrs = 0;
 
 	g->dma_memory_used -= mem->aligned_size;
 
@@ -343,18 +324,17 @@ void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 	    !(mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA) &&
 	    (mem->cpu_va || mem->priv.pages)) {
 		if (mem->priv.flags) {
-			NVGPU_DEFINE_DMA_ATTRS(dma_attrs);
 
 			nvgpu_dma_flags_to_attrs(&dma_attrs, mem->priv.flags);
 
 			if (mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
 				dma_free_attrs(d, mem->aligned_size, mem->priv.pages,
 					sg_dma_address(mem->priv.sgt->sgl),
-					NVGPU_DMA_ATTR(dma_attrs));
+					dma_attrs);
 			} else {
 				dma_free_attrs(d, mem->aligned_size, mem->cpu_va,
 					sg_dma_address(mem->priv.sgt->sgl),
-					NVGPU_DMA_ATTR(dma_attrs));
+					dma_attrs);
 			}
 		} else {
 			dma_free_coherent(d, mem->aligned_size, mem->cpu_va,
@@ -427,7 +407,7 @@ int nvgpu_get_sgtable_attrs(struct gk20a *g, struct sg_table **sgt,
 {
 	int err = 0;
 	struct sg_table *tbl;
-	NVGPU_DEFINE_DMA_ATTRS(dma_attrs);
+	unsigned long dma_attrs = 0;
 
 	tbl = nvgpu_kzalloc(g, sizeof(struct sg_table));
 	if (!tbl) {
@@ -437,7 +417,7 @@ int nvgpu_get_sgtable_attrs(struct gk20a *g, struct sg_table **sgt,
 
 	nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
 	err = dma_get_sgtable_attrs(dev_from_gk20a(g), tbl, cpuva, iova,
-				    size, NVGPU_DMA_ATTR(dma_attrs));
+				    size, dma_attrs);
 	if (err)
 		goto fail;
 
@@ -20,7 +20,6 @@
 #include <linux/delay.h>
 #include <uapi/linux/nvgpu.h>
 #include <linux/dma-buf.h>
-#include <linux/dma-attrs.h>
 #include <linux/nvmap.h>
 #include <linux/reset.h>
 #if defined(CONFIG_TEGRA_DVFS)
@@ -92,11 +91,9 @@ struct gk20a_emc_params {
 static void gk20a_tegra_secure_page_destroy(struct gk20a *g,
 				       struct secure_page_buffer *secure_buffer)
 {
-	DEFINE_DMA_ATTRS(attrs);
-	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
 	dma_free_attrs(&tegra_vpr_dev, secure_buffer->size,
 			(void *)(uintptr_t)secure_buffer->phys,
-			secure_buffer->phys, __DMA_ATTR(attrs));
+			secure_buffer->phys, DMA_ATTR_NO_KERNEL_MAPPING);
 
 	secure_buffer->destroy = NULL;
 }
@@ -624,7 +621,6 @@ int gk20a_tegra_init_secure_alloc(struct gk20a_platform *platform)
 {
 	struct gk20a *g = platform->g;
 	struct secure_page_buffer *secure_buffer = &platform->secure_buffer;
-	DEFINE_DMA_ATTRS(attrs);
 	dma_addr_t iova;
 
 	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
@@ -633,9 +629,8 @@ int gk20a_tegra_init_secure_alloc(struct gk20a_platform *platform)
 #if PAGE_SIZE > 4096
 	platform->secure_buffer_size += SZ_64K;
 #endif
-	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
 	(void)dma_alloc_attrs(&tegra_vpr_dev, platform->secure_buffer_size, &iova,
-			      GFP_KERNEL, __DMA_ATTR(attrs));
+			      GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
 	/* Some platforms disable VPR. In that case VPR allocations always
 	 * fail. Just disable VPR usage in nvgpu in that case. */
 	if (dma_mapping_error(&tegra_vpr_dev, iova))
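The net result of the diff is a single code path: nvgpu flags are folded into a bare unsigned long bitmask that is handed straight to dma_alloc_attrs()/dma_free_attrs(). Below is a standalone, compilable sketch of that conversion; the flag and attribute values are stand-ins for this sketch only (the real values come from nvgpu headers and <linux/dma-mapping.h>), while the conversion logic mirrors the new nvgpu_dma_flags_to_attrs() above:

#include <stdio.h>

#define NVGPU_DMA_NO_KERNEL_MAPPING	(1UL << 0)	/* stand-in value */
#define NVGPU_DMA_FORCE_CONTIGUOUS	(1UL << 1)	/* stand-in value */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 2)	/* stand-in value */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 3)	/* stand-in value */

/* Same shape as the post-cleanup nvgpu_dma_flags_to_attrs(): translate
 * nvgpu flag bits into a plain DMA-attribute bitmask. */
static void nvgpu_dma_flags_to_attrs(unsigned long *attrs,
				     unsigned long flags)
{
	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
		*attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
	if (flags & NVGPU_DMA_FORCE_CONTIGUOUS)
		*attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
}

int main(void)
{
	unsigned long dma_attrs = 0;

	nvgpu_dma_flags_to_attrs(&dma_attrs,
				 NVGPU_DMA_NO_KERNEL_MAPPING |
				 NVGPU_DMA_FORCE_CONTIGUOUS);
	/* In the driver, this bitmask goes directly to dma_alloc_attrs()
	 * and later to the matching dma_free_attrs() call. */
	printf("dma_attrs = 0x%lx\n", dma_attrs);
	return 0;
}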