diff --git a/drivers/gpu/nvgpu/Makefile.nvgpu b/drivers/gpu/nvgpu/Makefile.nvgpu
index 16c54849b..686619cfa 100644
--- a/drivers/gpu/nvgpu/Makefile.nvgpu
+++ b/drivers/gpu/nvgpu/Makefile.nvgpu
@@ -31,6 +31,7 @@ nvgpu-y := \
common/linux/ioctl_channel.o \
common/linux/ioctl_tsg.o \
common/linux/log.o \
+ common/linux/mem_desc.o \
common/mm/nvgpu_allocator.o \
common/mm/bitmap_allocator.o \
common/mm/buddy_allocator.o \
diff --git a/drivers/gpu/nvgpu/common/linux/mem_desc.c b/drivers/gpu/nvgpu/common/linux/mem_desc.c
new file mode 100644
index 000000000..b2ef122ea
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/mem_desc.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <nvgpu/mem_desc.h>
+#include <nvgpu/pramin.h>
+
+#include "gk20a/gk20a.h"
+#include "gk20a/mm_gk20a.h"
+
+u32 __gk20a_aperture_mask(struct gk20a *g, enum gk20a_aperture aperture,
+ u32 sysmem_mask, u32 vidmem_mask)
+{
+ switch (aperture) {
+ case APERTURE_SYSMEM:
+ /* sysmem for dgpus; some igpus consider system memory vidmem */
+ return g->mm.vidmem_is_vidmem ? sysmem_mask : vidmem_mask;
+ case APERTURE_VIDMEM:
+ /* for dgpus only */
+ return vidmem_mask;
+ case APERTURE_INVALID:
+ WARN_ON("Bad aperture");
+ }
+ return 0;
+}
+
+u32 gk20a_aperture_mask(struct gk20a *g, struct mem_desc *mem,
+ u32 sysmem_mask, u32 vidmem_mask)
+{
+ return __gk20a_aperture_mask(g, mem->aperture,
+ sysmem_mask, vidmem_mask);
+}
+
+int gk20a_mem_begin(struct gk20a *g, struct mem_desc *mem)
+{
+ void *cpu_va;
+
+ if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
+ return 0;
+
+ if (WARN_ON(mem->cpu_va)) {
+ gk20a_warn(dev_from_gk20a(g), "nested %s", __func__);
+ return -EBUSY;
+ }
+
+ cpu_va = vmap(mem->pages,
+ PAGE_ALIGN(mem->size) >> PAGE_SHIFT,
+ 0, pgprot_writecombine(PAGE_KERNEL));
+
+ if (WARN_ON(!cpu_va))
+ return -ENOMEM;
+
+ mem->cpu_va = cpu_va;
+ return 0;
+}
+
+void gk20a_mem_end(struct gk20a *g, struct mem_desc *mem)
+{
+ if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
+ return;
+
+ vunmap(mem->cpu_va);
+ mem->cpu_va = NULL;
+}
+
+u32 gk20a_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w)
+{
+ u32 data = 0;
+
+ if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+ u32 *ptr = mem->cpu_va;
+
+ WARN_ON(!ptr);
+ data = ptr[w];
+#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
+ gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
+#endif
+ } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+ u32 value;
+ u32 *p = &value;
+
+ nvgpu_pramin_access_batched(g, mem, w * sizeof(u32),
+ sizeof(u32), pramin_access_batch_rd_n, &p);
+
+ data = value;
+
+ } else {
+ WARN_ON("Accessing unallocated mem_desc");
+ }
+
+ return data;
+}
+
+u32 gk20a_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset)
+{
+ WARN_ON(offset & 3);
+ return gk20a_mem_rd32(g, mem, offset / sizeof(u32));
+}
+
+void gk20a_mem_rd_n(struct gk20a *g, struct mem_desc *mem,
+ u32 offset, void *dest, u32 size)
+{
+ WARN_ON(offset & 3);
+ WARN_ON(size & 3);
+
+ if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+ u8 *src = (u8 *)mem->cpu_va + offset;
+
+ WARN_ON(!mem->cpu_va);
+ memcpy(dest, src, size);
+#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
+ if (size)
+ gk20a_dbg(gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
+ src, *dest, size);
+#endif
+ } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+ u32 *dest_u32 = dest;
+
+ nvgpu_pramin_access_batched(g, mem, offset, size,
+ pramin_access_batch_rd_n, &dest_u32);
+ } else {
+ WARN_ON("Accessing unallocated mem_desc");
+ }
+}
+
+void gk20a_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data)
+{
+ if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+ u32 *ptr = mem->cpu_va;
+
+ WARN_ON(!ptr);
+#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
+ gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
+#endif
+ ptr[w] = data;
+ } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+ u32 value = data;
+ u32 *p = &value;
+
+ nvgpu_pramin_access_batched(g, mem, w * sizeof(u32),
+ sizeof(u32), pramin_access_batch_wr_n, &p);
+ if (!mem->skip_wmb)
+ wmb();
+ } else {
+ WARN_ON("Accessing unallocated mem_desc");
+ }
+}
+
+void gk20a_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data)
+{
+ WARN_ON(offset & 3);
+ gk20a_mem_wr32(g, mem, offset / sizeof(u32), data);
+}
+
+void gk20a_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
+ void *src, u32 size)
+{
+ WARN_ON(offset & 3);
+ WARN_ON(size & 3);
+
+ if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+ u8 *dest = (u8 *)mem->cpu_va + offset;
+
+ WARN_ON(!mem->cpu_va);
+#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
+ if (size)
+ gk20a_dbg(gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
+ dest, *src, size);
+#endif
+ memcpy(dest, src, size);
+ } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+ u32 *src_u32 = src;
+
+ nvgpu_pramin_access_batched(g, mem, offset, size,
+ pramin_access_batch_wr_n, &src_u32);
+ if (!mem->skip_wmb)
+ wmb();
+ } else {
+ WARN_ON("Accessing unallocated mem_desc");
+ }
+}
+
+void gk20a_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
+ u32 c, u32 size)
+{
+ WARN_ON(offset & 3);
+ WARN_ON(size & 3);
+ WARN_ON(c & ~0xff);
+
+ c &= 0xff;
+
+ if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+ u8 *dest = (u8 *)mem->cpu_va + offset;
+
+ WARN_ON(!mem->cpu_va);
+#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
+ if (size)
+ gk20a_dbg(gpu_dbg_mem, " %p = 0x%x [times %d]",
+ dest, c, size);
+#endif
+ memset(dest, c, size);
+ } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+ u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);
+ u32 *p = &repeat_value;
+
+ nvgpu_pramin_access_batched(g, mem, offset, size,
+ pramin_access_batch_set, &p);
+ if (!mem->skip_wmb)
+ wmb();
+ } else {
+ WARN_ON("Accessing unallocated mem_desc");
+ }
+}
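
The new file above collects the aperture-aware accessors in one place: gk20a_mem_begin() creates a temporary CPU mapping for a sysmem buffer (and deliberately returns 0 without mapping for vidmem, or when PRAMIN access is forced), the rd/wr/set helpers then go through either that mapping or the batched PRAMIN path, and gk20a_mem_end() drops the mapping again. A minimal caller sketch, using only the functions moved here (the helper itself is hypothetical, not part of the patch):

    /* Hypothetical caller: read-modify-write one word of an allocated
     * mem_desc. Works for both apertures, since the begin()/end() pair
     * degrades to a no-op on the vidmem/PRAMIN path. */
    static int example_patch_word(struct gk20a *g, struct mem_desc *mem,
                                  u32 w, u32 val)
    {
            int err = gk20a_mem_begin(g, mem);  /* vmap, sysmem only */

            if (err)
                    return err;

            if (gk20a_mem_rd32(g, mem, w) != val)
                    gk20a_mem_wr32(g, mem, w, val);

            gk20a_mem_end(g, mem);              /* vunmap, sysmem only */
            return 0;
    }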
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 2fe76d80b..e78eb9412 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -56,21 +56,18 @@
static void gk20a_vidmem_clear_mem_worker(struct work_struct *work);
#endif
-static inline void
-set_vidmem_page_alloc(struct scatterlist *sgl, u64 addr)
+void set_vidmem_page_alloc(struct scatterlist *sgl, u64 addr)
{
/* set bit 0 to indicate vidmem allocation */
sg_dma_address(sgl) = (addr | 1ULL);
}
-static inline bool
-is_vidmem_page_alloc(u64 addr)
+bool is_vidmem_page_alloc(u64 addr)
{
return !!(addr & 1ULL);
}
-struct nvgpu_page_alloc *
-get_vidmem_page_alloc(struct scatterlist *sgl)
+struct nvgpu_page_alloc *get_vidmem_page_alloc(struct scatterlist *sgl)
{
u64 addr;
@@ -84,187 +81,6 @@ get_vidmem_page_alloc(struct scatterlist *sgl)
return (struct nvgpu_page_alloc *)(uintptr_t)addr;
}
-int gk20a_mem_begin(struct gk20a *g, struct mem_desc *mem)
-{
- void *cpu_va;
-
- if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
- return 0;
-
- if (WARN_ON(mem->cpu_va)) {
- gk20a_warn(dev_from_gk20a(g), "nested %s", __func__);
- return -EBUSY;
- }
-
- cpu_va = vmap(mem->pages,
- PAGE_ALIGN(mem->size) >> PAGE_SHIFT,
- 0, pgprot_writecombine(PAGE_KERNEL));
-
- if (WARN_ON(!cpu_va))
- return -ENOMEM;
-
- mem->cpu_va = cpu_va;
- return 0;
-}
-
-void gk20a_mem_end(struct gk20a *g, struct mem_desc *mem)
-{
- if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
- return;
-
- vunmap(mem->cpu_va);
- mem->cpu_va = NULL;
-}
-
-u32 gk20a_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w)
-{
- u32 data = 0;
-
- if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
- u32 *ptr = mem->cpu_va;
-
- WARN_ON(!ptr);
- data = ptr[w];
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
- gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
-#endif
- } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
- u32 value;
- u32 *p = &value;
-
- nvgpu_pramin_access_batched(g, mem, w * sizeof(u32),
- sizeof(u32), pramin_access_batch_rd_n, &p);
-
- data = value;
-
- } else {
- WARN_ON("Accessing unallocated mem_desc");
- }
-
- return data;
-}
-
-u32 gk20a_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset)
-{
- WARN_ON(offset & 3);
- return gk20a_mem_rd32(g, mem, offset / sizeof(u32));
-}
-
-void gk20a_mem_rd_n(struct gk20a *g, struct mem_desc *mem,
- u32 offset, void *dest, u32 size)
-{
- WARN_ON(offset & 3);
- WARN_ON(size & 3);
-
- if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
- u8 *src = (u8 *)mem->cpu_va + offset;
-
- WARN_ON(!mem->cpu_va);
- memcpy(dest, src, size);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
- if (size)
- gk20a_dbg(gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
- src, *dest, size);
-#endif
- } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
- u32 *dest_u32 = dest;
-
- nvgpu_pramin_access_batched(g, mem, offset, size,
- pramin_access_batch_rd_n, &dest_u32);
- } else {
- WARN_ON("Accessing unallocated mem_desc");
- }
-}
-
-void gk20a_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data)
-{
- if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
- u32 *ptr = mem->cpu_va;
-
- WARN_ON(!ptr);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
- gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
-#endif
- ptr[w] = data;
- } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
- u32 value = data;
- u32 *p = &value;
-
- nvgpu_pramin_access_batched(g, mem, w * sizeof(u32),
- sizeof(u32), pramin_access_batch_wr_n, &p);
- if (!mem->skip_wmb)
- wmb();
- } else {
- WARN_ON("Accessing unallocated mem_desc");
- }
-}
-
-void gk20a_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data)
-{
- WARN_ON(offset & 3);
- gk20a_mem_wr32(g, mem, offset / sizeof(u32), data);
-}
-
-void gk20a_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
- void *src, u32 size)
-{
- WARN_ON(offset & 3);
- WARN_ON(size & 3);
-
- if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
- u8 *dest = (u8 *)mem->cpu_va + offset;
-
- WARN_ON(!mem->cpu_va);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
- if (size)
- gk20a_dbg(gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
- dest, *src, size);
-#endif
- memcpy(dest, src, size);
- } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
- u32 *src_u32 = src;
-
- nvgpu_pramin_access_batched(g, mem, offset, size,
- pramin_access_batch_wr_n, &src_u32);
- if (!mem->skip_wmb)
- wmb();
- } else {
- WARN_ON("Accessing unallocated mem_desc");
- }
-}
-
-void gk20a_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
- u32 c, u32 size)
-{
- WARN_ON(offset & 3);
- WARN_ON(size & 3);
- WARN_ON(c & ~0xff);
-
- c &= 0xff;
-
- if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
- u8 *dest = (u8 *)mem->cpu_va + offset;
-
- WARN_ON(!mem->cpu_va);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
- if (size)
- gk20a_dbg(gpu_dbg_mem, " %p = 0x%x [times %d]",
- dest, c, size);
-#endif
- memset(dest, c, size);
- } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
- u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);
- u32 *p = &repeat_value;
-
- nvgpu_pramin_access_batched(g, mem, offset, size,
- pramin_access_batch_set, &p);
- if (!mem->skip_wmb)
- wmb();
- } else {
- WARN_ON("Accessing unallocated mem_desc");
- }
-}
-
/*
* GPU mapping life cycle
* ======================
@@ -3121,29 +2937,6 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
}
#endif
-u32 __gk20a_aperture_mask(struct gk20a *g, enum gk20a_aperture aperture,
- u32 sysmem_mask, u32 vidmem_mask)
-{
- switch (aperture) {
- case APERTURE_SYSMEM:
- /* sysmem for dgpus; some igpus consider system memory vidmem */
- return g->mm.vidmem_is_vidmem ? sysmem_mask : vidmem_mask;
- case APERTURE_VIDMEM:
- /* for dgpus only */
- return vidmem_mask;
- case APERTURE_INVALID:
- WARN_ON("Bad aperture");
- }
- return 0;
-}
-
-u32 gk20a_aperture_mask(struct gk20a *g, struct mem_desc *mem,
- u32 sysmem_mask, u32 vidmem_mask)
-{
- return __gk20a_aperture_mask(g, mem->aperture,
- sysmem_mask, vidmem_mask);
-}
-
int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
struct mem_desc *mem)
{
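
The vidmem helpers un-inlined above rely on a small convention: bit 0 of a scatterlist's DMA address is never set for a real, word-aligned sysmem address, so the vidmem path reuses it to tag the stored value as a struct nvgpu_page_alloc pointer rather than a DMA address. A hedged round-trip sketch (the wrapper is illustrative, not in the patch):

    /* Illustrative only: stash a vidmem page-alloc handle in a
     * scatterlist and recover it, exercising the bit-0 tag. */
    static struct nvgpu_page_alloc *
    example_tag_round_trip(struct scatterlist *sgl,
                           struct nvgpu_page_alloc *alloc)
    {
            set_vidmem_page_alloc(sgl, (u64)(uintptr_t)alloc);

            /* The low bit now marks this entry as vidmem. */
            WARN_ON(!is_vidmem_page_alloc(sg_dma_address(sgl)));

            /* Masks the tag bit off and returns the original pointer. */
            return get_vidmem_page_alloc(sgl);
    }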
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index b425ec5c0..da8bbb0af 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -24,6 +24,7 @@
#include
#include
+#include <nvgpu/mem_desc.h>
#include
#include
#include
@@ -39,60 +40,12 @@
outer_flush_range(pa, pa + (size_t)(size)); \
} while (0)
-/*
- * Real location of a buffer - gk20a_aperture_mask() will deduce what will be
- * told to the gpu about the aperture, but this flag designates where the
- * memory actually was allocated from.
- */
-enum gk20a_aperture {
- APERTURE_INVALID, /* unallocated or N/A */
- APERTURE_SYSMEM,
- APERTURE_VIDMEM
-};
-
enum gk20a_mem_rw_flag {
gk20a_mem_flag_none = 0,
gk20a_mem_flag_read_only = 1,
gk20a_mem_flag_write_only = 2,
};
-static inline const char *gk20a_aperture_str(enum gk20a_aperture aperture)
-{
- switch (aperture) {
- case APERTURE_INVALID: return "invalid";
- case APERTURE_SYSMEM: return "sysmem";
- case APERTURE_VIDMEM: return "vidmem";
- };
- return "UNKNOWN";
-}
-
-struct mem_desc {
- void *cpu_va; /* sysmem only */
- struct page **pages; /* sysmem only */
- struct sg_table *sgt;
- enum gk20a_aperture aperture;
- size_t size;
- u64 gpu_va;
- bool fixed; /* vidmem only */
- bool user_mem; /* vidmem only */
- struct nvgpu_allocator *allocator; /* vidmem only */
- struct nvgpu_list_node clear_list_entry; /* vidmem only */
- bool skip_wmb;
- unsigned long flags;
-};
-
-static inline struct mem_desc *
-mem_desc_from_clear_list_entry(struct nvgpu_list_node *node)
-{
- return (struct mem_desc *)
- ((uintptr_t)node - offsetof(struct mem_desc, clear_list_entry));
-};
-
-struct mem_desc_sub {
- u32 offset;
- u32 size;
-};
-
struct gpfifo_desc {
struct mem_desc mem;
u32 entry_num;
@@ -511,36 +464,9 @@ enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
u64 base, u64 size);
enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size);
-struct nvgpu_page_alloc *
-get_vidmem_page_alloc(struct scatterlist *sgl);
-
-/*
- * Buffer accessors - wrap between begin() and end() if there is no permanent
- * kernel mapping for this buffer.
- */
-
-int gk20a_mem_begin(struct gk20a *g, struct mem_desc *mem);
-/* nop for null mem, like with free() or vunmap() */
-void gk20a_mem_end(struct gk20a *g, struct mem_desc *mem);
-
-/* word-indexed offset */
-u32 gk20a_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w);
-/* byte offset (32b-aligned) */
-u32 gk20a_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset);
-/* memcpy to cpu, offset and size in bytes (32b-aligned) */
-void gk20a_mem_rd_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
- void *dest, u32 size);
-
-/* word-indexed offset */
-void gk20a_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data);
-/* byte offset (32b-aligned) */
-void gk20a_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data);
-/* memcpy from cpu, offset and size in bytes (32b-aligned) */
-void gk20a_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
- void *src, u32 size);
-/* size and offset in bytes (32b-aligned), filled with the constant byte c */
-void gk20a_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
- u32 c, u32 size);
+void set_vidmem_page_alloc(struct scatterlist *sgl, u64 addr);
+bool is_vidmem_page_alloc(u64 addr);
+struct nvgpu_page_alloc *get_vidmem_page_alloc(struct scatterlist *sgl);
#if 0 /*related to addr bits above, concern below TBD on which is accurate */
#define bar1_instance_block_shift_gk20a() (max_physaddr_bits_gk20a() -\
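
After this header diet, gk20a_aperture_mask() and its raw __gk20a_aperture_mask() variant come from <nvgpu/mem_desc.h>; their job is to pick one of two caller-supplied register encodings based on where a buffer actually lives. A hedged sketch of the usual pattern (the field values are made-up placeholders; real encodings come from the generated hw_*.h headers):

    /* Sketch with assumed encodings: select the aperture field for a
     * hardware pointer to 'mem'. vidmem_is_vidmem is honored inside
     * gk20a_aperture_mask(). */
    static u32 example_aperture_field(struct gk20a *g, struct mem_desc *mem)
    {
            const u32 sysmem_f = 0x2;   /* placeholder encoding */
            const u32 vidmem_f = 0x0;   /* placeholder encoding */

            return gk20a_aperture_mask(g, mem, sysmem_f, vidmem_f);
    }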
diff --git a/drivers/gpu/nvgpu/include/nvgpu/mem_desc.h b/drivers/gpu/nvgpu/include/nvgpu/mem_desc.h
new file mode 100644
index 000000000..528fd7bc4
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/mem_desc.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVGPU_MEM_DESC_H__
+#define __NVGPU_MEM_DESC_H__
+
+#include <linux/types.h>
+
+#include <nvgpu/list.h>
+
+struct page;
+struct sg_table;
+
+struct gk20a;
+struct nvgpu_allocator;
+
+/*
+ * Real location of a buffer - gk20a_aperture_mask() will deduce what will be
+ * told to the gpu about the aperture, but this flag designates where the
+ * memory actually was allocated from.
+ */
+enum gk20a_aperture {
+ APERTURE_INVALID, /* unallocated or N/A */
+ APERTURE_SYSMEM,
+ APERTURE_VIDMEM
+};
+
+struct mem_desc {
+ void *cpu_va; /* sysmem only */
+ struct page **pages; /* sysmem only */
+ struct sg_table *sgt;
+ enum gk20a_aperture aperture;
+ size_t size;
+ u64 gpu_va;
+ bool fixed; /* vidmem only */
+ bool user_mem; /* vidmem only */
+ struct nvgpu_allocator *allocator; /* vidmem only */
+ struct nvgpu_list_node clear_list_entry; /* vidmem only */
+ bool skip_wmb;
+ unsigned long flags;
+};
+
+static inline struct mem_desc *
+mem_desc_from_clear_list_entry(struct nvgpu_list_node *node)
+{
+ return (struct mem_desc *)
+ ((uintptr_t)node - offsetof(struct mem_desc, clear_list_entry));
+};
+
+struct mem_desc_sub {
+ u32 offset;
+ u32 size;
+};
+
+static inline const char *gk20a_aperture_str(enum gk20a_aperture aperture)
+{
+ switch (aperture) {
+ case APERTURE_INVALID: return "invalid";
+ case APERTURE_SYSMEM: return "sysmem";
+ case APERTURE_VIDMEM: return "vidmem";
+ };
+ return "UNKNOWN";
+}
+
+/*
+ * Buffer accessors - wrap between begin() and end() if there is no permanent
+ * kernel mapping for this buffer.
+ */
+
+int gk20a_mem_begin(struct gk20a *g, struct mem_desc *mem);
+/* nop for null mem, like with free() or vunmap() */
+void gk20a_mem_end(struct gk20a *g, struct mem_desc *mem);
+
+/* word-indexed offset */
+u32 gk20a_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w);
+/* byte offset (32b-aligned) */
+u32 gk20a_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset);
+/* memcpy to cpu, offset and size in bytes (32b-aligned) */
+void gk20a_mem_rd_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
+ void *dest, u32 size);
+
+/* word-indexed offset */
+void gk20a_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data);
+/* byte offset (32b-aligned) */
+void gk20a_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data);
+/* memcpy from cpu, offset and size in bytes (32b-aligned) */
+void gk20a_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
+ void *src, u32 size);
+/* size and offset in bytes (32b-aligned), filled with the constant byte c */
+void gk20a_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
+ u32 c, u32 size);
+
+u32 __gk20a_aperture_mask(struct gk20a *g, enum gk20a_aperture aperture,
+ u32 sysmem_mask, u32 vidmem_mask);
+u32 gk20a_aperture_mask(struct gk20a *g, struct mem_desc *mem,
+ u32 sysmem_mask, u32 vidmem_mask);
+
+#endif
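
The open-coded mem_desc_from_clear_list_entry() in the new header is the usual container_of() arithmetic written out by hand, presumably to keep the header light on Linux includes. A short sketch of what it guarantees (illustrative function, not in the patch):

    /* Illustrative: recover the mem_desc wrapping a clear-list node.
     * Equivalent to container_of(node, struct mem_desc, clear_list_entry). */
    static struct mem_desc *example_from_node(struct nvgpu_list_node *node)
    {
            struct mem_desc *mem = mem_desc_from_clear_list_entry(node);

            WARN_ON(&mem->clear_list_entry != node);
            return mem;
    }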