gpu: nvgpu: Add nvgpu_os_buffer

Add a generic nvgpu_os_buffer type, defined by each OS, to abstract
a "user" buffer. This allows the comptag interface to be used in the
core code.

The end goal of this patch is to allow the OS-specific mapping code
to call a generic mapping function that handles most of the mapping
logic. The problem is that much of this logic involves comptags,
which are highly dependent on the operating system's buffer
management scheme. With this abstraction, each OS can implement the
buffer comptag mechanics however it wishes, without the core MM code
caring.

JIRA NVGPU-30
JIRA NVGPU-223

Change-Id: Iaf64bc52e01ef3f262b4f8f9173a84384db7dc3e
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1583986
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 8428c82c81 (parent ee4970a33f)
Author: Alex Waterman, 2017-10-19 15:34:47 -07:00
Committed by: mobile promotions
7 changed files with 41 additions and 23 deletions

@@ -973,6 +973,10 @@ __releases(&l->cde_app->mutex)
 	struct gk20a *g = &l->g;
 	struct gk20a_cde_ctx *cde_ctx = NULL;
 	struct gk20a_comptags comptags;
+	struct nvgpu_os_buffer os_buf = {
+		compbits_scatter_buf,
+		dev_from_gk20a(g)
+	};
 	u64 mapped_compbits_offset = 0;
 	u64 compbits_size = 0;
 	u64 mapped_scatterbuffer_offset = 0;

@@ -1103,7 +1107,7 @@ __releases(&l->cde_app->mutex)
 	}

 	/* store source buffer compression tags */
-	gk20a_get_comptags(dev_from_gk20a(g), compbits_scatter_buf, &comptags);
+	gk20a_get_comptags(&os_buf, &comptags);
 	cde_ctx->surf_param_offset = comptags.offset;
 	cde_ctx->surf_param_lines = comptags.lines;

@@ -18,12 +18,15 @@
 #include <nvgpu/comptags.h>
+#include <nvgpu/linux/vm.h>

 #include "dmabuf.h"

-void gk20a_get_comptags(struct device *dev, struct dma_buf *dmabuf,
+void gk20a_get_comptags(struct nvgpu_os_buffer *buf,
 			struct gk20a_comptags *comptags)
 {
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
+	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(buf->dmabuf,
+							     buf->dev);

 	if (!comptags)
 		return;

@@ -37,12 +40,12 @@ void gk20a_get_comptags(struct device *dev, struct dma_buf *dmabuf,
 }

 int gk20a_alloc_comptags(struct gk20a *g,
-			 struct device *dev,
-			 struct dma_buf *dmabuf,
+			 struct nvgpu_os_buffer *buf,
 			 struct gk20a_comptag_allocator *allocator,
 			 u32 lines)
 {
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
+	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(buf->dmabuf,
+							     buf->dev);
 	u32 ctaglines_allocsize;
 	u32 offset;
 	int err;

@@ -48,19 +48,6 @@ struct gk20a_dmabuf_priv {
 	u64 buffer_id;
 };

-/*
- * These are implemented in common/linux/comptags.c - these are dmabuf related
- * functions though so they are defined here. They cannot be defined in
- * <nvgpu/comptags.h> since that file must be OS agnostic.
- */
-int gk20a_alloc_comptags(struct gk20a *g,
-			 struct device *dev,
-			 struct dma_buf *dmabuf,
-			 struct gk20a_comptag_allocator *allocator,
-			 u32 lines);
-void gk20a_get_comptags(struct device *dev, struct dma_buf *dmabuf,
-			struct gk20a_comptags *comptags);
-
 struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf);
 void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
 		    struct sg_table *sgt);

@@ -165,6 +165,7 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 	struct nvgpu_sgt *nvgpu_sgt = NULL;
 	struct sg_table *sgt;
 	struct nvgpu_mapped_buf *mapped_buffer = NULL;
+	struct nvgpu_os_buffer os_buf = { dmabuf, dev };
 	enum nvgpu_aperture aperture;
 	bool va_allocated = false;
 	bool clear_ctags = false;

@@ -277,11 +278,11 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 	if (!vm->enable_ctag)
 		binfo.ctag_lines = 0;

-	gk20a_get_comptags(dev, dmabuf, &comptags);
+	gk20a_get_comptags(&os_buf, &comptags);

 	if (binfo.ctag_lines && !comptags.lines) {
 		/* allocate compression resources if needed */
-		err = gk20a_alloc_comptags(g, dev, dmabuf,
+		err = gk20a_alloc_comptags(g, &os_buf,
 					   &g->gr.comp_tags,
 					   binfo.ctag_lines);
 		if (err) {

@@ -296,8 +297,7 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 			goto clean_up;
 		}
 	} else {
-		gk20a_get_comptags(dev,
-				   dmabuf, &comptags);
+		gk20a_get_comptags(&os_buf, &comptags);

 		if (g->ops.ltc.cbc_ctrl)
 			g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_clear,

@@ -20,6 +20,7 @@
 #include <nvgpu/lock.h>

 struct gk20a;
+struct nvgpu_os_buffer;

 struct gk20a_comptags {
 	u32 offset;

@@ -52,6 +53,17 @@ int gk20a_comptaglines_alloc(struct gk20a_comptag_allocator *allocator,
 void gk20a_comptaglines_free(struct gk20a_comptag_allocator *allocator,
 			     u32 offset, u32 len);

+/*
+ * Defined by OS specific code since comptags are stored in a highly OS specific
+ * way.
+ */
+int gk20a_alloc_comptags(struct gk20a *g,
+			 struct nvgpu_os_buffer *buf,
+			 struct gk20a_comptag_allocator *allocator,
+			 u32 lines);
+void gk20a_get_comptags(struct nvgpu_os_buffer *buf,
+			struct gk20a_comptags *comptags);
+
 #endif

@@ -33,11 +33,17 @@
 struct sg_table;
 struct dma_buf;
+struct device;

 struct vm_gk20a;
 struct vm_gk20a_mapping_batch;
 struct nvgpu_vm_area;

+struct nvgpu_os_buffer {
+	struct dma_buf *dmabuf;
+	struct device *dev;
+};
+
 /* NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL must be set */
 int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		       struct dma_buf *dmabuf,

@@ -107,6 +107,12 @@ struct nvgpu_mapped_buf {
 	bool va_allocated;
 };

+/*
+ * Defined by each OS. Allows the common VM code to do things to the OS
+ * specific buffer structures.
+ */
+struct nvgpu_os_buffer;
+
 static inline struct nvgpu_mapped_buf *
 nvgpu_mapped_buf_from_buffer_list(struct nvgpu_list_node *node)
 {