linux-nvgpu/drivers/gpu/nvgpu/common/linux/dmabuf.c
Alex Waterman 0c5d0c6a9e gpu: nvgpu: Begin reorganizing VM mapping/unmapping
Move vm_priv.h to <nvgpu/linux/vm.h> and rename nvgpu_vm_map()
to nvgpu_vm_map_linux(). Also remove a redundant unmap function
from the unmap path. These changes are the beginning of reworking
the nvgpu Linux mapping and unmapping code.

The rest of this patch is just the necessary changes to use the
new map function naming and the new path to the Linux vm header.

Patch Series Goal
-----------------

There are two major goals for this patch series. Note that these
goals are not achieved in this patch; they will be completed by
subsequent patches.

  1.  Remove all last vestiges of Linux code from common/mm/vm.c
  2.  Implement map caching in the common/mm/vm.c code

To accomplish this, the struct nvgpu_mapped_buf data structure
first needs to be made completely Linux-free. That means
implementing an abstraction to hold the Linux-specific state that
mapped buffers carry around (SGT, dma_buf). This is why the
vm_priv.h code has been moved: it will need to be included by the
<nvgpu/vm.h> header so that the OS-specific struct can be pulled
into struct nvgpu_mapped_buf.
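
For illustration, that split might look roughly like the sketch
below. This is not what this patch implements; the struct layout
and all field names here are hypothetical:

    /*
     * Sketch only: OS-specific mapping state split out of the
     * common mapped-buffer struct. The Linux variant would live in
     * <nvgpu/linux/vm.h>; other OSes would supply their own.
     */
    struct nvgpu_mapped_buf_priv {
            struct dma_buf *dmabuf;
            struct sg_table *sgt;
    };

    struct nvgpu_mapped_buf {
            struct vm_gk20a *vm;
            u64 addr;
            u64 size;
            struct nvgpu_ref ref;

            /* Opaque to common code; defined per-OS. */
            struct nvgpu_mapped_buf_priv os_priv;
    };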

Next, renaming nvgpu_vm_map() to nvgpu_vm_map_linux() is in
preparation for adding a new nvgpu_vm_map() that handles the map
caching with nvgpu_mapped_buf. The mapping code is fairly
straightforward: nvgpu_vm_map() does the OS-generic work; each OS
then calls this function from an nvgpu_vm_map_<OS>() (or the like)
that does any OS-specific adjustments and management.
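
Roughly, the intended layering looks like the prototypes below (a
sketch only; these signatures are assumptions, not settled by this
patch):

    /* Common code (common/mm/vm.c): OS-agnostic mapping; the map
     * caching via nvgpu_mapped_buf comes in a later patch. */
    struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
                                          struct nvgpu_sgt *sgt,
                                          u64 map_addr, u64 size,
                                          u32 flags);

    /* Linux (common/linux/vm.c): pin the dma_buf, build the SGT,
     * then defer to the common function for everything else. */
    u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
                           struct dma_buf *dmabuf,
                           u64 offset_align, u32 flags);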

Freeing buffers is much trickier, however. The maps are all
reference counted, since userspace does not track buffers and
expects us to handle this instead. Ugh! Because of the ref-counts,
the free code will require a callback into the OS-specific code,
since the OS-specific code cannot free a buffer directly. This
makes the path for freeing a buffer quite convoluted.
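
The rough shape of that free path is sketched below. All
identifiers here other than nvgpu_ref_put() and nvgpu_kfree() are
hypothetical; the OS hook in particular is just a placeholder:

    /* Only the last put really frees. The release callback lets
     * the OS half (dma_buf detach/put on Linux) be torn down too,
     * since common code cannot do that itself. */
    static void nvgpu_vm_unmap_ref(struct nvgpu_ref *ref)
    {
            struct nvgpu_mapped_buf *mapped_buffer =
                    mapped_buffer_from_ref(ref); /* container_of() */
            struct gk20a *g = gk20a_from_vm(mapped_buffer->vm);

            nvgpu_vm_unmap_common(mapped_buffer); /* common teardown */
            nvgpu_os_unmap(mapped_buffer);        /* OS-specific hook */
            nvgpu_kfree(g, mapped_buffer);
    }

    /* Any path that unmaps a buffer just drops a reference. */
    void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset)
    {
            struct nvgpu_mapped_buf *mapped_buffer =
                    __nvgpu_vm_find_mapped_buf(vm, offset);

            if (mapped_buffer)
                    nvgpu_ref_put(&mapped_buffer->ref,
                                  nvgpu_vm_unmap_ref);
    }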

JIRA NVGPU-30
JIRA NVGPU-71

Change-Id: I5e0975f60663a0d6cf0a6bd90e099f51e02c2395
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1578896
GVS: Gerrit_Virtual_Submit
Reviewed-by: David Martinez Nieto <dmartineznie@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
2017-10-24 15:16:50 -07:00

/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>

#include <nvgpu/comptags.h>
#include <nvgpu/enabled.h>

#include <nvgpu/linux/vm.h>
#include <nvgpu/linux/vidmem.h>

#include "gk20a/gk20a.h"
#include "gk20a/platform_gk20a.h"

#include "dmabuf.h"
#include "os_linux.h"

static void gk20a_mm_delete_priv(void *_priv)
{
	struct gk20a_buffer_state *s, *s_tmp;
	struct gk20a_dmabuf_priv *priv = _priv;
	struct gk20a *g;

	if (!priv)
		return;

	g = priv->g;

	if (priv->comptags.lines) {
		BUG_ON(!priv->comptag_allocator);
		gk20a_comptaglines_free(priv->comptag_allocator,
					priv->comptags.offset,
					priv->comptags.allocated_lines);
	}

	/* Free buffer states */
	nvgpu_list_for_each_entry_safe(s, s_tmp, &priv->states,
				       gk20a_buffer_state, list) {
		gk20a_fence_put(s->fence);
		nvgpu_list_del(&s->list);
		nvgpu_kfree(g, s);
	}

	nvgpu_kfree(g, priv);
}

enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
					  struct dma_buf *dmabuf)
{
	struct gk20a *buf_owner = nvgpu_vidmem_buf_owner(dmabuf);
	bool unified_memory = nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY);

	if (buf_owner == NULL) {
		/* Not nvgpu-allocated, assume system memory */
		return APERTURE_SYSMEM;
	} else if (WARN_ON(buf_owner == g && unified_memory)) {
		/* Looks like our video memory, but this gpu doesn't support
		 * it. Warn about a bug and bail out */
		nvgpu_warn(g,
			"dmabuf is our vidmem but we don't have local vidmem");
		return APERTURE_INVALID;
	} else if (buf_owner != g) {
		/* Someone else's vidmem */
		return APERTURE_INVALID;
	} else {
		/* Yay, buf_owner == g */
		return APERTURE_VIDMEM;
	}
}

struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf)
{
	struct gk20a_dmabuf_priv *priv;

	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (WARN_ON(!priv))
		return ERR_PTR(-EINVAL);

	nvgpu_mutex_acquire(&priv->lock);

	/* Attach and map only on the first pin; subsequent pins just
	 * reuse the cached SGT under pin_count. */
	if (priv->pin_count == 0) {
		priv->attach = dma_buf_attach(dmabuf, dev);
		if (IS_ERR(priv->attach)) {
			nvgpu_mutex_release(&priv->lock);
			return ERR_CAST(priv->attach);
		}

		priv->sgt = dma_buf_map_attachment(priv->attach,
						   DMA_BIDIRECTIONAL);
		if (IS_ERR(priv->sgt)) {
			dma_buf_detach(dmabuf, priv->attach);
			nvgpu_mutex_release(&priv->lock);
			return priv->sgt;
		}
	}

	priv->pin_count++;
	nvgpu_mutex_release(&priv->lock);
	return priv->sgt;
}

void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
		    struct sg_table *sgt)
{
	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);

	if (IS_ERR(priv) || !priv)
		return;

	nvgpu_mutex_acquire(&priv->lock);
	WARN_ON(priv->sgt != sgt);
	priv->pin_count--;
	WARN_ON(priv->pin_count < 0);

	/* Drop the attachment only when the last pin goes away. */
	if (priv->pin_count == 0) {
		dma_buf_unmap_attachment(priv->attach, priv->sgt,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(dmabuf, priv->attach);
	}
	nvgpu_mutex_release(&priv->lock);
}

int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev)
{
	struct gk20a *g = gk20a_get_platform(dev)->g;
	struct gk20a_dmabuf_priv *priv;
	static u64 priv_count = 0;

	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (likely(priv))
		return 0;

	nvgpu_mutex_acquire(&g->mm.priv_lock);

	/* Re-check under the lock: someone else may have allocated the
	 * priv data while we were waiting. */
	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (priv)
		goto priv_exist_or_err;

	priv = nvgpu_kzalloc(g, sizeof(*priv));
	if (!priv) {
		priv = ERR_PTR(-ENOMEM);
		goto priv_exist_or_err;
	}

	nvgpu_mutex_init(&priv->lock);
	nvgpu_init_list_node(&priv->states);
	priv->buffer_id = ++priv_count;
	priv->g = g;
	dma_buf_set_drvdata(dmabuf, dev, priv, gk20a_mm_delete_priv);

priv_exist_or_err:
	nvgpu_mutex_release(&g->mm.priv_lock);
	if (IS_ERR(priv))
		return -ENOMEM;

	return 0;
}

int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct gk20a *g,
			   u64 offset, struct gk20a_buffer_state **state)
{
	int err = 0;
	struct gk20a_dmabuf_priv *priv;
	struct gk20a_buffer_state *s;
	struct device *dev = dev_from_gk20a(g);

	if (WARN_ON(offset >= (u64)dmabuf->size))
		return -EINVAL;

	err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev);
	if (err)
		return err;

	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (WARN_ON(!priv))
		return -ENOSYS;

	nvgpu_mutex_acquire(&priv->lock);

	nvgpu_list_for_each_entry(s, &priv->states, gk20a_buffer_state, list)
		if (s->offset == offset)
			goto out;

	/* State not found, create state. */
	s = nvgpu_kzalloc(g, sizeof(*s));
	if (!s) {
		err = -ENOMEM;
		goto out;
	}

	s->offset = offset;
	nvgpu_init_list_node(&s->list);
	nvgpu_mutex_init(&s->lock);
	nvgpu_list_add_tail(&s->list, &priv->states);

out:
	nvgpu_mutex_release(&priv->lock);
	if (!err)
		*state = s;

	return err;
}

int gk20a_mm_get_buffer_info(struct device *dev, int dmabuf_fd,
			     u64 *buffer_id, u64 *buffer_len)
{
	struct dma_buf *dmabuf;
	struct gk20a_dmabuf_priv *priv;
	int err = 0;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf)) {
		dev_warn(dev, "%s: fd %d is not a dmabuf", __func__, dmabuf_fd);
		return PTR_ERR(dmabuf);
	}

	err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev);
	if (err) {
		dev_warn(dev, "Failed to allocate dmabuf drvdata (err = %d)",
			 err);
		goto clean_up;
	}

	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (likely(priv)) {
		*buffer_id = priv->buffer_id;
		*buffer_len = dmabuf->size;
	}

clean_up:
	dma_buf_put(dmabuf);
	return err;
}