gpu: nvgpu: Disable compression for k6.1+

dmabuf internals that nvgpu relies upon for storing meta-data for
compressible buffers changed in k6.1. For now, disable compression
on all k6.1+ kernels.

Additionally, fix numerous compilation issues due to the bit-rotted
compression config. All normal Tegra products support compression
and thus have this config enabled. Over the last several years,
compression-dependent code crept in that wasn't protected under the
compression config.

Bug 3844023

Change-Id: Ie5b9b5a2bcf1a763806c087af99203d62d0cb6e0
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2820846
(cherry picked from commit 03533066aa)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2860925
Tested-by: Jonathan Hunter <jonathanh@nvidia.com>
Reviewed-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-by: Jonathan Hunter <jonathanh@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
This commit is contained in:
Alex Waterman
2022-12-02 21:06:51 +00:00
committed by mobile promotions
parent e914561b6e
commit ac6e0c3766
12 changed files with 81 additions and 21 deletions

View File

@@ -495,8 +495,12 @@ nvgpu-y += \
os/linux/periodic_timer.o os/linux/periodic_timer.o
nvgpu-$(CONFIG_NVGPU_IVM_BUILD) += \ nvgpu-$(CONFIG_NVGPU_IVM_BUILD) += \
os/linux/nvgpu_ivm.o \ os/linux/nvgpu_ivm.o
ifeq ($(CONFIG_NVGPU_COMPRESSION),y)
nvgpu-$(CONFIG_NVGPU_IVM_BUILD) += \
common/cbc/contig_pool.o common/cbc/contig_pool.o
endif
nvgpu-$(CONFIG_NVGPU_VPR) += os/linux/vpr.o nvgpu-$(CONFIG_NVGPU_VPR) += os/linux/vpr.o

View File

@@ -35,8 +35,16 @@ CONFIG_NVGPU_HAL_NON_FUSA := y
# Support recovery on failure (which may involve engine reset) # Support recovery on failure (which may involve engine reset)
CONFIG_NVGPU_RECOVERY := y CONFIG_NVGPU_RECOVERY := y
# Support for compression # Enable support for compression on pre-K6.1 kernels. K6.1+ changes the
# internals of dma-bufs which breaks some hacks we implemented to support
# compression meta-data tracking. For now, on K6.1+ kernels, just disable
# compression. This is a hack that should be fixed.
CONFIG_NVGPU_COMPRESSION := y CONFIG_NVGPU_COMPRESSION := y
ifeq ($(VERSION),6)
ifneq ($(PATCHLEVEL),0)
CONFIG_NVGPU_COMPRESSION := n
endif
endif
# Enable MIG Support # Enable MIG Support
CONFIG_NVGPU_MIG := y CONFIG_NVGPU_MIG := y

View File

@@ -49,11 +49,11 @@ struct nvgpu_ctag_buffer_info {
u32 flags; u32 flags;
#ifdef CONFIG_NVGPU_COMPRESSION #ifdef CONFIG_NVGPU_COMPRESSION
u32 ctag_offset;
s16 compr_kind; s16 compr_kind;
#endif #endif
s16 incompr_kind; s16 incompr_kind;
u32 ctag_offset;
}; };
#ifdef CONFIG_NVGPU_COMPRESSION #ifdef CONFIG_NVGPU_COMPRESSION
@@ -1546,7 +1546,9 @@ int nvgpu_vm_map(struct vm_gk20a *vm,
mapped_buffer->kind = map_key_kind; mapped_buffer->kind = map_key_kind;
mapped_buffer->va_allocated = va_allocated; mapped_buffer->va_allocated = va_allocated;
mapped_buffer->vm_area = vm_area; mapped_buffer->vm_area = vm_area;
#ifdef CONFIG_NVGPU_COMPRESSION
mapped_buffer->ctag_offset = binfo.ctag_offset; mapped_buffer->ctag_offset = binfo.ctag_offset;
#endif
mapped_buffer->rw_flag = rw; mapped_buffer->rw_flag = rw;
mapped_buffer->aperture = aperture; mapped_buffer->aperture = aperture;

View File

@@ -38,7 +38,9 @@
#include "os_linux.h" #include "os_linux.h"
#include "dmabuf_vidmem.h" #include "dmabuf_vidmem.h"
#ifdef CONFIG_NVGPU_COMPRESSION
void gk20a_mm_delete_priv(struct gk20a_dmabuf_priv *priv); void gk20a_mm_delete_priv(struct gk20a_dmabuf_priv *priv);
#endif
enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g, enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
struct dma_buf *dmabuf) struct dma_buf *dmabuf)
@@ -68,6 +70,7 @@ enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
#endif #endif
} }
#ifdef CONFIG_NVGPU_COMPRESSION
static struct gk20a_dmabuf_priv *dma_buf_ops_to_gk20a_priv( static struct gk20a_dmabuf_priv *dma_buf_ops_to_gk20a_priv(
struct dma_buf_ops *ops) struct dma_buf_ops *ops)
{ {
@@ -94,7 +97,6 @@ static void nvgpu_dma_buf_release(struct dma_buf *dmabuf)
nvgpu_mutex_acquire(&l->dmabuf_priv_list_lock); nvgpu_mutex_acquire(&l->dmabuf_priv_list_lock);
gk20a_mm_delete_priv(priv); gk20a_mm_delete_priv(priv);
nvgpu_mutex_release(&l->dmabuf_priv_list_lock); nvgpu_mutex_release(&l->dmabuf_priv_list_lock);
dmabuf->ops->release(dmabuf); dmabuf->ops->release(dmabuf);
} }
@@ -137,6 +139,7 @@ struct gk20a_dmabuf_priv *gk20a_dma_buf_get_drvdata(
return priv; return priv;
} }
#endif
struct sg_table *nvgpu_mm_pin(struct device *dev, struct sg_table *nvgpu_mm_pin(struct device *dev,
struct dma_buf *dmabuf, struct dma_buf_attachment **attachment, struct dma_buf *dmabuf, struct dma_buf_attachment **attachment,
@@ -178,6 +181,7 @@ void nvgpu_mm_unpin(struct device *dev,
/* This function must be called after acquiring the global level /* This function must be called after acquiring the global level
* dmabuf_priv_list_lock. * dmabuf_priv_list_lock.
*/ */
#ifdef CONFIG_NVGPU_COMPRESSION
void gk20a_mm_delete_priv(struct gk20a_dmabuf_priv *priv) void gk20a_mm_delete_priv(struct gk20a_dmabuf_priv *priv)
{ {
struct gk20a_buffer_state *s, *s_tmp; struct gk20a_buffer_state *s, *s_tmp;
@@ -325,6 +329,7 @@ out:
*state = s; *state = s;
return err; return err;
} }
#endif
static void *__gk20a_dmabuf_vmap(struct dma_buf *dmabuf) static void *__gk20a_dmabuf_vmap(struct dma_buf *dmabuf)
{ {

View File

@@ -66,6 +66,7 @@ gk20a_buffer_state_from_list(struct nvgpu_list_node *node)
((uintptr_t)node - offsetof(struct gk20a_buffer_state, list)); ((uintptr_t)node - offsetof(struct gk20a_buffer_state, list));
}; };
#ifdef CONFIG_NVGPU_COMPRESSION
struct gk20a_dmabuf_priv { struct gk20a_dmabuf_priv {
struct nvgpu_mutex lock; struct nvgpu_mutex lock;
@@ -100,6 +101,7 @@ struct gk20a_dmabuf_priv {
/* list node for tracking the dmabuf_priv instances per gpu */ /* list node for tracking the dmabuf_priv instances per gpu */
struct nvgpu_list_node list; struct nvgpu_list_node list;
}; };
#endif
struct sg_table *nvgpu_mm_pin(struct device *dev, struct sg_table *nvgpu_mm_pin(struct device *dev,
struct dma_buf *dmabuf, struct dma_buf *dmabuf,
@@ -111,6 +113,7 @@ void nvgpu_mm_unpin(struct device *dev,
struct dma_buf_attachment *attachment, struct dma_buf_attachment *attachment,
struct sg_table *sgt); struct sg_table *sgt);
#ifdef CONFIG_NVGPU_COMPRESSION
void gk20a_mm_delete_priv(struct gk20a_dmabuf_priv *priv); void gk20a_mm_delete_priv(struct gk20a_dmabuf_priv *priv);
int gk20a_dmabuf_alloc_or_get_drvdata(struct dma_buf *dmabuf, struct device *dev, int gk20a_dmabuf_alloc_or_get_drvdata(struct dma_buf *dmabuf, struct device *dev,
@@ -122,6 +125,8 @@ int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct gk20a *g,
void gk20a_dma_buf_priv_list_clear(struct nvgpu_os_linux *l); void gk20a_dma_buf_priv_list_clear(struct nvgpu_os_linux *l);
struct gk20a_dmabuf_priv *gk20a_dma_buf_get_drvdata( struct gk20a_dmabuf_priv *gk20a_dma_buf_get_drvdata(
struct dma_buf *dmabuf, struct device *device); struct dma_buf *dmabuf, struct device *device);
#endif
void *gk20a_dmabuf_vmap(struct dma_buf *dmabuf); void *gk20a_dmabuf_vmap(struct dma_buf *dmabuf);
void gk20a_dmabuf_vunmap(struct dma_buf *dmabuf, void *addr); void gk20a_dmabuf_vunmap(struct dma_buf *dmabuf, void *addr);

View File

@@ -139,12 +139,14 @@ static void nvgpu_init_vars(struct gk20a *g)
static void nvgpu_init_max_comptag(struct gk20a *g) static void nvgpu_init_max_comptag(struct gk20a *g)
{ {
#ifdef CONFIG_NVGPU_COMPRESSION
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
nvgpu_log_info(g, "total ram pages : %lu", totalram_pages()); nvgpu_log_info(g, "total ram pages : %lu", totalram_pages());
#else #else
nvgpu_log_info(g, "total ram pages : %lu", totalram_pages); nvgpu_log_info(g, "total ram pages : %lu", totalram_pages);
#endif #endif
g->max_comptag_mem = totalram_size_in_mb; g->max_comptag_mem = totalram_size_in_mb;
#endif
} }
static void nvgpu_init_timeout(struct gk20a *g) static void nvgpu_init_timeout(struct gk20a *g)

View File

@@ -1942,12 +1942,15 @@ out:
static int nvgpu_gpu_ioctl_get_buffer_info(struct gk20a *g, static int nvgpu_gpu_ioctl_get_buffer_info(struct gk20a *g,
struct nvgpu_gpu_get_buffer_info_args *args) struct nvgpu_gpu_get_buffer_info_args *args)
{ {
int err = -EINVAL;
#ifdef CONFIG_NVGPU_COMPRESSION
u64 user_metadata_addr = args->in.metadata_addr; u64 user_metadata_addr = args->in.metadata_addr;
u32 in_metadata_size = args->in.metadata_size; u32 in_metadata_size = args->in.metadata_size;
struct gk20a_dmabuf_priv *priv = NULL; struct gk20a_dmabuf_priv *priv = NULL;
s32 dmabuf_fd = args->in.dmabuf_fd; s32 dmabuf_fd = args->in.dmabuf_fd;
struct dma_buf *dmabuf; struct dma_buf *dmabuf;
int err = 0;
err = 0;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
@@ -2002,13 +2005,11 @@ static int nvgpu_gpu_ioctl_get_buffer_info(struct gk20a *g,
NVGPU_GPU_BUFFER_INFO_FLAGS_METADATA_REGISTERED; NVGPU_GPU_BUFFER_INFO_FLAGS_METADATA_REGISTERED;
} }
#ifdef CONFIG_NVGPU_COMPRESSION
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_COMPRESSION) && if (nvgpu_is_enabled(g, NVGPU_SUPPORT_COMPRESSION) &&
priv->comptags.enabled) { priv->comptags.enabled) {
args->out.flags |= args->out.flags |=
NVGPU_GPU_BUFFER_INFO_FLAGS_COMPTAGS_ALLOCATED; NVGPU_GPU_BUFFER_INFO_FLAGS_COMPTAGS_ALLOCATED;
} }
#endif
if (priv->mutable_metadata) { if (priv->mutable_metadata) {
args->out.flags |= args->out.flags |=
@@ -2022,6 +2023,7 @@ out_priv_unlock:
nvgpu_mutex_release(&priv->lock); nvgpu_mutex_release(&priv->lock);
out: out:
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
#endif
return err; return err;
} }
@@ -2098,12 +2100,13 @@ static int nvgpu_handle_comptags_control(struct gk20a *g,
static int nvgpu_gpu_ioctl_register_buffer(struct gk20a *g, static int nvgpu_gpu_ioctl_register_buffer(struct gk20a *g,
struct nvgpu_gpu_register_buffer_args *args) struct nvgpu_gpu_register_buffer_args *args)
{ {
int err = 0;
#ifdef CONFIG_NVGPU_COMPRESSION
struct gk20a_dmabuf_priv *priv = NULL; struct gk20a_dmabuf_priv *priv = NULL;
bool mutable_metadata = false; bool mutable_metadata = false;
bool modify_metadata = false; bool modify_metadata = false;
struct dma_buf *dmabuf; struct dma_buf *dmabuf;
u8 *blob_copy = NULL; u8 *blob_copy = NULL;
int err = 0;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
@@ -2178,7 +2181,6 @@ static int nvgpu_gpu_ioctl_register_buffer(struct gk20a *g,
goto out_priv_unlock; goto out_priv_unlock;
} }
#ifdef CONFIG_NVGPU_COMPRESSION
/* Comptags allocation */ /* Comptags allocation */
err = nvgpu_handle_comptags_control(g, dmabuf, priv, err = nvgpu_handle_comptags_control(g, dmabuf, priv,
args->comptags_alloc_control); args->comptags_alloc_control);
@@ -2186,7 +2188,6 @@ static int nvgpu_gpu_ioctl_register_buffer(struct gk20a *g,
nvgpu_err(g, "Comptags alloc control failed %d", err); nvgpu_err(g, "Comptags alloc control failed %d", err);
goto out_priv_unlock; goto out_priv_unlock;
} }
#endif
/* All done, update metadata blob */ /* All done, update metadata blob */
nvgpu_kfree(g, priv->metadata_blob); nvgpu_kfree(g, priv->metadata_blob);
@@ -2202,13 +2203,11 @@ static int nvgpu_gpu_ioctl_register_buffer(struct gk20a *g,
/* Output variables */ /* Output variables */
args->flags = 0; args->flags = 0;
#ifdef CONFIG_NVGPU_COMPRESSION
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_COMPRESSION) && if (nvgpu_is_enabled(g, NVGPU_SUPPORT_COMPRESSION) &&
priv->comptags.enabled) { priv->comptags.enabled) {
args->flags |= args->flags |=
NVGPU_GPU_REGISTER_BUFFER_FLAGS_COMPTAGS_ALLOCATED; NVGPU_GPU_REGISTER_BUFFER_FLAGS_COMPTAGS_ALLOCATED;
} }
#endif
nvgpu_log_info(g, "buffer registered: mutable: %s, metadata size: %u, flags: 0x%8x", nvgpu_log_info(g, "buffer registered: mutable: %s, metadata size: %u, flags: 0x%8x",
priv->mutable_metadata ? "yes" : "no", priv->metadata_blob_size, priv->mutable_metadata ? "yes" : "no", priv->metadata_blob_size,
@@ -2219,7 +2218,7 @@ out_priv_unlock:
out: out:
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
nvgpu_kfree(g, blob_copy); nvgpu_kfree(g, blob_copy);
#endif
return err; return err;
} }

View File

@@ -1900,8 +1900,10 @@ static int gk20a_probe(struct platform_device *dev)
nvgpu_l1ss_init_reporting(gk20a); nvgpu_l1ss_init_reporting(gk20a);
#endif #endif
#ifdef CONFIG_NVGPU_COMPRESSION
nvgpu_mutex_init(&l->dmabuf_priv_list_lock); nvgpu_mutex_init(&l->dmabuf_priv_list_lock);
nvgpu_init_list_node(&l->dmabuf_priv_list); nvgpu_init_list_node(&l->dmabuf_priv_list);
#endif
gk20a->probe_done = true; gk20a->probe_done = true;
@@ -2033,8 +2035,10 @@ static int __exit gk20a_remove(struct platform_device *pdev)
err = nvgpu_remove(dev); err = nvgpu_remove(dev);
#ifdef CONFIG_NVGPU_COMPRESSION
gk20a_dma_buf_priv_list_clear(l); gk20a_dma_buf_priv_list_clear(l);
nvgpu_mutex_destroy(&l->dmabuf_priv_list_lock); nvgpu_mutex_destroy(&l->dmabuf_priv_list_lock);
#endif
unregister_reboot_notifier(&l->nvgpu_reboot_nb); unregister_reboot_notifier(&l->nvgpu_reboot_nb);

View File

@@ -743,7 +743,9 @@ static void nvgpu_pci_remove(struct pci_dev *pdev)
if (gk20a_gpu_is_virtual(dev)) if (gk20a_gpu_is_virtual(dev))
return; return;
#ifdef CONFIG_NVGPU_COMPRESSION
gk20a_dma_buf_priv_list_clear(l); gk20a_dma_buf_priv_list_clear(l);
#endif
nvgpu_mutex_destroy(&l->dmabuf_priv_list_lock); nvgpu_mutex_destroy(&l->dmabuf_priv_list_lock);
err = nvgpu_pci_clear_pci_power(dev_name(dev)); err = nvgpu_pci_clear_pci_power(dev_name(dev));

View File

@@ -1191,6 +1191,7 @@ static ssize_t tsg_timeslice_max_us_store(struct device *dev,
static DEVICE_ATTR(tsg_timeslice_max_us, ROOTRW, tsg_timeslice_max_us_read, static DEVICE_ATTR(tsg_timeslice_max_us, ROOTRW, tsg_timeslice_max_us_read,
tsg_timeslice_max_us_store); tsg_timeslice_max_us_store);
#ifdef CONFIG_NVGPU_COMPRESSION
static ssize_t comptag_mem_deduct_store(struct device *dev, static ssize_t comptag_mem_deduct_store(struct device *dev,
struct device_attribute *attr, struct device_attribute *attr,
const char *buf, size_t count) const char *buf, size_t count)
@@ -1224,6 +1225,7 @@ static ssize_t comptag_mem_deduct_show(struct device *dev,
static DEVICE_ATTR(comptag_mem_deduct, ROOTRW, static DEVICE_ATTR(comptag_mem_deduct, ROOTRW,
comptag_mem_deduct_show, comptag_mem_deduct_store); comptag_mem_deduct_show, comptag_mem_deduct_store);
#endif
#ifdef CONFIG_NVGPU_MIG #ifdef CONFIG_NVGPU_MIG
static ssize_t mig_mode_config_list_show(struct device *dev, static ssize_t mig_mode_config_list_show(struct device *dev,
@@ -1399,7 +1401,10 @@ void nvgpu_remove_sysfs(struct device *dev)
device_remove_file(dev, &dev_attr_gpu_powered_on); device_remove_file(dev, &dev_attr_gpu_powered_on);
#ifdef CONFIG_NVGPU_COMPRESSION
device_remove_file(dev, &dev_attr_comptag_mem_deduct); device_remove_file(dev, &dev_attr_comptag_mem_deduct);
#endif
#ifdef CONFIG_NVGPU_MIG #ifdef CONFIG_NVGPU_MIG
device_remove_file(dev, &dev_attr_mig_mode_config_list); device_remove_file(dev, &dev_attr_mig_mode_config_list);
device_remove_file(dev, &dev_attr_mig_mode_config); device_remove_file(dev, &dev_attr_mig_mode_config);
@@ -1466,7 +1471,10 @@ int nvgpu_create_sysfs(struct device *dev)
error |= device_create_file(dev, &dev_attr_gpu_powered_on); error |= device_create_file(dev, &dev_attr_gpu_powered_on);
error |= device_create_file(dev, &dev_attr_comptag_mem_deduct); #ifdef CONFIG_NVGPU_COMPRESSION
device_create_file(dev, &dev_attr_comptag_mem_deduct);
#endif
#ifdef CONFIG_NVGPU_MIG #ifdef CONFIG_NVGPU_MIG
error |= device_create_file(dev, &dev_attr_mig_mode_config_list); error |= device_create_file(dev, &dev_attr_mig_mode_config_list);
error |= device_create_file(dev, &dev_attr_mig_mode_config); error |= device_create_file(dev, &dev_attr_mig_mode_config);

View File

@@ -468,9 +468,11 @@ int nvgpu_vm_mapping_modify(struct vm_gk20a *vm,
struct nvgpu_sgt *nvgpu_sgt = NULL; struct nvgpu_sgt *nvgpu_sgt = NULL;
u32 pgsz_idx; u32 pgsz_idx;
u32 page_size; u32 page_size;
u64 ctag_offset;
s16 kind = NV_KIND_INVALID; s16 kind = NV_KIND_INVALID;
u64 ctag_offset = 0UL;
#ifdef CONFIG_NVGPU_COMPRESSION
u64 compression_page_size; u64 compression_page_size;
#endif
nvgpu_mutex_acquire(&vm->update_gmmu_lock); nvgpu_mutex_acquire(&vm->update_gmmu_lock);
@@ -510,19 +512,32 @@ int nvgpu_vm_mapping_modify(struct vm_gk20a *vm,
goto out; goto out;
} }
/*
* Fall back is the incompressible kind.
*/
kind = incompr_kind;
/*
* If we support compression and there's a compressible kind, use it.
*/
#ifdef CONFIG_NVGPU_COMPRESSION
if (mapped_buffer->ctag_offset != 0) { if (mapped_buffer->ctag_offset != 0) {
if (compr_kind == NV_KIND_INVALID) { if (compr_kind == NV_KIND_INVALID) {
kind = incompr_kind; kind = incompr_kind;
} else { } else {
kind = compr_kind; kind = compr_kind;
} }
} else { }
if (incompr_kind == NV_KIND_INVALID) { #endif
/*
* If we don't support compression you still need to have a valid kind
* specified.
*/
if (kind == NV_KIND_INVALID) {
nvgpu_err(g, "invalid incompr_kind specified"); nvgpu_err(g, "invalid incompr_kind specified");
goto out; goto out;
} }
kind = incompr_kind;
}
nvgpu_sgt = nvgpu_linux_sgt_create(g, mapped_buffer->os_priv.sgt); nvgpu_sgt = nvgpu_linux_sgt_create(g, mapped_buffer->os_priv.sgt);
if (!nvgpu_sgt) { if (!nvgpu_sgt) {
@@ -530,6 +545,7 @@ int nvgpu_vm_mapping_modify(struct vm_gk20a *vm,
goto out; goto out;
} }
#ifdef CONFIG_NVGPU_COMPRESSION
ctag_offset = mapped_buffer->ctag_offset; ctag_offset = mapped_buffer->ctag_offset;
compression_page_size = g->ops.fb.compression_page_size(g); compression_page_size = g->ops.fb.compression_page_size(g);
@@ -537,6 +553,7 @@ int nvgpu_vm_mapping_modify(struct vm_gk20a *vm,
ctag_offset += (u32)(buffer_offset >> ctag_offset += (u32)(buffer_offset >>
nvgpu_ilog2(compression_page_size)); nvgpu_ilog2(compression_page_size));
#endif
if (g->ops.mm.gmmu.map(vm, if (g->ops.mm.gmmu.map(vm,
map_address + buffer_offset, map_address + buffer_offset,

View File

@@ -118,12 +118,15 @@ void nvgpu_vm_remap_os_buf_put(struct vm_gk20a *vm,
{ {
struct gk20a *g = gk20a_from_vm(vm); struct gk20a *g = gk20a_from_vm(vm);
struct device *dev = dev_from_gk20a(g); struct device *dev = dev_from_gk20a(g);
#ifdef CONFIG_NVGPU_COMPRESSION
struct gk20a_comptags comptags; struct gk20a_comptags comptags;
int err = 0; int err = 0;
#endif
nvgpu_mm_unpin(dev, remap_os_buf->os_priv.dmabuf, nvgpu_mm_unpin(dev, remap_os_buf->os_priv.dmabuf,
remap_os_buf->os_priv.attachment, remap_os_buf->os_priv.sgt); remap_os_buf->os_priv.attachment, remap_os_buf->os_priv.sgt);
#ifdef CONFIG_NVGPU_COMPRESSION
gk20a_get_comptags(&remap_os_buf->os_buf, &comptags); gk20a_get_comptags(&remap_os_buf->os_buf, &comptags);
/* /*
@@ -139,6 +142,7 @@ void nvgpu_vm_remap_os_buf_put(struct vm_gk20a *vm,
return; return;
} }
} }
#endif
nvgpu_sgt_free(g, remap_os_buf->nv_sgt); nvgpu_sgt_free(g, remap_os_buf->nv_sgt);