Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: Fix build for Linux v5.18
Upstream commit 7938f4218168 ("dma-buf-map: Rename to iosys-map")
renames 'struct dma_buf_map' to 'struct iosys_map' and breaks building
the NVGPU driver with Linux v5.18-rc1. In the NVGPU driver there are
many places where 'dma_buf_map' is used, so to clean up the code and
minimise the impact of this change, add gk20a_dmabuf_vmap() and
gk20a_dmabuf_vunmap() helper functions. These new functions support all
kernel versions and eliminate a lot of the KERNEL_VERSION ifdefs.
Bug 3598986
Change-Id: Id0f904ec0662f20f3d699b74efd9542d12344228
Signed-off-by: Jon Hunter <jonathanh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2693970
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 19a8adeae1
Commit: 86c0a696ed
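
To make the diff below easier to read, here is a minimal caller-side sketch of the pattern this change introduces. It is only an illustration, not part of the commit: example_access() is a hypothetical function, while gk20a_dmabuf_vmap()/gk20a_dmabuf_vunmap() and the -ENOMEM convention are taken from the changes themselves.

#include <linux/dma-buf.h>
#include "dmabuf_priv.h"	/* declares gk20a_dmabuf_vmap()/gk20a_dmabuf_vunmap() */

static int example_access(struct dma_buf *dmabuf)
{
	/* Returns NULL on failure on every supported kernel version. */
	void *va = gk20a_dmabuf_vmap(dmabuf);

	if (!va)
		return -ENOMEM;

	/* ... read or write the CPU mapping ... */

	/* Takes the plain vaddr; the helper rebuilds the map struct when needed. */
	gk20a_dmabuf_vunmap(dmabuf, va);
	return 0;
}

Call sites that previously open-coded the KERNEL_VERSION checks collapse to this shape, which is what most hunks in the diff do.
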
@@ -1042,9 +1042,6 @@ __releases(&l->cde_app->mutex)
 	const s16 compbits_kind = 0;
 	u32 submit_op;
 	struct dma_buf_attachment *attachment;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	struct dma_buf_map map;
-#endif
 
 	nvgpu_log(g, gpu_dbg_cde, "compbits_byte_offset=%llu scatterbuffer_byte_offset=%llu",
 		  compbits_byte_offset, scatterbuffer_byte_offset);
@@ -1135,12 +1132,7 @@ __releases(&l->cde_app->mutex)
 	struct sg_table *sgt;
 	void *scatter_buffer;
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	err = dma_buf_vmap(compbits_scatter_buf, &map);
-	surface = err ? NULL : map.vaddr;
-#else
-	surface = dma_buf_vmap(compbits_scatter_buf);
-#endif
+	surface = gk20a_dmabuf_vmap(compbits_scatter_buf);
 	if (!surface) {
 		nvgpu_warn(g, "dma_buf_vmap failed");
 		err = -EINVAL;
@@ -1189,11 +1181,7 @@ __releases(&l->cde_app->mutex)
 			goto exit_unmap_surface;
 		}
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-		dma_buf_vunmap(compbits_scatter_buf, &map);
-#else
-		dma_buf_vunmap(compbits_scatter_buf, surface);
-#endif
+		gk20a_dmabuf_vunmap(compbits_scatter_buf, surface);
 		surface = NULL;
 	}
 
@@ -1282,13 +1270,8 @@ __releases(&l->cde_app->mutex)
 	return err;
 
 exit_unmap_surface:
-	if (surface) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-		dma_buf_vunmap(compbits_scatter_buf, &map);
-#else
-		dma_buf_vunmap(compbits_scatter_buf, surface);
-#endif
-	}
+	if (surface)
+		gk20a_dmabuf_vunmap(compbits_scatter_buf, surface);
 exit_unmap_vaddr:
 	nvgpu_vm_unmap(cde_ctx->vm, map_vaddr, NULL);
 exit_idle:
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -14,9 +14,15 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/version.h>
 #include <linux/device.h>
 #include <linux/dma-buf.h>
 #include <linux/scatterlist.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)
+#include <linux/iosys-map.h>
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+#include <linux/dma-buf-map.h>
+#endif
 
 #include <nvgpu/comptags.h>
 #include <nvgpu/enabled.h>
@@ -318,3 +324,38 @@ out:
 	*state = s;
 	return err;
 }
+
+void *gk20a_dmabuf_vmap(struct dma_buf *dmabuf)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)
+	struct iosys_map map;
+#else
+	struct dma_buf_map map;
+#endif
+	/* Linux v5.11 and later kernels */
+	if (dma_buf_vmap(dmabuf, &map))
+		return NULL;
+
+	return map.vaddr;
+#else
+	/* Linux v5.10 and earlier kernels */
+	return dma_buf_vmap(dmabuf);
+#endif
+}
+
+void gk20a_dmabuf_vunmap(struct dma_buf *dmabuf, void *addr)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)
+	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);
+#else
+	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr);
+#endif
+	/* Linux v5.11 and later kernels */
+	dma_buf_vunmap(dmabuf, &map);
+#else
+	/* Linux v5.10 and earlier kernels */
+	dma_buf_vunmap(dmabuf, addr);
+#endif
+}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -122,5 +122,7 @@ int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct gk20a *g,
 void gk20a_dma_buf_priv_list_clear(struct nvgpu_os_linux *l);
 struct gk20a_dmabuf_priv *gk20a_dma_buf_get_drvdata(
 		struct dma_buf *dmabuf, struct device *device);
+void *gk20a_dmabuf_vmap(struct dma_buf *dmabuf);
+void gk20a_dmabuf_vunmap(struct dma_buf *dmabuf, void *addr);
 
 #endif
@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics channel
  *
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -59,6 +59,7 @@
 #include "ioctl.h"
 #include "channel.h"
 #include "os_linux.h"
+#include "dmabuf_priv.h"
 
 /* the minimal size of client buffer */
 #define CSS_MIN_CLIENT_SNAPSHOT_SIZE \
@@ -136,15 +137,8 @@ void gk20a_channel_free_cycle_stats_buffer(struct nvgpu_channel *ch)
 	/* disable existing cyclestats buffer */
 	nvgpu_mutex_acquire(&ch->cyclestate.cyclestate_buffer_mutex);
 	if (priv->cyclestate_buffer_handler) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-		struct dma_buf_map map;
-
-		dma_buf_map_set_vaddr(&map, ch->cyclestate.cyclestate_buffer);
-		dma_buf_vunmap(priv->cyclestate_buffer_handler, &map);
-#else
-		dma_buf_vunmap(priv->cyclestate_buffer_handler,
+		gk20a_dmabuf_vunmap(priv->cyclestate_buffer_handler,
 				ch->cyclestate.cyclestate_buffer);
-#endif
 		dma_buf_put(priv->cyclestate_buffer_handler);
 		priv->cyclestate_buffer_handler = NULL;
 		ch->cyclestate.cyclestate_buffer = NULL;
@@ -156,10 +150,6 @@ void gk20a_channel_free_cycle_stats_buffer(struct nvgpu_channel *ch)
 int gk20a_channel_cycle_stats(struct nvgpu_channel *ch, int dmabuf_fd)
 {
 	struct dma_buf *dmabuf;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	struct dma_buf_map map;
-	int err;
-#endif
 	void *virtual_address;
 	struct nvgpu_channel_linux *priv = ch->os_priv;
 
@@ -173,12 +163,8 @@ int gk20a_channel_cycle_stats(struct nvgpu_channel *ch, int dmabuf_fd)
 	dmabuf = dma_buf_get(dmabuf_fd);
 	if (IS_ERR(dmabuf))
 		return PTR_ERR(dmabuf);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	err = dma_buf_vmap(dmabuf, &map);
-	virtual_address = err ? NULL : map.vaddr;
-#else
-	virtual_address = dma_buf_vmap(dmabuf);
-#endif
+
+	virtual_address = gk20a_dmabuf_vmap(dmabuf);
 	if (!virtual_address) {
 		dma_buf_put(dmabuf);
 		return -ENOMEM;
@@ -226,9 +212,6 @@ int gk20a_attach_cycle_stats_snapshot(struct nvgpu_channel *ch,
 	struct gk20a *g = ch->g;
 	struct gk20a_cs_snapshot_client_linux *client_linux;
 	struct gk20a_cs_snapshot_client *client;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	struct dma_buf_map map;
-#endif
 
 	nvgpu_mutex_acquire(&ch->cs_client_mutex);
 	if (ch->cs_client) {
@@ -257,14 +240,8 @@ int gk20a_attach_cycle_stats_snapshot(struct nvgpu_channel *ch,
 		goto err_put;
 	}
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	ret = dma_buf_vmap(client_linux->dma_handler, &map);
-	client->snapshot = ret ? NULL :
-		(struct gk20a_cs_snapshot_fifo *)map.vaddr;
-#else
 	client->snapshot = (struct gk20a_cs_snapshot_fifo *)
-			dma_buf_vmap(client_linux->dma_handler);
-#endif
+			gk20a_dmabuf_vmap(client_linux->dma_handler);
 	if (!client->snapshot) {
 		ret = -ENOMEM;
 		goto err_put;
@@ -309,15 +286,8 @@ int gk20a_channel_free_cycle_stats_snapshot(struct nvgpu_channel *ch)
 
 	if (client_linux->dma_handler) {
 		if (ch->cs_client->snapshot) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-			struct dma_buf_map map;
-
-			dma_buf_map_set_vaddr(&map, ch->cs_client->snapshot);
-			dma_buf_vunmap(client_linux->dma_handler, &map);
-#else
-			dma_buf_vunmap(client_linux->dma_handler,
+			gk20a_dmabuf_vunmap(client_linux->dma_handler,
 					ch->cs_client->snapshot);
-#endif
 		}
 
 		dma_buf_put(client_linux->dma_handler);
@@ -376,14 +346,8 @@ static void gk20a_channel_free_error_notifiers(struct nvgpu_channel *ch)
 
 	nvgpu_mutex_acquire(&priv->error_notifier.mutex);
 	if (priv->error_notifier.dmabuf) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-		struct dma_buf_map map;
-
-		dma_buf_map_set_vaddr(&map, priv->error_notifier.vaddr);
-		dma_buf_vunmap(priv->error_notifier.dmabuf, &map);
-#else
-		dma_buf_vunmap(priv->error_notifier.dmabuf, priv->error_notifier.vaddr);
-#endif
+		gk20a_dmabuf_vunmap(priv->error_notifier.dmabuf,
+				priv->error_notifier.vaddr);
 		dma_buf_put(priv->error_notifier.dmabuf);
 		priv->error_notifier.dmabuf = NULL;
 		priv->error_notifier.notification = NULL;
@@ -396,10 +360,6 @@ static int gk20a_init_error_notifier(struct nvgpu_channel *ch,
 		struct nvgpu_set_error_notifier *args)
 {
 	struct dma_buf *dmabuf;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	struct dma_buf_map map;
-	int err;
-#endif
 	void *va;
 	u64 end = args->offset + sizeof(struct nvgpu_notification);
 	struct nvgpu_channel_linux *priv = ch->os_priv;
@@ -427,12 +387,7 @@ static int gk20a_init_error_notifier(struct nvgpu_channel *ch,
 	nvgpu_speculation_barrier();
 
 	/* map handle */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	err = dma_buf_vmap(dmabuf, &map);
-	va = err ? NULL : map.vaddr;
-#else
-	va = dma_buf_vmap(dmabuf);
-#endif
+	va = gk20a_dmabuf_vmap(dmabuf);
 	if (!va) {
 		dma_buf_put(dmabuf);
 		pr_err("Cannot map notifier handle\n");
@@ -749,9 +704,6 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 		u32 payload, u32 timeout)
 {
 	struct dma_buf *dmabuf;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	struct dma_buf_map map;
-#endif
 	void *data;
 	int ret = 0;
 
@@ -779,12 +731,7 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 
 	nvgpu_speculation_barrier();
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	ret = dma_buf_vmap(dmabuf, &map);
-	data = ret ? NULL : map.vaddr;
-#else
-	data = dma_buf_vmap(dmabuf);
-#endif
+	data = gk20a_dmabuf_vmap(dmabuf);
 	if (!data) {
 		nvgpu_err(ch->g, "failed to map semaphore memory");
 		ret = -EINVAL;
@@ -797,11 +744,7 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 			nvgpu_channel_check_unserviceable(ch),
 			timeout);
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	dma_buf_vunmap(dmabuf, &map);
-#else
-	dma_buf_vunmap(dmabuf, data);
-#endif
+	gk20a_dmabuf_vunmap(dmabuf, data);
 cleanup_put:
 	dma_buf_put(dmabuf);
 	return ret;
@@ -811,9 +754,6 @@ static int gk20a_channel_wait(struct nvgpu_channel *ch,
 		struct nvgpu_wait_args *args)
 {
 	struct dma_buf *dmabuf;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	struct dma_buf_map map;
-#endif
 	struct gk20a *g = ch->g;
 	struct notification *notif;
 	struct timespec64 tv;
@@ -850,12 +790,7 @@ static int gk20a_channel_wait(struct nvgpu_channel *ch,
 
 		nvgpu_speculation_barrier();
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-		ret = dma_buf_vmap(dmabuf, &map);
-		notif = ret ? NULL : map.vaddr;
-#else
-		notif = dma_buf_vmap(dmabuf);
-#endif
+		notif = gk20a_dmabuf_vmap(dmabuf);
 		if (!notif) {
 			nvgpu_err(g, "failed to map notifier memory");
 			return -ENOMEM;
@@ -887,11 +822,7 @@ static int gk20a_channel_wait(struct nvgpu_channel *ch,
 		notif->info16 = ch->chid; /* should be method offset */
 
 notif_clean_up:
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-		dma_buf_vunmap(dmabuf, &map);
-#else
-		dma_buf_vunmap(dmabuf, notif);
-#endif
+		gk20a_dmabuf_vunmap(dmabuf, notif);
 		return ret;
 
 	case NVGPU_WAIT_TYPE_SEMAPHORE:
@@ -56,6 +56,7 @@
 #include "ioctl_dbg.h"
 #include "ioctl_channel.h"
 #include "ioctl.h"
+#include "dmabuf_priv.h"
 #include "dmabuf_vidmem.h"
 
 #include "common/gr/ctx_priv.h"
@@ -2440,15 +2441,8 @@ static int nvgpu_gpu_access_sysmem_gpu_va(struct gk20a *g, u8 cmd, u32 size,
 {
 	int ret = 0;
 	u8 *cpu_va = NULL;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	struct dma_buf_map map;
-
-	ret = dma_buf_vmap(dmabuf, &map);
-	cpu_va = ret ? NULL : map.vaddr;
-#else
-	cpu_va = (u8 *)dma_buf_vmap(dmabuf);
-#endif
 
+	cpu_va = (u8 *)gk20a_dmabuf_vmap(dmabuf);
 	if (!cpu_va) {
 		return -ENOMEM;
 	}
@@ -2467,11 +2461,8 @@ static int nvgpu_gpu_access_sysmem_gpu_va(struct gk20a *g, u8 cmd, u32 size,
 		ret = -EINVAL;
 	}
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	dma_buf_vunmap(dmabuf, &map);
-#else
-	dma_buf_vunmap(dmabuf, cpu_va);
-#endif
+	gk20a_dmabuf_vunmap(dmabuf, cpu_va);
 
 	return ret;
 }
@@ -32,6 +32,7 @@
 #include <nvgpu/tsg.h>
 #include <nvgpu/fb.h>
 
+#include "dmabuf_priv.h"
 #include "platform_gk20a.h"
 #include "os_linux.h"
 #include "ioctl_prof.h"
@@ -380,9 +381,6 @@ static int nvgpu_prof_ioctl_alloc_pma_stream(struct nvgpu_profiler_object_priv *
 	u64 pma_buffer_offset;
 	struct dma_buf *pma_dmabuf;
 	struct dma_buf *pma_bytes_available_dmabuf;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	struct dma_buf_map map;
-#endif
 	void *cpuva;
 	u32 pma_buffer_size;
 	int err;
@@ -452,12 +450,7 @@ static int nvgpu_prof_ioctl_alloc_pma_stream(struct nvgpu_profiler_object_priv *
 		goto err_unmap_pma;
 	}
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	err = dma_buf_vmap(pma_bytes_available_dmabuf, &map);
-	cpuva = err ? NULL : map.vaddr;
-#else
-	cpuva = dma_buf_vmap(pma_bytes_available_dmabuf);
-#endif
+	cpuva = gk20a_dmabuf_vmap(pma_bytes_available_dmabuf);
 	if (cpuva == NULL) {
 		err = -ENOMEM;
 		nvgpu_err(g, "failed to vmap available bytes buffer FD");
@@ -501,9 +494,6 @@ static void nvgpu_prof_free_pma_stream_priv_data(struct nvgpu_profiler_object_pr
 	struct nvgpu_profiler_object *prof = priv->prof;
 	struct gk20a *g = prof->g;
 	struct mm_gk20a *mm = &g->mm;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	struct dma_buf_map map;
-#endif
 
 	if (priv->pma_bytes_available_buffer_dmabuf == NULL) {
 		return;
@@ -516,13 +506,8 @@ static void nvgpu_prof_free_pma_stream_priv_data(struct nvgpu_profiler_object_pr
 	prof->pma_buffer_va = 0U;
 	prof->pma_buffer_size = 0U;
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	dma_buf_map_set_vaddr(&map, prof->pma_bytes_available_buffer_cpuva);
-	dma_buf_vunmap(priv->pma_bytes_available_buffer_dmabuf, &map);
-#else
-	dma_buf_vunmap(priv->pma_bytes_available_buffer_dmabuf,
+	gk20a_dmabuf_vunmap(priv->pma_bytes_available_buffer_dmabuf,
 			prof->pma_bytes_available_buffer_cpuva);
-#endif
 	dma_buf_put(priv->pma_bytes_available_buffer_dmabuf);
 	priv->pma_bytes_available_buffer_dmabuf = NULL;
 	prof->pma_bytes_available_buffer_cpuva = NULL;
@@ -661,9 +661,6 @@ static void trace_write_pushbuffer(struct nvgpu_channel *c,
 	unsigned int words;
 	u64 offset;
 	struct dma_buf *dmabuf = NULL;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-	struct dma_buf_map map;
-#endif
 
 	if (gk20a_debug_trace_cmdbuf) {
 		u64 gpu_va = (u64)g->entry0 |
@@ -672,14 +669,8 @@ static void trace_write_pushbuffer(struct nvgpu_channel *c,
 
 		words = pbdma_gp_entry1_length_v(g->entry1);
 		err = nvgpu_vm_find_buf(c->vm, gpu_va, &dmabuf, &offset);
-		if (!err) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-			err = dma_buf_vmap(dmabuf, &map);
-			mem = err ? NULL : map.vaddr;
-#else
-			mem = dma_buf_vmap(dmabuf);
-#endif
-		}
+		if (!err)
+			mem = gk20a_dmabuf_vmap(dmabuf);
 	}
 
 	if (mem) {
@@ -698,11 +689,7 @@ static void trace_write_pushbuffer(struct nvgpu_channel *c,
 				mem);
 		}
 #endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
-		dma_buf_vunmap(dmabuf, &map);
-#else
-		dma_buf_vunmap(dmabuf, mem);
-#endif
+		gk20a_dmabuf_vunmap(dmabuf, mem);
 	}
 }