gpu: nvgpu: Add support for Linux v5.11

For Linux v5.11, commit 6619ccf1bb1d ("dma-buf: Use struct dma_buf_map
in dma_buf_vmap() interfaces") changes the dma_buf_vmap() and
dma_buf_vunmap() APIs to take a new parameter of type
'struct dma_buf_map'. Update the NVGPU driver to support these updated
APIs for Linux v5.11+.

Finally, the legacy dma_buf_vmap() API returns NULL on error rather
than an error code, so correct the test of the return value in
gk20a_cde_convert().
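
The pattern repeated at each call site below can be summarised by the
following sketch; compat_vmap()/compat_vunmap() are illustrative
helpers only, not functions added by this patch, which open-codes the
same logic at every site:

  #include <linux/dma-buf.h>
  #include <linux/version.h>

  /* Sketch: map a dma-buf for CPU access and return the virtual
   * address, or NULL on failure, hiding the v5.11 API change.
   */
  static void *compat_vmap(struct dma_buf *buf)
  {
  #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
  	struct dma_buf_map map;

  	/* v5.11+: dma_buf_vmap() returns 0 on success and fills 'map'. */
  	return dma_buf_vmap(buf, &map) ? NULL : map.vaddr;
  #else
  	/* Legacy API: returns the vaddr directly, NULL on error. */
  	return dma_buf_vmap(buf);
  #endif
  }

  static void compat_vunmap(struct dma_buf *buf, void *vaddr)
  {
  #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
  	struct dma_buf_map map;

  	/* Rebuild the dma_buf_map wrapper from the saved address. */
  	dma_buf_map_set_vaddr(&map, vaddr);
  	dma_buf_vunmap(buf, &map);
  #else
  	dma_buf_vunmap(buf, vaddr);
  #endif
  }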

Bug 200687525

Change-Id: Ie20f101e965fa0f2c650d9b30ff4558ce1256c12
Signed-off-by: Jon Hunter <jonathanh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2469555
Reviewed-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Authored by Jon Hunter on 2021-01-12 19:10:02 +00:00
Committed by mobile promotions
Parent: a03da4a077
Commit: ddf8f12197
4 changed files with 127 additions and 10 deletions

View File

@@ -1,7 +1,7 @@
 /*
  * Color decompression engine support
  *
- * Copyright (c) 2014-2020, NVIDIA Corporation.  All rights reserved.
+ * Copyright (c) 2014-2021, NVIDIA Corporation.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1042,6 +1042,9 @@ __releases(&l->cde_app->mutex)
 	const s16 compbits_kind = 0;
 	u32 submit_op;
 	struct dma_buf_attachment *attachment;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	struct dma_buf_map map;
+#endif
 
 	nvgpu_log(g, gpu_dbg_cde, "compbits_byte_offset=%llu scatterbuffer_byte_offset=%llu",
 		  compbits_byte_offset, scatterbuffer_byte_offset);
@@ -1128,10 +1131,14 @@ __releases(&l->cde_app->mutex)
 		struct sg_table *sgt;
 		void *scatter_buffer;
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+		err = dma_buf_vmap(compbits_scatter_buf, &map);
+		surface = err ? NULL : map.vaddr;
+#else
 		surface = dma_buf_vmap(compbits_scatter_buf);
-		if (IS_ERR(surface)) {
-			nvgpu_warn(g,
-				   "dma_buf_vmap failed");
+#endif
+		if (!surface) {
+			nvgpu_warn(g, "dma_buf_vmap failed");
 			err = -EINVAL;
 			goto exit_unmap_vaddr;
 		}
@@ -1178,7 +1185,11 @@ __releases(&l->cde_app->mutex)
 			goto exit_unmap_surface;
 		}
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+		dma_buf_vunmap(compbits_scatter_buf, &map);
+#else
 		dma_buf_vunmap(compbits_scatter_buf, surface);
+#endif
 		surface = NULL;
 	}
@@ -1268,7 +1279,11 @@ __releases(&l->cde_app->mutex)
exit_unmap_surface: exit_unmap_surface:
if (surface) { if (surface) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
dma_buf_vunmap(compbits_scatter_buf, &map);
#else
dma_buf_vunmap(compbits_scatter_buf, surface); dma_buf_vunmap(compbits_scatter_buf, surface);
#endif
} }
exit_unmap_vaddr: exit_unmap_vaddr:
nvgpu_vm_unmap(cde_ctx->vm, map_vaddr, NULL); nvgpu_vm_unmap(cde_ctx->vm, map_vaddr, NULL);

View File

@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics channel
  *
- * Copyright (c) 2011-2020, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2011-2021, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -136,8 +136,15 @@ void gk20a_channel_free_cycle_stats_buffer(struct nvgpu_channel *ch)
 	/* disable existing cyclestats buffer */
 	nvgpu_mutex_acquire(&ch->cyclestate.cyclestate_buffer_mutex);
 	if (priv->cyclestate_buffer_handler) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+		struct dma_buf_map map;
+
+		dma_buf_map_set_vaddr(&map, ch->cyclestate.cyclestate_buffer);
+		dma_buf_vunmap(priv->cyclestate_buffer_handler, &map);
+#else
 		dma_buf_vunmap(priv->cyclestate_buffer_handler,
 				ch->cyclestate.cyclestate_buffer);
+#endif
 		dma_buf_put(priv->cyclestate_buffer_handler);
 		priv->cyclestate_buffer_handler = NULL;
 		ch->cyclestate.cyclestate_buffer = NULL;
@@ -149,6 +156,10 @@ void gk20a_channel_free_cycle_stats_buffer(struct nvgpu_channel *ch)
 int gk20a_channel_cycle_stats(struct nvgpu_channel *ch, int dmabuf_fd)
 {
 	struct dma_buf *dmabuf;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	struct dma_buf_map map;
+	int err;
+#endif
 	void *virtual_address;
 	struct nvgpu_channel_linux *priv = ch->os_priv;
@@ -162,7 +173,12 @@ int gk20a_channel_cycle_stats(struct nvgpu_channel *ch, int dmabuf_fd)
 		dmabuf = dma_buf_get(dmabuf_fd);
 		if (IS_ERR(dmabuf))
 			return PTR_ERR(dmabuf);
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+		err = dma_buf_vmap(dmabuf, &map);
+		virtual_address = err ? NULL : map.vaddr;
+#else
 		virtual_address = dma_buf_vmap(dmabuf);
+#endif
 		if (!virtual_address)
 			return -ENOMEM;
@@ -208,6 +224,9 @@ int gk20a_attach_cycle_stats_snapshot(struct nvgpu_channel *ch,
 	struct gk20a *g = ch->g;
 	struct gk20a_cs_snapshot_client_linux *client_linux;
 	struct gk20a_cs_snapshot_client *client;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	struct dma_buf_map map;
+#endif
 
 	nvgpu_mutex_acquire(&ch->cs_client_mutex);
 	if (ch->cs_client) {
@@ -236,8 +255,14 @@ int gk20a_attach_cycle_stats_snapshot(struct nvgpu_channel *ch,
 		goto err_put;
 	}
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	ret = dma_buf_vmap(client_linux->dma_handler, &map);
+	client->snapshot = ret ? NULL :
+		(struct gk20a_cs_snapshot_fifo *)map.vaddr;
+#else
 	client->snapshot = (struct gk20a_cs_snapshot_fifo *)
 		dma_buf_vmap(client_linux->dma_handler);
+#endif
 	if (!client->snapshot) {
 		ret = -ENOMEM;
 		goto err_put;
@@ -281,9 +306,18 @@ int gk20a_channel_free_cycle_stats_snapshot(struct nvgpu_channel *ch)
 	ret = nvgpu_css_detach(ch, ch->cs_client);
 
 	if (client_linux->dma_handler) {
-		if (ch->cs_client->snapshot)
+		if (ch->cs_client->snapshot) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+			struct dma_buf_map map;
+
+			dma_buf_map_set_vaddr(&map, ch->cs_client->snapshot);
+			dma_buf_vunmap(client_linux->dma_handler, &map);
+#else
 			dma_buf_vunmap(client_linux->dma_handler,
 					ch->cs_client->snapshot);
+#endif
+		}
 		dma_buf_put(client_linux->dma_handler);
 	}
@@ -340,7 +374,14 @@ static void gk20a_channel_free_error_notifiers(struct nvgpu_channel *ch)
 	nvgpu_mutex_acquire(&priv->error_notifier.mutex);
 	if (priv->error_notifier.dmabuf) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+		struct dma_buf_map map;
+
+		dma_buf_map_set_vaddr(&map, priv->error_notifier.vaddr);
+		dma_buf_vunmap(priv->error_notifier.dmabuf, &map);
+#else
 		dma_buf_vunmap(priv->error_notifier.dmabuf, priv->error_notifier.vaddr);
+#endif
 		dma_buf_put(priv->error_notifier.dmabuf);
 		priv->error_notifier.dmabuf = NULL;
 		priv->error_notifier.notification = NULL;
@@ -353,6 +394,10 @@ static int gk20a_init_error_notifier(struct nvgpu_channel *ch,
 		struct nvgpu_set_error_notifier *args)
 {
 	struct dma_buf *dmabuf;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	struct dma_buf_map map;
+	int err;
+#endif
 	void *va;
 	u64 end = args->offset + sizeof(struct nvgpu_notification);
 	struct nvgpu_channel_linux *priv = ch->os_priv;
@@ -380,7 +425,12 @@ static int gk20a_init_error_notifier(struct nvgpu_channel *ch,
 	nvgpu_speculation_barrier();
 
 	/* map handle */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	err = dma_buf_vmap(dmabuf, &map);
+	va = err ? NULL : map.vaddr;
+#else
 	va = dma_buf_vmap(dmabuf);
+#endif
 	if (!va) {
 		dma_buf_put(dmabuf);
 		pr_err("Cannot map notifier handle\n");
@@ -697,6 +747,9 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 		u32 payload, u32 timeout)
 {
 	struct dma_buf *dmabuf;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	struct dma_buf_map map;
+#endif
 	void *data;
 	int ret = 0;
@@ -724,7 +777,12 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 	nvgpu_speculation_barrier();
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	ret = dma_buf_vmap(dmabuf, &map);
+	data = ret ? NULL : map.vaddr;
+#else
 	data = dma_buf_vmap(dmabuf);
+#endif
 	if (!data) {
 		nvgpu_err(ch->g, "failed to map semaphore memory");
 		ret = -EINVAL;
@@ -737,7 +795,11 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 			nvgpu_channel_check_unserviceable(ch),
 			timeout);
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	dma_buf_vunmap(dmabuf, &map);
+#else
 	dma_buf_vunmap(dmabuf, data);
+#endif
 cleanup_put:
 	dma_buf_put(dmabuf);
 	return ret;
@@ -747,6 +809,9 @@ static int gk20a_channel_wait(struct nvgpu_channel *ch,
 		struct nvgpu_wait_args *args)
 {
 	struct dma_buf *dmabuf;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	struct dma_buf_map map;
+#endif
 	struct gk20a *g = ch->g;
 	struct notification *notif;
 	struct timespec64 tv;
@@ -783,12 +848,16 @@ static int gk20a_channel_wait(struct nvgpu_channel *ch,
 		nvgpu_speculation_barrier();
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+		ret = dma_buf_vmap(dmabuf, &map);
+		notif = ret ? NULL : map.vaddr;
+#else
 		notif = dma_buf_vmap(dmabuf);
+#endif
 		if (!notif) {
 			nvgpu_err(g, "failed to map notifier memory");
 			return -ENOMEM;
 		}
 
 		notif = (struct notification *)((uintptr_t)notif + offset);
 
 		/* user should set status pending before
@@ -816,7 +885,11 @@ static int gk20a_channel_wait(struct nvgpu_channel *ch,
 		notif->info16 = ch->chid; /* should be method offset */
 
 notif_clean_up:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+		dma_buf_vunmap(dmabuf, &map);
+#else
 		dma_buf_vunmap(dmabuf, notif);
+#endif
 		return ret;
 
 	case NVGPU_WAIT_TYPE_SEMAPHORE:

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2020-2021, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -351,6 +351,9 @@ static int nvgpu_prof_ioctl_alloc_pma_stream(struct nvgpu_profiler_object_priv *
 	struct mm_gk20a *mm = &g->mm;
 	u64 pma_bytes_available_buffer_offset;
 	struct dma_buf *dmabuf;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	struct dma_buf_map map;
+#endif
 	void *cpuva;
 	u32 pma_buffer_size;
 	int err;
@@ -405,7 +408,12 @@ static int nvgpu_prof_ioctl_alloc_pma_stream(struct nvgpu_profiler_object_priv *
 		goto err_unmap_pma;
 	}
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	err = dma_buf_vmap(dmabuf, &map);
+	cpuva = err ? NULL : map.vaddr;
+#else
 	cpuva = dma_buf_vmap(dmabuf);
+#endif
 	if (cpuva == NULL) {
 		err = -ENOMEM;
 		nvgpu_err(g, "failed to vmap available bytes buffer FD");
@@ -444,6 +452,9 @@ static void nvgpu_prof_free_pma_stream_priv_data(struct nvgpu_profiler_object_pr
 	struct nvgpu_profiler_object *prof = priv->prof;
 	struct gk20a *g = prof->g;
 	struct mm_gk20a *mm = &g->mm;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	struct dma_buf_map map;
+#endif
 
 	if (priv->pma_bytes_available_buffer_dmabuf == NULL) {
 		return;
@@ -456,8 +467,13 @@ static void nvgpu_prof_free_pma_stream_priv_data(struct nvgpu_profiler_object_pr
 	prof->pma_buffer_va = 0U;
 	prof->pma_buffer_size = 0U;
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	dma_buf_map_set_vaddr(&map, prof->pma_bytes_available_buffer_cpuva);
+	dma_buf_vunmap(priv->pma_bytes_available_buffer_dmabuf, &map);
+#else
 	dma_buf_vunmap(priv->pma_bytes_available_buffer_dmabuf,
 		prof->pma_bytes_available_buffer_cpuva);
+#endif
 	dma_buf_put(priv->pma_bytes_available_buffer_dmabuf);
 	priv->pma_bytes_available_buffer_dmabuf = NULL;
 	prof->pma_bytes_available_buffer_cpuva = NULL;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA Corporation.  All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA Corporation.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -655,6 +655,9 @@ static void trace_write_pushbuffer(struct nvgpu_channel *c,
 	unsigned int words;
 	u64 offset;
 	struct dma_buf *dmabuf = NULL;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	struct dma_buf_map map;
+#endif
 
 	if (gk20a_debug_trace_cmdbuf) {
 		u64 gpu_va = (u64)g->entry0 |
@@ -663,8 +666,14 @@ static void trace_write_pushbuffer(struct nvgpu_channel *c,
 		words = pbdma_gp_entry1_length_v(g->entry1);
 		err = nvgpu_vm_find_buf(c->vm, gpu_va, &dmabuf, &offset);
-		if (!err)
+		if (!err) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+			err = dma_buf_vmap(dmabuf, &map);
+			mem = err ? NULL : map.vaddr;
+#else
 			mem = dma_buf_vmap(dmabuf);
+#endif
+		}
 	}
 
 	if (mem) {
@@ -683,7 +692,11 @@ static void trace_write_pushbuffer(struct nvgpu_channel *c,
 				mem);
 		}
 #endif
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+		dma_buf_vunmap(dmabuf, &map);
+#else
 		dma_buf_vunmap(dmabuf, mem);
+#endif
 	}
 }