gpu: nvgpu: Add support for Linux v5.11

For Linux v5.11, commit 6619ccf1bb1d ("dma-buf: Use struct dma_buf_map
in dma_buf_vmap() interfaces") changes the dma_buf_vmap() and
dma_buf_vunmap() APIs to take a new parameter of type
'struct dma_buf_map'. Update NVGPU to support these updated APIs for
Linux v5.11+.

Finally, the legacy dma_buf_vmap() API returns NULL on error rather
than an error code, so correct the test of its return value in
gk20a_cde_convert().
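
To illustrate the pattern used throughout this change, the two call
sequences look roughly as follows (a sketch only, not code from this
patch; do_something() is a placeholder for whatever the caller does
with the mapping):

    #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
        struct dma_buf_map map;
        int err;

        /* v5.11+: returns 0 on success, a negative errno on failure */
        err = dma_buf_vmap(dmabuf, &map);
        if (err)
            return err;
        do_something(map.vaddr);
        dma_buf_vunmap(dmabuf, &map);
    #else
        /* legacy: returns the vaddr, or NULL (not ERR_PTR) on failure */
        void *vaddr = dma_buf_vmap(dmabuf);

        if (!vaddr)
            return -ENOMEM;
        do_something(vaddr);
        dma_buf_vunmap(dmabuf, vaddr);
    #endif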

Bug 200687525

Change-Id: Ie20f101e965fa0f2c650d9b30ff4558ce1256c12
Signed-off-by: Jon Hunter <jonathanh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2469555
Reviewed-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit

4 files changed, 127 insertions(+), 10 deletions(-)

@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics channel
  *
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -136,8 +136,15 @@ void gk20a_channel_free_cycle_stats_buffer(struct nvgpu_channel *ch)
 	/* disable existing cyclestats buffer */
 	nvgpu_mutex_acquire(&ch->cyclestate.cyclestate_buffer_mutex);
 	if (priv->cyclestate_buffer_handler) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+		struct dma_buf_map map;
+
+		dma_buf_map_set_vaddr(&map, ch->cyclestate.cyclestate_buffer);
+		dma_buf_vunmap(priv->cyclestate_buffer_handler, &map);
+#else
 		dma_buf_vunmap(priv->cyclestate_buffer_handler,
 				ch->cyclestate.cyclestate_buffer);
+#endif
 		dma_buf_put(priv->cyclestate_buffer_handler);
 		priv->cyclestate_buffer_handler = NULL;
 		ch->cyclestate.cyclestate_buffer = NULL;
@@ -149,6 +156,10 @@ void gk20a_channel_free_cycle_stats_buffer(struct nvgpu_channel *ch)
 int gk20a_channel_cycle_stats(struct nvgpu_channel *ch, int dmabuf_fd)
 {
 	struct dma_buf *dmabuf;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	struct dma_buf_map map;
+	int err;
+#endif
 	void *virtual_address;
 	struct nvgpu_channel_linux *priv = ch->os_priv;
@@ -162,7 +173,12 @@ int gk20a_channel_cycle_stats(struct nvgpu_channel *ch, int dmabuf_fd)
 	dmabuf = dma_buf_get(dmabuf_fd);
 	if (IS_ERR(dmabuf))
 		return PTR_ERR(dmabuf);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	err = dma_buf_vmap(dmabuf, &map);
+	virtual_address = err ? NULL : map.vaddr;
+#else
 	virtual_address = dma_buf_vmap(dmabuf);
+#endif
 	if (!virtual_address)
 		return -ENOMEM;
@@ -208,6 +224,9 @@ int gk20a_attach_cycle_stats_snapshot(struct nvgpu_channel *ch,
 	struct gk20a *g = ch->g;
 	struct gk20a_cs_snapshot_client_linux *client_linux;
 	struct gk20a_cs_snapshot_client *client;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	struct dma_buf_map map;
+#endif
 
 	nvgpu_mutex_acquire(&ch->cs_client_mutex);
 	if (ch->cs_client) {
@@ -236,8 +255,14 @@ int gk20a_attach_cycle_stats_snapshot(struct nvgpu_channel *ch,
 		goto err_put;
 	}
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	ret = dma_buf_vmap(client_linux->dma_handler, &map);
+	client->snapshot = ret ? NULL :
+		(struct gk20a_cs_snapshot_fifo *)map.vaddr;
+#else
 	client->snapshot = (struct gk20a_cs_snapshot_fifo *)
 			dma_buf_vmap(client_linux->dma_handler);
+#endif
 	if (!client->snapshot) {
 		ret = -ENOMEM;
 		goto err_put;
@@ -281,9 +306,18 @@ int gk20a_channel_free_cycle_stats_snapshot(struct nvgpu_channel *ch)
 	ret = nvgpu_css_detach(ch, ch->cs_client);
 
 	if (client_linux->dma_handler) {
-		if (ch->cs_client->snapshot)
+		if (ch->cs_client->snapshot) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+			struct dma_buf_map map;
+
+			dma_buf_map_set_vaddr(&map, ch->cs_client->snapshot);
+			dma_buf_vunmap(client_linux->dma_handler, &map);
+#else
 			dma_buf_vunmap(client_linux->dma_handler,
 					ch->cs_client->snapshot);
+#endif
+		}
 		dma_buf_put(client_linux->dma_handler);
 	}
@@ -340,7 +374,14 @@ static void gk20a_channel_free_error_notifiers(struct nvgpu_channel *ch)
 	nvgpu_mutex_acquire(&priv->error_notifier.mutex);
 	if (priv->error_notifier.dmabuf) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+		struct dma_buf_map map;
+
+		dma_buf_map_set_vaddr(&map, priv->error_notifier.vaddr);
+		dma_buf_vunmap(priv->error_notifier.dmabuf, &map);
+#else
 		dma_buf_vunmap(priv->error_notifier.dmabuf, priv->error_notifier.vaddr);
+#endif
 		dma_buf_put(priv->error_notifier.dmabuf);
 		priv->error_notifier.dmabuf = NULL;
 		priv->error_notifier.notification = NULL;
@@ -353,6 +394,10 @@ static int gk20a_init_error_notifier(struct nvgpu_channel *ch,
 		struct nvgpu_set_error_notifier *args)
 {
 	struct dma_buf *dmabuf;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	struct dma_buf_map map;
+	int err;
+#endif
 	void *va;
 	u64 end = args->offset + sizeof(struct nvgpu_notification);
 	struct nvgpu_channel_linux *priv = ch->os_priv;
@@ -380,7 +425,12 @@ static int gk20a_init_error_notifier(struct nvgpu_channel *ch,
 	nvgpu_speculation_barrier();
 
 	/* map handle */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	err = dma_buf_vmap(dmabuf, &map);
+	va = err ? NULL : map.vaddr;
+#else
 	va = dma_buf_vmap(dmabuf);
+#endif
 	if (!va) {
 		dma_buf_put(dmabuf);
 		pr_err("Cannot map notifier handle\n");
@@ -697,6 +747,9 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 		u32 payload, u32 timeout)
 {
 	struct dma_buf *dmabuf;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	struct dma_buf_map map;
+#endif
 	void *data;
 	int ret = 0;
@@ -724,7 +777,12 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 	nvgpu_speculation_barrier();
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	ret = dma_buf_vmap(dmabuf, &map);
+	data = ret ? NULL : map.vaddr;
+#else
 	data = dma_buf_vmap(dmabuf);
+#endif
 	if (!data) {
 		nvgpu_err(ch->g, "failed to map semaphore memory");
 		ret = -EINVAL;
@@ -737,7 +795,11 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 			nvgpu_channel_check_unserviceable(ch),
 			timeout);
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	dma_buf_vunmap(dmabuf, &map);
+#else
 	dma_buf_vunmap(dmabuf, data);
+#endif
 
 cleanup_put:
 	dma_buf_put(dmabuf);
 	return ret;
@@ -747,6 +809,9 @@ static int gk20a_channel_wait(struct nvgpu_channel *ch,
 		struct nvgpu_wait_args *args)
 {
 	struct dma_buf *dmabuf;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+	struct dma_buf_map map;
+#endif
 	struct gk20a *g = ch->g;
 	struct notification *notif;
 	struct timespec64 tv;
@@ -783,12 +848,16 @@ static int gk20a_channel_wait(struct nvgpu_channel *ch,
 		nvgpu_speculation_barrier();
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+		ret = dma_buf_vmap(dmabuf, &map);
+		notif = ret ? NULL : map.vaddr;
+#else
 		notif = dma_buf_vmap(dmabuf);
+#endif
 		if (!notif) {
 			nvgpu_err(g, "failed to map notifier memory");
 			return -ENOMEM;
 		}
 
 		notif = (struct notification *)((uintptr_t)notif + offset);
 
 		/* user should set status pending before
@@ -816,7 +885,11 @@ static int gk20a_channel_wait(struct nvgpu_channel *ch,
 		notif->info16 = ch->chid; /* should be method offset */
 
 notif_clean_up:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
+		dma_buf_vunmap(dmabuf, &map);
+#else
 		dma_buf_vunmap(dmabuf, notif);
+#endif
 		return ret;
 
 	case NVGPU_WAIT_TYPE_SEMAPHORE: