gpu: nvgpu: replace dma_buf_kmap with dma_buf_vmap

dma_buf_kmap was introduced a decade ago, when 32-bit systems were
fairly common, to map a dma_buf partially — one page at a time by
page index — so as to avoid exhausting the limited vmalloc space.
Starting from kernel 5.6 it is deprecated, as vmap calls should
succeed with the larger vmalloc space now available.

Use dma_buf_vmap/vunmap instead of dma_buf_kmap/kunmap for handling
mapping of notifier memory in gk20a_channel_wait_semaphore.

Also update the debug prints and add speculation barrier to the
start of gk20a_channel_wait.

Bug 2925664

Change-Id: I49078fa81f050a57a5b66a793e62006dd66e3ba3
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2326513
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Sagar Kamble
2020-04-09 11:21:52 +05:30
committed by Alex Waterman
parent b029f3b2b0
commit 72d01afd0c

View File

@@ -652,20 +652,33 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 		return -ETIMEDOUT;
 	}
 
+	if (!IS_ALIGNED(offset, 4)) {
+		nvgpu_err(ch->g, "invalid semaphore offset %u", offset);
+		return -EINVAL;
+	}
+
 	dmabuf = dma_buf_get(id);
 	if (IS_ERR(dmabuf)) {
-		nvgpu_err(ch->g, "invalid notifier nvmap handle 0x%lx", id);
+		nvgpu_err(ch->g, "invalid semaphore dma_buf handle 0x%lx", id);
 		return -EINVAL;
 	}
 
-	data = dma_buf_kmap(dmabuf, offset >> PAGE_SHIFT);
-	if (!data) {
-		nvgpu_err(ch->g, "failed to map notifier memory");
+	if (offset > (dmabuf->size - sizeof(u32))) {
+		nvgpu_err(ch->g, "invalid semaphore offset %u", offset);
 		ret = -EINVAL;
 		goto cleanup_put;
 	}
 
-	semaphore = data + (offset & ~PAGE_MASK);
+	nvgpu_speculation_barrier();
+
+	data = dma_buf_vmap(dmabuf);
+	if (!data) {
+		nvgpu_err(ch->g, "failed to map semaphore memory");
+		ret = -EINVAL;
+		goto cleanup_put;
+	}
+
+	semaphore = (u32 *)((uintptr_t)data + offset);
 
 	ret = NVGPU_COND_WAIT_INTERRUPTIBLE(
 		&ch->semaphore_wq,
@@ -673,7 +686,7 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 		nvgpu_channel_check_unserviceable(ch),
 		timeout);
 
-	dma_buf_kunmap(dmabuf, offset >> PAGE_SHIFT, data);
+	dma_buf_vunmap(dmabuf, data);
 cleanup_put:
 	dma_buf_put(dmabuf);
 	return ret;
@@ -706,7 +719,7 @@ static int gk20a_channel_wait(struct nvgpu_channel *ch,
 	dmabuf = dma_buf_get(id);
 	if (IS_ERR(dmabuf)) {
-		nvgpu_err(g, "invalid notifier nvmap handle 0x%lx",
+		nvgpu_err(g, "invalid notifier dma_buf handle 0x%lx",
 			id);
 		return -EINVAL;
 	}