gpu: nvgpu: Return error code properly from nvgpu_vm_map_linux

The function nvgpu_vm_map_linux() used to return the GPU VA on a
successful map, or 0 when the mapping failed. However, this scheme does
not propagate the actual map error back to userspace.

So, modify the function: return an error code, and on success return
the GPU VA through a pointer argument.

Bug 1705731

Change-Id: I2174b5fbaf64dcb00f9567dab1c583d6ddfa5d78
Signed-off-by: Sami Kiminki <skiminki@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1590961
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Sami Kiminki
2017-11-02 21:28:42 +02:00
committed by mobile promotions
parent fe1e09d473
commit 9f5f029ae2
3 changed files with 26 additions and 20 deletions

View File

@@ -1046,15 +1046,16 @@ __releases(&l->cde_app->mutex)
/* map the destination buffer */
get_dma_buf(compbits_scatter_buf); /* a ref for nvgpu_vm_map_linux */
map_vaddr = nvgpu_vm_map_linux(cde_ctx->vm, compbits_scatter_buf, 0,
err = nvgpu_vm_map_linux(cde_ctx->vm, compbits_scatter_buf, 0,
NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE |
NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL,
NV_KIND_INVALID,
compbits_kind, /* incompressible kind */
gk20a_mem_flag_none,
map_offset, map_size,
NULL);
if (!map_vaddr) {
NULL,
&map_vaddr);
if (err) {
dma_buf_put(compbits_scatter_buf);
err = -EINVAL;
goto exit_idle;

View File

@@ -201,7 +201,7 @@ static u64 __nvgpu_vm_find_mapping(struct vm_gk20a *vm,
return mapped_buffer->addr;
}
u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
int nvgpu_vm_map_linux(struct vm_gk20a *vm,
struct dma_buf *dmabuf,
u64 offset_align,
u32 flags,
@@ -210,7 +210,8 @@ u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
int rw_flag,
u64 buffer_offset,
u64 mapping_size,
struct vm_gk20a_mapping_batch *batch)
struct vm_gk20a_mapping_batch *batch,
u64 *gpu_va)
{
struct gk20a *g = gk20a_from_vm(vm);
struct device *dev = dev_from_gk20a(g);
@@ -263,12 +264,14 @@ u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
flags, map_key_kind, rw_flag);
if (map_offset) {
nvgpu_mutex_release(&vm->update_gmmu_lock);
return map_offset;
*gpu_va = map_offset;
return 0;
}
}
sgt = gk20a_mm_pin(dev, dmabuf);
if (IS_ERR(sgt)) {
err = PTR_ERR(sgt);
nvgpu_warn(g, "oom allocating tracking buffer");
goto clean_up;
}
@@ -424,7 +427,8 @@ u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
nvgpu_mutex_release(&vm->update_gmmu_lock);
return map_offset;
*gpu_va = map_offset;
return 0;
clean_up:
nvgpu_kfree(g, mapped_buffer);
@@ -435,7 +439,7 @@ clean_up:
nvgpu_mutex_release(&vm->update_gmmu_lock);
nvgpu_log_info(g, "err=%d", err);
return 0;
return err;
}
int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
@@ -483,18 +487,18 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
return err;
}
ret_va = nvgpu_vm_map_linux(vm, dmabuf, *offset_align,
flags, compr_kind, incompr_kind,
gk20a_mem_flag_none,
buffer_offset,
mapping_size,
batch);
err = nvgpu_vm_map_linux(vm, dmabuf, *offset_align,
flags, compr_kind, incompr_kind,
gk20a_mem_flag_none,
buffer_offset,
mapping_size,
batch,
&ret_va);
*offset_align = ret_va;
if (!ret_va) {
if (!err)
*offset_align = ret_va;
else
dma_buf_put(dmabuf);
err = -EINVAL;
}
return err;
}

View File

@@ -38,7 +38,7 @@ struct vm_gk20a;
struct vm_gk20a_mapping_batch;
struct nvgpu_vm_area;
u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
int nvgpu_vm_map_linux(struct vm_gk20a *vm,
struct dma_buf *dmabuf,
u64 offset_align,
u32 flags,
@@ -60,7 +60,8 @@ u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
int rw_flag,
u64 buffer_offset,
u64 mapping_size,
struct vm_gk20a_mapping_batch *mapping_batch);
struct vm_gk20a_mapping_batch *mapping_batch,
u64 *gpu_va);
/*
* Notes: