mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-22 09:12:24 +03:00
gpu: nvgpu: update kmdi interfaces
Patch updates/fixes the following issues. - Updates nvgpu_dbg_gpu_get_mappings_entry.size to u64 to address >4G limitations. - Removes offset from original cpuva and unmaps only original mapped address. - Call nvgpu_vm_find_mapped_buf_range() in place of nvgpu_vm_find_mapped_buf() to find the addresses which are not page aligned. - Update logic to parse the gpuva while trying to find gpu mappings so that gpuva which are more than the mapped buffer base address can also be considered. Bug 200722275 Change-Id: If33d85db37a9f03a662984c212544a8b2ade471c Signed-off-by: prsethi <prsethi@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2612129 Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com> Reviewed-by: Dinesh T <dt@nvidia.com> Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com> GVS: Gerrit_Virtual_Submit
This commit is contained in:
committed by
mobile promotions
parent
ad884ffa53
commit
3651d1150d
@@ -2294,13 +2294,13 @@ static void nvgpu_dbg_gpu_get_valid_mappings(struct nvgpu_channel *ch, u64 start
|
|||||||
{
|
{
|
||||||
struct vm_gk20a *vm = ch->vm;
|
struct vm_gk20a *vm = ch->vm;
|
||||||
u64 key = start;
|
u64 key = start;
|
||||||
u32 size = 0;
|
u64 size = 0;
|
||||||
struct nvgpu_mapped_buf *mbuf_curr = NULL;
|
struct nvgpu_mapped_buf *mapped_buf = NULL;
|
||||||
struct nvgpu_mapped_buf *mbuf_last = NULL;
|
|
||||||
struct nvgpu_rbtree_node *node = NULL;
|
struct nvgpu_rbtree_node *node = NULL;
|
||||||
struct dma_buf *dmabuf = NULL;
|
struct dma_buf *dmabuf = NULL;
|
||||||
u32 f_mode = FMODE_READ;
|
u32 f_mode = FMODE_READ;
|
||||||
u32 count = 0;
|
u32 count = 0;
|
||||||
|
u64 offset = 0;
|
||||||
bool just_count = *buf_count ? false : true;
|
bool just_count = *buf_count ? false : true;
|
||||||
|
|
||||||
nvgpu_mutex_acquire(&vm->update_gmmu_lock);
|
nvgpu_mutex_acquire(&vm->update_gmmu_lock);
|
||||||
@@ -2308,17 +2308,17 @@ static void nvgpu_dbg_gpu_get_valid_mappings(struct nvgpu_channel *ch, u64 start
|
|||||||
nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers);
|
nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers);
|
||||||
|
|
||||||
while (node != NULL) {
|
while (node != NULL) {
|
||||||
mbuf_curr = mapped_buffer_from_rbtree_node(node);
|
mapped_buf = mapped_buffer_from_rbtree_node(node);
|
||||||
dmabuf = mbuf_curr->os_priv.dmabuf;
|
dmabuf = mapped_buf->os_priv.dmabuf;
|
||||||
|
|
||||||
/* Find first key node */
|
/* Find first key node */
|
||||||
if (key > (mbuf_curr->addr + mbuf_curr->size)) {
|
if (key > (mapped_buf->addr + mapped_buf->size)) {
|
||||||
nvgpu_rbtree_enum_next(&node, node);
|
nvgpu_rbtree_enum_next(&node, node);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (key < mbuf_curr->addr) {
|
if (key < mapped_buf->addr) {
|
||||||
key = mbuf_curr->addr;
|
key = mapped_buf->addr;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (key >= end) {
|
if (key >= end) {
|
||||||
@@ -2332,18 +2332,17 @@ static void nvgpu_dbg_gpu_get_valid_mappings(struct nvgpu_channel *ch, u64 start
|
|||||||
* count to get the correct buffer index as it was increased in
|
* count to get the correct buffer index as it was increased in
|
||||||
* last iteration.
|
* last iteration.
|
||||||
*/
|
*/
|
||||||
|
if ((offset + size == mapped_buf->addr) && count &&
|
||||||
if (mbuf_last &&
|
(f_mode == dmabuf->file->f_mode)) {
|
||||||
(mbuf_last->addr + mbuf_last->size == mbuf_curr->addr)
|
|
||||||
&& (f_mode == dmabuf->file->f_mode)) {
|
|
||||||
count--;
|
count--;
|
||||||
size += min(end, mbuf_curr->addr
|
size += min(end, mapped_buf->addr
|
||||||
+ mbuf_curr->size) - key;
|
+ mapped_buf->size) - key;
|
||||||
} else {
|
} else {
|
||||||
size = min(end, mbuf_curr->addr
|
size = min(end, mapped_buf->addr
|
||||||
+ mbuf_curr->size) - key;
|
+ mapped_buf->size) - key;
|
||||||
|
offset = key;
|
||||||
if (just_count == false) {
|
if (just_count == false) {
|
||||||
buffer[count].gpu_va = mbuf_curr->addr;
|
buffer[count].gpu_va = offset;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2357,7 +2356,6 @@ static void nvgpu_dbg_gpu_get_valid_mappings(struct nvgpu_channel *ch, u64 start
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
mbuf_last = mbuf_curr;
|
|
||||||
f_mode = dmabuf->file->f_mode;
|
f_mode = dmabuf->file->f_mode;
|
||||||
nvgpu_rbtree_enum_next(&node, node);
|
nvgpu_rbtree_enum_next(&node, node);
|
||||||
}
|
}
|
||||||
@@ -2447,21 +2445,21 @@ static int nvgpu_gpu_access_sysmem_gpu_va(struct gk20a *g, u8 cmd, u32 size,
|
|||||||
|
|
||||||
ret = dma_buf_vmap(dmabuf, &map);
|
ret = dma_buf_vmap(dmabuf, &map);
|
||||||
cpu_va = ret ? NULL : map.vaddr;
|
cpu_va = ret ? NULL : map.vaddr;
|
||||||
|
#else
|
||||||
|
cpu_va = (u8 *)dma_buf_vmap(dmabuf);
|
||||||
|
#endif
|
||||||
|
|
||||||
if (!cpu_va) {
|
if (!cpu_va) {
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
#else
|
|
||||||
cpu_va = (u8 *)dma_buf_vmap(dmabuf) + offset;
|
|
||||||
#endif
|
|
||||||
|
|
||||||
switch (cmd) {
|
switch (cmd) {
|
||||||
case NVGPU_DBG_GPU_IOCTL_ACCESS_GPUVA_CMD_READ:
|
case NVGPU_DBG_GPU_IOCTL_ACCESS_GPUVA_CMD_READ:
|
||||||
nvgpu_memcpy((u8 *)data, cpu_va, size);
|
nvgpu_memcpy((u8 *)data, cpu_va + offset, size);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case NVGPU_DBG_GPU_IOCTL_ACCESS_GPUVA_CMD_WRITE:
|
case NVGPU_DBG_GPU_IOCTL_ACCESS_GPUVA_CMD_WRITE:
|
||||||
nvgpu_memcpy(cpu_va, (u8 *)data, size);
|
nvgpu_memcpy(cpu_va + offset, (u8 *)data, size);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
@@ -2596,7 +2594,7 @@ static int nvgpu_dbg_gpu_access_gpu_va_mapping(struct gk20a *g,
|
|||||||
|
|
||||||
nvgpu_mutex_acquire(&vm->update_gmmu_lock);
|
nvgpu_mutex_acquire(&vm->update_gmmu_lock);
|
||||||
while (size > 0) {
|
while (size > 0) {
|
||||||
mapped_buf = nvgpu_vm_find_mapped_buf(vm, gpu_va);
|
mapped_buf = nvgpu_vm_find_mapped_buf_range(vm, gpu_va);
|
||||||
if (mapped_buf == NULL) {
|
if (mapped_buf == NULL) {
|
||||||
nvgpu_err(g, "gpuva is not mapped");
|
nvgpu_err(g, "gpuva is not mapped");
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
|
|||||||
@@ -546,8 +546,7 @@ struct nvgpu_dbg_gpu_get_mappings_entry {
|
|||||||
/* out: start of GPU VA for this mapping */
|
/* out: start of GPU VA for this mapping */
|
||||||
__u64 gpu_va;
|
__u64 gpu_va;
|
||||||
/* out: size in bytes of this mapping */
|
/* out: size in bytes of this mapping */
|
||||||
__u32 size;
|
__u64 size;
|
||||||
__u32 reserved;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct nvgpu_dbg_gpu_get_mappings_args {
|
struct nvgpu_dbg_gpu_get_mappings_args {
|
||||||
|
|||||||
Reference in New Issue
Block a user