diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index ab9d0d411..cad53fa19 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -497,8 +497,7 @@ static int __nvgpu_gmmu_update_page_table_vidmem(struct vm_gk20a *vm,
 	 * Otherwise iterate across all the chunks in this allocation and
 	 * map them.
 	 */
-	sgl = sgt->sgl;
-	while (sgl) {
+	nvgpu_sgt_for_each_sgl(sgl, sgt) {
 		if (space_to_skip &&
 		    space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) {
 			space_to_skip -= nvgpu_sgt_get_length(sgt, sgl);
@@ -526,7 +525,6 @@ static int __nvgpu_gmmu_update_page_table_vidmem(struct vm_gk20a *vm,
 		 */
 		virt_addr += chunk_length;
 		length -= chunk_length;
-		sgl = nvgpu_sgt_get_next(sgt, sgl);
 
 		if (length == 0)
 			break;
@@ -544,7 +542,7 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm,
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	void *sgl;
-	int err;
+	int err = 0;
 
 	if (!sgt) {
 		/*
@@ -567,10 +565,8 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm,
 	 * mapping is simple since the "physical" address is actually a virtual
 	 * IO address and will be contiguous.
 	 */
-	sgl = sgt->sgl;
-
 	if (!g->mm.bypass_smmu) {
-		u64 io_addr = nvgpu_sgt_get_gpu_addr(sgt, g, sgl, attrs);
+		u64 io_addr = nvgpu_sgt_get_gpu_addr(sgt, g, sgt->sgl, attrs);
 
 		io_addr += space_to_skip;
 
@@ -588,7 +584,7 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm,
 	 * Finally: last possible case: do the no-IOMMU mapping. In this case we
 	 * really are mapping physical pages directly.
 	 */
-	while (sgl) {
+	nvgpu_sgt_for_each_sgl(sgl, sgt) {
 		u64 phys_addr;
 		u64 chunk_length;
 
@@ -616,7 +612,6 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm,
 		space_to_skip = 0;
 		virt_addr += chunk_length;
 		length -= chunk_length;
-		sgl = nvgpu_sgt_get_next(sgt, sgl);
 
 		if (length == 0)
 			break;
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
index 9d59f61dc..4f7d6248c 100644
--- a/drivers/gpu/nvgpu/common/pramin.c
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -105,7 +105,8 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
 
 	alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 	sgt = &alloc->sgt;
-	for (sgl = sgt->sgl; sgl; sgl = nvgpu_sgt_get_next(sgt, sgl)) {
+
+	nvgpu_sgt_for_each_sgl(sgl, sgt) {
 		if (offset >= nvgpu_sgt_get_length(sgt, sgl))
 			offset -= nvgpu_sgt_get_length(sgt, sgl);
 		else
diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
index c2f0e37be..23a1bad79 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
@@ -104,6 +104,14 @@ struct nvgpu_mem_sgl {
 	u64 length;
 };
 
+/*
+ * Iterate over the SGL entries in an SGT.
+ */
+#define nvgpu_sgt_for_each_sgl(__sgl__, __sgt__)		\
+	for ((__sgl__) = (__sgt__)->sgl;			\
+	     (__sgl__) != NULL;					\
+	     (__sgl__) = nvgpu_sgt_get_next(__sgt__, __sgl__))
+
 struct nvgpu_mem {
 	/*
 	 * Populated for all nvgpu_mem structs - vidmem or system.
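
A minimal sketch of a call site for the new iterator, for reference; it assumes an already-populated struct nvgpu_sgt *sgt (as in the gmmu.c call sites above) and uses only nvgpu_sgt_get_length(), which the converted code already relies on:

	void *sgl;
	u64 total = 0;

	/* Walk every SGL entry in the SGT and sum the chunk lengths. */
	nvgpu_sgt_for_each_sgl(sgl, sgt)
		total += nvgpu_sgt_get_length(sgt, sgl);

This replaces the open-coded pattern (sgl = sgt->sgl; while (sgl) { ...; sgl = nvgpu_sgt_get_next(sgt, sgl); }) and drops the easy-to-forget advance step at the end of each loop body.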