gpu: nvgpu: Add for_each construct for nvgpu_sgts

Add a macro for iterating across nvgpu_sgts. This helps developers
avoid accidentally forgetting to advance to the next SGL.

JIRA NVGPU-243

Change-Id: I90154a5d23f0014cb79bbcd5b6e8d8dbda303820
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1566627
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Alex Waterman
Date:         2017-09-19 15:28:00 -07:00
Committed by: mobile promotions
Parent:       0e8aee1c1a
Commit:       7a3dbdd43f

3 changed files with 14 additions and 10 deletions
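
For illustration, a sketch of the pattern this change replaces, using the loop
shapes taken from the diffs below (not part of the commit itself):

	/* Before: open-coded SGL walk; forgetting the advance step loops forever. */
	void *sgl = sgt->sgl;
	while (sgl) {
		/* ... use sgl ... */
		sgl = nvgpu_sgt_get_next(sgt, sgl);
	}

	/* After: the macro performs the initialization, test and advance. */
	void *sgl;
	nvgpu_sgt_for_each_sgl(sgl, sgt) {
		/* ... use sgl ... */
	}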


@@ -497,8 +497,7 @@ static int __nvgpu_gmmu_update_page_table_vidmem(struct vm_gk20a *vm,
 	 * Otherwise iterate across all the chunks in this allocation and
 	 * map them.
 	 */
-	sgl = sgt->sgl;
-	while (sgl) {
+	nvgpu_sgt_for_each_sgl(sgl, sgt) {
 		if (space_to_skip &&
 		    space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) {
 			space_to_skip -= nvgpu_sgt_get_length(sgt, sgl);
@@ -526,7 +525,6 @@ static int __nvgpu_gmmu_update_page_table_vidmem(struct vm_gk20a *vm,
 		 */
 		virt_addr += chunk_length;
 		length -= chunk_length;
-		sgl = nvgpu_sgt_get_next(sgt, sgl);
 
 		if (length == 0)
 			break;
@@ -544,7 +542,7 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm,
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	void *sgl;
-	int err;
+	int err = 0;
 
 	if (!sgt) {
 		/*
@@ -567,10 +565,8 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm,
 	 * mapping is simple since the "physical" address is actually a virtual
 	 * IO address and will be contiguous.
 	 */
-	sgl = sgt->sgl;
-
 	if (!g->mm.bypass_smmu) {
-		u64 io_addr = nvgpu_sgt_get_gpu_addr(sgt, g, sgl, attrs);
+		u64 io_addr = nvgpu_sgt_get_gpu_addr(sgt, g, sgt->sgl, attrs);
 
 		io_addr += space_to_skip;
 
@@ -588,7 +584,7 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm,
 	 * Finally: last possible case: do the no-IOMMU mapping. In this case we
 	 * really are mapping physical pages directly.
 	 */
-	while (sgl) {
+	nvgpu_sgt_for_each_sgl(sgl, sgt) {
 		u64 phys_addr;
 		u64 chunk_length;
 
@@ -616,7 +612,6 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm,
 		space_to_skip = 0;
 		virt_addr += chunk_length;
 		length -= chunk_length;
-		sgl = nvgpu_sgt_get_next(sgt, sgl);
 
 		if (length == 0)
 			break;


@@ -105,7 +105,8 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
 		alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 		sgt = &alloc->sgt;
-		for (sgl = sgt->sgl; sgl; sgl = nvgpu_sgt_get_next(sgt, sgl)) {
+
+		nvgpu_sgt_for_each_sgl(sgl, sgt) {
 			if (offset >= nvgpu_sgt_get_length(sgt, sgl))
 				offset -= nvgpu_sgt_get_length(sgt, sgl);
 			else


@@ -104,6 +104,14 @@ struct nvgpu_mem_sgl {
 	u64 length;
 };
 
+/*
+ * Iterate over the SGL entries in an SGT.
+ */
+#define nvgpu_sgt_for_each_sgl(__sgl__, __sgt__)			\
+	for ((__sgl__) = (__sgt__)->sgl;				\
+	     (__sgl__) != NULL;						\
+	     (__sgl__) = nvgpu_sgt_get_next(__sgt__, __sgl__))
+
 struct nvgpu_mem {
 	/*
 	 * Populated for all nvgpu_mem structs - vidmem or system.
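
As a usage sketch of the new macro (not part of this commit): the helper name
example_sgt_total_length is hypothetical, and the struct nvgpu_sgt parameter
type is assumed from the nvgpu_sgt_* accessor names seen in the hunks above.

static u64 example_sgt_total_length(struct nvgpu_sgt *sgt)
{
	void *sgl;
	u64 total = 0;

	/* Walk every SGL entry; the macro handles init, test and advance. */
	nvgpu_sgt_for_each_sgl(sgl, sgt) {
		total += nvgpu_sgt_get_length(sgt, sgl);
	}

	return total;
}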