gpu: nvgpu: sim: make ring buffer independent of PAGE_SIZE

The simulator ring buffer DMA interface supports buffer sizes of 4, 8, 12 and
16K. At present it is configured for 4K, which happens to match the kernel
PAGE_SIZE used to wrap the GET/PUT pointers back to the start of the buffer
once 4K is reached. That match does not always hold; on a kernel built with
64K pages, for instance, wrapping at PAGE_SIZE would run well past the buffer.
Hence, replace PAGE_SIZE with SIM_BFR_SIZE in the wrap logic.
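
For illustration, a minimal sketch of the wrap arithmetic involved, assuming
the 4K configuration; sim_ring_advance and its body are hypothetical, written
for this example rather than taken from the driver:

    #include <stdint.h>

    #define SIM_BFR_SIZE 4096u  /* one of the supported sizes: 4, 8, 12, 16K */

    /* Advance a GET/PUT pointer and wrap it at the ring buffer size.
     * Wrapping at PAGE_SIZE is only correct while PAGE_SIZE == 4K; on a
     * 64K-page kernel the pointer would run far past the 4K buffer. */
    static uint32_t sim_ring_advance(uint32_t ptr, uint32_t bytes)
    {
        return (ptr + bytes) % SIM_BFR_SIZE;
    }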

In addition, introduce the macro NVGPU_CPU_PAGE_SIZE as an alias for
PAGE_SIZE, and replace the latter with the former throughout.
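
A sketch of the alias, assuming it lives in a common nvgpu header (the exact
location is not shown in this excerpt):

    /* Alias the kernel's PAGE_SIZE under a name that makes "CPU page
     * size" explicit, so it can no longer be confused with the fixed
     * simulator ring buffer size. */
    #define NVGPU_CPU_PAGE_SIZE PAGE_SIZE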

Bug 200658101
Jira NVGPU-6018

Change-Id: I83cc62b87291734015c51f3e5a98173549e065de
Signed-off-by: Antony Clince Alex <aalex@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2420728
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>

commit c36752fe3d (parent 09857ecd91)
Author:    Antony Clince Alex
Date:      2020-09-28 16:44:40 +05:30
Committer: Alex Waterman

37 files changed, 120 insertions(+), 103 deletions(-)

@@ -180,7 +180,7 @@ static void *nvgpu_dma_alloc_no_iommu(struct device *dev, size_t size,
 	struct page **pages;
 	int i = 0;
 
-	if (array_size <= PAGE_SIZE)
+	if (array_size <= NVGPU_CPU_PAGE_SIZE)
 		pages = kzalloc(array_size, GFP_KERNEL);
 	else
 		pages = vzalloc(array_size);
@@ -205,7 +205,7 @@ static void *nvgpu_dma_alloc_no_iommu(struct device *dev, size_t size,
 			pages[i + j] = pages[i] + j;
 		}
 
-		memset(page_address(pages[i]), 0, PAGE_SIZE << order);
+		memset(page_address(pages[i]), 0, NVGPU_CPU_PAGE_SIZE << order);
 		i += 1 << order;
 		count -= 1 << order;
@@ -216,7 +216,7 @@ static void *nvgpu_dma_alloc_no_iommu(struct device *dev, size_t size,
 	return (void *)pages;
 
 error:
-	__nvgpu_dma_free_no_iommu(pages, i, array_size > PAGE_SIZE);
+	__nvgpu_dma_free_no_iommu(pages, i, array_size > NVGPU_CPU_PAGE_SIZE);
 	return NULL;
 }
@@ -228,7 +228,7 @@ static void nvgpu_dma_free_no_iommu(size_t size, void *vaddr)
 	WARN_ON(!pages);
 
-	__nvgpu_dma_free_no_iommu(pages, count, array_size > PAGE_SIZE);
+	__nvgpu_dma_free_no_iommu(pages, count, array_size > NVGPU_CPU_PAGE_SIZE);
 }
 
 /* Check if IOMMU is available and if GPU uses it */
@@ -570,7 +570,7 @@ int nvgpu_get_sgtable_from_pages(struct gk20a *g, struct sg_table **sgt,
 	}
 
 	err = sg_alloc_table_from_pages(tbl, pages,
-			DIV_ROUND_UP(size, PAGE_SIZE),
+			DIV_ROUND_UP(size, NVGPU_CPU_PAGE_SIZE),
 			0, size, GFP_KERNEL);
 	if (err)
 		goto fail;
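
As a sanity check on the last hunk, a user-space sketch of the page-count
arithmetic; DIV_ROUND_UP matches the kernel macro's definition, and the sizes
are illustrative:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long size = 200000UL;  /* allocation size in bytes */

        /* sg_alloc_table_from_pages() needs one entry per CPU page, so
         * the divisor must track the kernel's real page size. */
        printf("4K pages:  %lu\n", DIV_ROUND_UP(size, 4096UL));   /* 49 */
        printf("64K pages: %lu\n", DIV_ROUND_UP(size, 65536UL));  /* 4 */
        return 0;
    }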