mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 10:34:43 +03:00
gpu: nvgpu: Fix IPA to PA translation
Background: In Hypervisor mode the dGPU device is configured in pass-through mode for the Guest (QNX/Linux). GMMU programming is handled by the guest, which converts a mapped buffer's GVA into SGLs in IPA (Intermediate/Guest Physical Address); the IPA is then translated into PA (Actual Physical Address) and the guest programs the GMMU PTEs with the correct GVA to PA mapping. In case of the vGPU this work is delegated to the RM server, which takes care of the GMMU programming and the IPA to PA conversion. Problem: The current GMMU mapping logic in the guest assumes that the PA range is contiguous over a given IPA range. Hence, it doesn't account for holes being present in the PA range. But this is not always the case: a contiguous IPA range can be mapped to discontiguous PA ranges. In this situation the mapping logic sets up GMMU PTEs ignoring the holes in physical memory and creates a GVA => PA mapping which intrudes into PA ranges that are reserved. This results in memory being corrupted. This change takes into account holes being present in a given PA range; for a given IPA range it also identifies the discontiguous PA ranges and sets up the PTEs appropriately. Bug 200451447 Jira VQRM-5069 Change-Id: I354d984f6c44482e4576a173fce1e90ab52283ac Signed-off-by: aalex <aalex@nvidia.com> Signed-off-by: Antony Clince Alex <aalex@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1850972 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
@@ -36,6 +36,11 @@
|
||||
#include "gk20a/mm_gk20a.h"
|
||||
#include "platform_gk20a.h"
|
||||
|
||||
/*
 * Return the intermediate physical address (IPA) of an SGL entry.
 * In a virtualized configuration this may differ from the real PA
 * (see __nvgpu_sgl_phys); no translation is attempted here.
 */
static u64 __nvgpu_sgl_ipa(struct gk20a *g, struct nvgpu_sgl *sgl)
{
	struct scatterlist *sg = (struct scatterlist *)sgl;

	return sg_phys(sg);
}
|
||||
|
||||
static u64 __nvgpu_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
|
||||
{
|
||||
struct device *dev = dev_from_gk20a(g);
|
||||
@@ -43,7 +48,7 @@ static u64 __nvgpu_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
|
||||
u64 ipa = sg_phys((struct scatterlist *)sgl);
|
||||
|
||||
if (platform->phys_addr)
|
||||
return platform->phys_addr(g, ipa);
|
||||
return platform->phys_addr(g, ipa, NULL);
|
||||
|
||||
return ipa;
|
||||
}
|
||||
/* nvgpu_sgt_ops hook: advance to the next entry in the scatterlist. */
static struct nvgpu_sgl *nvgpu_mem_linux_sgl_next(struct nvgpu_sgl *sgl)
{
	struct scatterlist *sg = (struct scatterlist *)sgl;

	return (struct nvgpu_sgl *)sg_next(sg);
}
|
||||
|
||||
/* nvgpu_sgt_ops hook: fetch the IPA of an SGL entry (thin wrapper). */
static u64 nvgpu_mem_linux_sgl_ipa(struct gk20a *g, struct nvgpu_sgl *sgl)
{
	u64 ipa = __nvgpu_sgl_ipa(g, sgl);

	return ipa;
}
|
||||
|
||||
/*
 * nvgpu_sgt_ops hook: convert an IPA to a PA.
 *
 * When the platform supplies a phys_addr() hook it performs the
 * translation; presumably it also reports via *pa_len how many bytes are
 * physically contiguous at the returned PA (so callers can walk
 * discontiguous PA ranges) — confirm against the platform implementation.
 * Without a hook the IPA already is a PA and is returned unchanged.
 */
static u64 nvgpu_mem_linux_sgl_ipa_to_pa(struct gk20a *g,
		struct nvgpu_sgl *sgl, u64 ipa, u64 *pa_len)
{
	struct gk20a_platform *platform =
		gk20a_get_platform(dev_from_gk20a(g));

	if (platform->phys_addr != NULL)
		return platform->phys_addr(g, ipa, pa_len);

	return ipa;
}
|
||||
|
||||
static u64 nvgpu_mem_linux_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
|
||||
{
|
||||
return (u64)__nvgpu_sgl_phys(g, sgl);
|
||||
@@ -301,6 +323,8 @@ static void nvgpu_mem_linux_sgl_free(struct gk20a *g, struct nvgpu_sgt *sgt)
|
||||
static const struct nvgpu_sgt_ops nvgpu_linux_sgt_ops = {
|
||||
.sgl_next = nvgpu_mem_linux_sgl_next,
|
||||
.sgl_phys = nvgpu_mem_linux_sgl_phys,
|
||||
.sgl_ipa = nvgpu_mem_linux_sgl_ipa,
|
||||
.sgl_ipa_to_pa = nvgpu_mem_linux_sgl_ipa_to_pa,
|
||||
.sgl_dma = nvgpu_mem_linux_sgl_dma,
|
||||
.sgl_length = nvgpu_mem_linux_sgl_length,
|
||||
.sgl_gpu_addr = nvgpu_mem_linux_sgl_gpu_addr,
|
||||
|
||||
Reference in New Issue
Block a user