Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
Background: In hypervisor mode, the dGPU is configured in pass-through mode for the guest (QNX/Linux). GMMU programming is handled by the guest, which converts a mapped buffer's GVA into SGLs in IPA (intermediate/guest physical address) space, translates each IPA into a PA (actual physical address), and programs the GMMU PTEs with the correct GVA to PA mapping. In the case of vGPU, this work is delegated to the RM server, which takes care of the GMMU programming and the IPA to PA conversion.

Problem: The current GMMU mapping logic in the guest assumes that the PA range is contiguous over a given IPA range, and hence does not account for holes in the PA range. This is not always the case: a contiguous IPA range can be mapped to discontiguous PA ranges. In that situation the mapping logic sets up the GMMU PTEs ignoring the holes in physical memory and creates a GVA => PA mapping that intrudes into reserved PA ranges, corrupting memory.

This change accounts for holes in a given PA range: for a given IPA range, it identifies the discontiguous PA ranges and sets up the PTEs appropriately.

Bug 200451447
Jira VQRM-5069

Change-Id: I354d984f6c44482e4576a173fce1e90ab52283ac
Signed-off-by: aalex <aalex@nvidia.com>
Signed-off-by: Antony Clince Alex <aalex@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1850972
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
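In outline, the fix walks each mapped IPA extent in PA-contiguous chunks instead of assuming a single translation covers the whole range. A minimal sketch of that loop follows (not the actual patch): phys_addr() stands for the per-platform IPA-to-PA hook installed by this file, which also reports via pa_len how many contiguous PA bytes remain past the translated address, and map_pte_range() is a hypothetical stand-in for the guest's PTE programming.

/* Sketch only: map [ipa, ipa + size) at gva, splitting the range
 * wherever the underlying physical memory becomes discontiguous.
 */
static int map_ipa_range_sketch(struct gk20a *g, u64 gva, u64 ipa, u64 size)
{
	while (size != 0ULL) {
		u64 pa_len = size;
		u64 pa = phys_addr(g, ipa, &pa_len);	/* hypothetical hook */
		u64 chunk = (pa_len < size) ? pa_len : size;

		if (pa == 0ULL)
			return -EINVAL;	/* translation failed */

		/* Program PTEs for this PA-contiguous chunk only. */
		map_pte_range(g, gva, pa, chunk);	/* hypothetical */

		gva += chunk;
		ipa += chunk;
		size -= chunk;
	}
	return 0;
}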
130 lines · 3.5 KiB · C
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <soc/tegra/chip-id.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/tegra_bpmp.h>
#ifdef CONFIG_TEGRA_HV_MANAGER
#include <soc/tegra/virt/syscalls.h>
#endif

#include <nvgpu/soc.h>
#include "os_linux.h"
#include "platform_gk20a.h"

bool nvgpu_platform_is_silicon(struct gk20a *g)
{
	return tegra_platform_is_silicon();
}

bool nvgpu_platform_is_simulation(struct gk20a *g)
{
	return tegra_platform_is_vdk();
}

bool nvgpu_platform_is_fpga(struct gk20a *g)
{
	return tegra_platform_is_fpga();
}

bool nvgpu_is_hypervisor_mode(struct gk20a *g)
{
	return is_tegra_hypervisor_mode();
}

bool nvgpu_is_bpmp_running(struct gk20a *g)
{
	return tegra_bpmp_running();
}

bool nvgpu_is_soc_t194_a01(struct gk20a *g)
{
	return tegra_get_chip_id() == TEGRA194 &&
		tegra_chip_get_revision() == TEGRA194_REVISION_A01;
}

#ifdef CONFIG_TEGRA_HV_MANAGER
/* When nvlink is enabled on dGPU, we need to use physical memory addresses.
 * There is no SMMU translation. However, the device initially enumerates as a
 * PCIe device. As such, when allocating memory for this PCIe device, the DMA
 * framework ends up allocating memory using SMMU (if enabled in device tree).
 * As a result, when we switch to nvlink, we need to use underlying physical
 * addresses, even if memory mappings exist in SMMU.
 * In addition, when stage-2 SMMU translation is enabled (for instance when HV
 * is enabled), the addresses we get from dma_alloc are IPAs. We need to
 * convert them to PA.
 */
static u64 nvgpu_tegra_hv_ipa_pa(struct gk20a *g, u64 ipa, u64 *pa_len)
{
	struct device *dev = dev_from_gk20a(g);
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct hyp_ipa_pa_info info;
	int err;
	u64 pa = 0ULL;

	err = hyp_read_ipa_pa_info(&info, platform->vmid, ipa);
	if (err < 0) {
		/* WAR for bug 2096877
		 * hyp_read_ipa_pa_info only looks up RAM mappings.
		 * Assume a one-to-one IPA:PA mapping for the syncpt aperture.
		 */
		u64 start = g->syncpt_unit_base;
		u64 end = g->syncpt_unit_base + g->syncpt_unit_size;
		if ((ipa >= start) && (ipa < end)) {
			pa = ipa;
			nvgpu_log(g, gpu_dbg_map_v,
				"ipa=%llx vmid=%d -> pa=%llx (SYNCPT)\n",
				ipa, platform->vmid, pa);
		} else {
			nvgpu_err(g, "ipa=%llx translation failed vmid=%u err=%d",
				ipa, platform->vmid, err);
		}
	} else {
		pa = info.base + info.offset;
		if (pa_len != NULL) {
			/*
			 * Update the size of the physical memory chunk
			 * remaining after the specified offset.
			 */
			*pa_len = info.size - info.offset;
		}
		nvgpu_log(g, gpu_dbg_map_v,
			"ipa=%llx vmid=%d -> pa=%llx "
			"base=%llx offset=%llx size=%llx\n",
			ipa, platform->vmid, pa, info.base,
			info.offset, info.size);
	}
	return pa;
}
#endif

int nvgpu_init_soc_vars(struct gk20a *g)
{
#ifdef CONFIG_TEGRA_HV_MANAGER
	struct device *dev = dev_from_gk20a(g);
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	int err;

	if (nvgpu_is_hypervisor_mode(g)) {
		err = hyp_read_gid(&platform->vmid);
		if (err) {
			nvgpu_err(g, "failed to read vmid");
			return err;
		}
		platform->phys_addr = nvgpu_tegra_hv_ipa_pa;
	}
#endif
	return 0;
}
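For context, a hedged sketch of how a caller might consume the phys_addr hook installed above; example_translate() and its fallback behavior are hypothetical and not part of this file.

/* Hypothetical caller: translate one address and learn how many
 * PA-contiguous bytes follow it. Falls back to returning the address
 * unchanged when no hypervisor hook is installed (an assumption, not
 * verified against the rest of the driver).
 */
static u64 example_translate(struct gk20a *g, u64 addr, u64 *chunk_len)
{
	struct gk20a_platform *platform =
		gk20a_get_platform(dev_from_gk20a(g));

	*chunk_len = U64_MAX;	/* unbounded until the hook says otherwise */

	if (platform->phys_addr != NULL)
		return platform->phys_addr(g, addr, chunk_len);

	return addr;
}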