gpu: nvgpu: flag for physically addressed buffers

Some buffers, such as userd, are physically addressed. If nvlink is
enabled, or the device is not IOMMU-able, such buffers must be
physically contiguous.

Add NVGPU_DMA_PHYSICALLY_ADDRESSED to identify such buffers, so that
a physically contiguous allocation is forced only in the cases above
(see the usage sketch below).

Bug 2422486

Change-Id: I6426e23b064904e812e6b33e6d706391648a51ae
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1959034
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Thomas Fleury
2018-11-26 13:50:14 -08:00
committed by mobile promotions
parent 2ac57a856b
commit 2b762363ac
4 changed files with 25 additions and 16 deletions
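
For illustration only, a minimal caller-side sketch of how a userd-like
buffer might request the new flag. The alloc_userd_like_buffer() helper is
hypothetical, and the nvgpu_dma_alloc_flags_sys() argument order
(g, flags, size, mem) is assumed from the hunk below; the promotion to
NVGPU_DMA_FORCE_CONTIGUOUS then happens inside the allocator itself.

/*
 * Hypothetical caller-side sketch (not part of this change): request a
 * physically addressed buffer and let nvgpu_dma_alloc_flags_sys()
 * promote it to NVGPU_DMA_FORCE_CONTIGUOUS when the device is not
 * IOMMU-able or NVGPU_MM_USE_PHYSICAL_SG (e.g. nvlink) is enabled.
 */
static int alloc_userd_like_buffer(struct gk20a *g, size_t size,
				   struct nvgpu_mem *mem)
{
	return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_PHYSICALLY_ADDRESSED,
					 size, mem);
}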


@@ -74,8 +74,9 @@ static char *nvgpu_dma_flags_to_str(struct gk20a *g, unsigned long flags)
 		} \
 	} while (false)
-	APPEND_FLAG(NVGPU_DMA_NO_KERNEL_MAPPING, "NO_KERNEL_MAPPING ");
-	APPEND_FLAG(NVGPU_DMA_FORCE_CONTIGUOUS,  "FORCE_CONTIGUOUS ");
+	APPEND_FLAG(NVGPU_DMA_NO_KERNEL_MAPPING,    "NO_KERNEL_MAPPING ");
+	APPEND_FLAG(NVGPU_DMA_FORCE_CONTIGUOUS,     "FORCE_CONTIGUOUS ");
+	APPEND_FLAG(NVGPU_DMA_PHYSICALLY_ADDRESSED, "PHYSICALLY_ADDRESSED");
 #undef APPEND_FLAG
 	return buf;
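
The hunk above only shows the tail of the APPEND_FLAG macro. As a rough,
self-contained illustration of the same flag-to-string pattern (the flag
values and buffer handling here are made up for the sketch, not taken from
the driver):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Placeholder flag values for this sketch only. */
#define NVGPU_DMA_NO_KERNEL_MAPPING	(1UL << 0)
#define NVGPU_DMA_FORCE_CONTIGUOUS	(1UL << 1)
#define NVGPU_DMA_PHYSICALLY_ADDRESSED	(1UL << 2)

static const char *dma_flags_to_str(unsigned long flags, char *buf, size_t len)
{
	buf[0] = '\0';
#define APPEND_FLAG(flag, str_flag)					\
	do {								\
		if (flags & (flag)) {					\
			strncat(buf, str_flag, len - strlen(buf) - 1);	\
		}							\
	} while (false)
	APPEND_FLAG(NVGPU_DMA_NO_KERNEL_MAPPING,    "NO_KERNEL_MAPPING ");
	APPEND_FLAG(NVGPU_DMA_FORCE_CONTIGUOUS,     "FORCE_CONTIGUOUS ");
	APPEND_FLAG(NVGPU_DMA_PHYSICALLY_ADDRESSED, "PHYSICALLY_ADDRESSED");
#undef APPEND_FLAG
	return buf;
}

int main(void)
{
	char buf[128];

	/* Prints: FORCE_CONTIGUOUS PHYSICALLY_ADDRESSED */
	printf("%s\n", dma_flags_to_str(NVGPU_DMA_FORCE_CONTIGUOUS |
					NVGPU_DMA_PHYSICALLY_ADDRESSED,
					buf, sizeof(buf)));
	return 0;
}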
@@ -187,6 +188,12 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 		WARN_ON(1);
 	}
 
+	if ((flags & NVGPU_DMA_PHYSICALLY_ADDRESSED) &&
+	    (!nvgpu_iommuable(g) ||
+	     nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG))) {
+		flags |= NVGPU_DMA_FORCE_CONTIGUOUS;
+	}
+
 	/*
 	 * WAR for IO coherent chips: the DMA API does not seem to generate
 	 * mappings that work correctly. Unclear why - Bug ID: 2040115.
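
For clarity, here is the promotion logic in isolation as a self-contained
sketch; promote_dma_flags() and the gk20a_stub fields are hypothetical
stand-ins for nvgpu_iommuable() and the NVGPU_MM_USE_PHYSICAL_SG query,
and only the boolean condition mirrors the hunk above.

#include <stdbool.h>

/* Placeholder flag values for this sketch only. */
#define NVGPU_DMA_FORCE_CONTIGUOUS	(1UL << 1)
#define NVGPU_DMA_PHYSICALLY_ADDRESSED	(1UL << 2)

/* Hypothetical stand-in for the per-device state the driver queries. */
struct gk20a_stub {
	bool iommuable;		/* what nvgpu_iommuable(g) would report */
	bool use_physical_sg;	/* NVGPU_MM_USE_PHYSICAL_SG, e.g. nvlink */
};

/*
 * A physically addressed buffer must also be physically contiguous when
 * the device cannot translate through an IOMMU or when physical
 * scatter-gather addressing is in use.
 */
static unsigned long promote_dma_flags(const struct gk20a_stub *g,
				       unsigned long flags)
{
	if ((flags & NVGPU_DMA_PHYSICALLY_ADDRESSED) &&
	    (!g->iommuable || g->use_physical_sg)) {
		flags |= NVGPU_DMA_FORCE_CONTIGUOUS;
	}

	return flags;
}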