mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 10:34:43 +03:00
gpu: nvgpu: add guest_managed field in vm_gk20a

Add a field in vm_gk20a to identify a guest-managed VM, with the
corresponding checks to ensure that there is no kernel section for
guest-managed VMs. Also make the __nvgpu_vm_init function available
globally, so that the VM can be allocated elsewhere, the requisite
fields set, and the struct passed to this function for initialization.

Change-Id: Iad841d1b8ff9c894fe9d350dc43d74247e9c5512
Signed-off-by: Sourab Gupta <sourabg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1617171
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
7240b3c251
commit
fcdde6ad8a
@@ -109,6 +109,11 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,

 	vma = vm->vma[pgsz_idx];

+	if (vm->guest_managed) {
+		nvgpu_err(g, "Illegal GPU allocation on behalf of guest OS");
+		return 0;
+	}
+
 	if (pgsz_idx >= gmmu_nr_page_sizes) {
 		nvgpu_err(g, "(%s) invalid page size requested", vma->name);
 		return 0;
@@ -237,7 +242,10 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
 	return 0;
 }

-static int __nvgpu_vm_init(struct mm_gk20a *mm,
+/*
+ * Initialize a preallocated vm
+ */
+int __nvgpu_vm_init(struct mm_gk20a *mm,
 			   struct vm_gk20a *vm,
 			   u32 big_page_size,
 			   u64 low_hole,
@@ -258,6 +266,9 @@ static int __nvgpu_vm_init(struct mm_gk20a *mm,
 	if (WARN_ON(kernel_reserved + low_hole > aperture_size))
 		return -ENOMEM;

+	if (WARN_ON(vm->guest_managed && kernel_reserved != 0))
+		return -EINVAL;
+
 	nvgpu_log_info(g, "Init space for %s: valimit=0x%llx, "
 		   "LP size=0x%x lowhole=0x%llx",
 		   name, aperture_size,
@@ -337,7 +348,7 @@ static int __nvgpu_vm_init(struct mm_gk20a *mm,

 	if (WARN_ON(user_vma_start > user_vma_limit) ||
 	    WARN_ON(user_lp_vma_start > user_lp_vma_limit) ||
-	    WARN_ON(kernel_vma_start >= kernel_vma_limit)) {
+	    WARN_ON(!vm->guest_managed && kernel_vma_start >= kernel_vma_limit)) {
 		err = -EINVAL;
 		goto clean_up_page_tables;
 	}
@@ -150,6 +150,7 @@ struct vm_gk20a {

 	bool big_pages;		/* enable large page support */
 	bool enable_ctag;
+	bool guest_managed;	/* whether the vm addr space is managed by guest */

 	u32 big_page_size;
@@ -284,6 +285,19 @@ int nvgpu_insert_mapped_buf(struct vm_gk20a *vm,
 void nvgpu_remove_mapped_buf(struct vm_gk20a *vm,
 			     struct nvgpu_mapped_buf *mapped_buffer);

+/*
+ * Initialize a preallocated vm
+ */
+int __nvgpu_vm_init(struct mm_gk20a *mm,
+		    struct vm_gk20a *vm,
+		    u32 big_page_size,
+		    u64 low_hole,
+		    u64 kernel_reserved,
+		    u64 aperture_size,
+		    bool big_pages,
+		    bool userspace_managed,
+		    char *name);
+
 struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
 			       u32 big_page_size,
 			       u64 low_hole,
Reference in New Issue
Block a user