mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 10:34:43 +03:00)
gpu: nvgpu: introduce hal ops for vgpu_vm_init and vgpu_vm_remove
vgpu_vm_init() and vgpu_vm_remove() are called directly from common code when virtualization is supported. Introduce the mm HAL ops vm_as_alloc_share and vm_as_free_share and call these functions through them instead. Also rename vgpu_vm_init() to vgpu_vm_as_alloc_share() and vgpu_vm_remove() to vgpu_vm_as_free_share(): the old names are too generic, and the new names reflect what the functions actually do. For now these HAL ops are initialized only for vgpu.

Jira GVSCI-517

Change-Id: I7c5af1ab5a64ce562092f75b1488524e93e8f53f
Signed-off-by: Aparna Das <aparnad@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2032310
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 95f47ac13c
commit 5128237bc8
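The change replaces a compile-time #ifdef plus direct vgpu call with the usual HAL pattern: a per-chip ops table of function pointers that common code probes for NULL before calling. Below is a minimal, self-contained sketch of that pattern; the struct, function, and backend names are invented for illustration and are not nvgpu code.

    #include <stdio.h>

    /* Illustrative "HAL" table of optional per-backend callbacks. */
    struct demo_vm;
    struct demo_mm_ops {
            int  (*vm_as_alloc_share)(struct demo_vm *vm);  /* may be NULL */
            void (*vm_as_free_share)(struct demo_vm *vm);   /* may be NULL */
    };

    struct demo_vm {
            const struct demo_mm_ops *ops;
    };

    /* A "virtual" backend provides both callbacks. */
    static int demo_vgpu_alloc_share(struct demo_vm *vm) { (void)vm; puts("alloc share"); return 0; }
    static void demo_vgpu_free_share(struct demo_vm *vm) { (void)vm; puts("free share"); }

    static const struct demo_mm_ops demo_vgpu_ops = {
            .vm_as_alloc_share = demo_vgpu_alloc_share,
            .vm_as_free_share  = demo_vgpu_free_share,
    };

    /* A "native" backend leaves both pointers NULL. */
    static const struct demo_mm_ops demo_native_ops = { 0 };

    /* Common code: no #ifdef, just a NULL check before dispatching. */
    static int demo_vm_init(struct demo_vm *vm)
    {
            if (vm->ops->vm_as_alloc_share != NULL) {
                    int err = vm->ops->vm_as_alloc_share(vm);
                    if (err != 0)
                            return err;
            }
            /* ... rest of VM init ... */
            return 0;
    }

    static void demo_vm_remove(struct demo_vm *vm)
    {
            /* ... rest of VM teardown ... */
            if (vm->ops->vm_as_free_share != NULL)
                    vm->ops->vm_as_free_share(vm);
    }

    int main(void)
    {
            struct demo_vm virt   = { .ops = &demo_vgpu_ops };
            struct demo_vm native = { .ops = &demo_native_ops };

            demo_vm_init(&virt);   demo_vm_remove(&virt);   /* both callbacks run */
            demo_vm_init(&native); demo_vm_remove(&native); /* callbacks skipped */
            return 0;
    }

The diff below applies exactly this shape to nvgpu: common code in nvgpu_vm_do_init()/nvgpu_vm_remove() checks g->ops.mm.vm_as_alloc_share / vm_as_free_share, and only the vGPU chip tables fill them in.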
@@ -338,17 +338,21 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 		nvgpu_err(g, "vGPU: no userspace managed addr space support");
 		return -ENOSYS;
 	}
-	if (g->is_virtual && vgpu_vm_init(g, vm)) {
-		nvgpu_err(g, "Failed to init vGPU VM!");
-		return -ENOMEM;
-	}
 #endif
 
+	if (g->ops.mm.vm_as_alloc_share != NULL) {
+		err = g->ops.mm.vm_as_alloc_share(g, vm);
+		if (err != 0) {
+			nvgpu_err(g, "Failed to init gpu vm!");
+			return err;
+		}
+	}
+
 	/* Initialize the page table data structures. */
 	(void) strncpy(vm->name, name, min(strlen(name), sizeof(vm->name)));
 	err = nvgpu_gmmu_init_page_table(vm);
 	if (err != 0) {
-		goto clean_up_vgpu_vm;
+		goto clean_up_gpu_vm;
 	}
 
 	/* Setup vma limits. */
@@ -535,12 +539,10 @@ clean_up_allocators:
 clean_up_page_tables:
 	/* Cleans up nvgpu_gmmu_init_page_table() */
 	nvgpu_pd_free(vm, &vm->pdb);
-clean_up_vgpu_vm:
-#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
-	if (g->is_virtual) {
-		vgpu_vm_remove(vm);
+clean_up_gpu_vm:
+	if (g->ops.mm.vm_as_free_share != NULL) {
+		g->ops.mm.vm_as_free_share(vm);
 	}
-#endif
 	return err;
 }
 
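The error path keeps the same shape: the label is renamed from clean_up_vgpu_vm to clean_up_gpu_vm, and the free op is only reached after the share has actually been allocated. A compact sketch of that goto-unwind ordering, with invented names (not nvgpu code; demo_init_page_table stands in for the real page-table step):

    #include <stddef.h>

    struct demo_mm_ops {
            int  (*vm_as_alloc_share)(void *vm);
            void (*vm_as_free_share)(void *vm);
    };

    /* Stand-in for the next init step; forces the error path for the demo. */
    static int demo_init_page_table(void *vm) { (void)vm; return -1; }

    static int demo_vm_do_init(void *vm, const struct demo_mm_ops *ops)
    {
            int err;

            if (ops->vm_as_alloc_share != NULL) {
                    err = ops->vm_as_alloc_share(vm);
                    if (err != 0)
                            return err;          /* nothing allocated yet, plain return */
            }

            err = demo_init_page_table(vm);
            if (err != 0)
                    goto clean_up_gpu_vm;        /* share exists, so undo it */

            return 0;

    clean_up_gpu_vm:
            if (ops->vm_as_free_share != NULL)
                    ops->vm_as_free_share(vm);
            return err;
    }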
@@ -664,11 +666,9 @@ static void nvgpu_vm_remove(struct vm_gk20a *vm)
 
 	nvgpu_vm_free_entries(vm, &vm->pdb);
 
-#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
-	if (g->is_virtual) {
-		vgpu_vm_remove(vm);
+	if (g->ops.mm.vm_as_free_share != NULL) {
+		g->ops.mm.vm_as_free_share(vm);
 	}
-#endif
 
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 
@@ -75,6 +75,7 @@
 #include <nvgpu/channel.h>
 
 #include <nvgpu/vgpu/ce_vgpu.h>
+#include <nvgpu/vgpu/vm_vgpu.h>
 
 #include <nvgpu/hw/gp10b/hw_pram_gp10b.h>
 #include <nvgpu/hw/gp10b/hw_pwr_gp10b.h>
@@ -502,6 +503,8 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 		.get_kind_invalid = gm20b_get_kind_invalid,
 		.get_kind_pitch = gm20b_get_kind_pitch,
 		.bar1_map_userd = vgpu_mm_bar1_map_userd,
+		.vm_as_alloc_share = vgpu_vm_as_alloc_share,
+		.vm_as_free_share = vgpu_vm_as_free_share,
 	},
 	.pramin = {
 		.data032_r = NULL,
@@ -90,6 +90,7 @@
 #include <nvgpu/channel.h>
 
 #include <nvgpu/vgpu/ce_vgpu.h>
+#include <nvgpu/vgpu/vm_vgpu.h>
 #include <nvgpu/gr/zbc.h>
 
 #include "vgpu_gv11b.h"
@@ -582,6 +583,8 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 		.remove_bar2_vm = gp10b_remove_bar2_vm,
 		.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
 		.bar1_map_userd = vgpu_mm_bar1_map_userd,
+		.vm_as_alloc_share = vgpu_vm_as_alloc_share,
+		.vm_as_free_share = vgpu_vm_as_free_share,
 	},
 	.therm = {
 		.init_therm_setup_hw = NULL,
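Only the vGPU ops tables (vgpu_gp10b_ops and vgpu_gv11b_ops above) name the new callbacks. Native chip tables need no edit: with C designated initializers, members that are not named are zero-initialized, so vm_as_alloc_share and vm_as_free_share stay NULL there and the common-code checks simply skip them. A tiny standalone illustration of that language rule (names invented):

    #include <assert.h>
    #include <stddef.h>

    struct demo_mm_ops {
            int  (*vm_as_alloc_share)(void *g, void *vm);
            void (*vm_as_free_share)(void *vm);
            void (*something_else)(void *g);
    };

    static void demo_other(void *g) { (void)g; }

    /* Only one member is named; the unnamed function pointers are NULL. */
    static const struct demo_mm_ops demo_native_mm_ops = {
            .something_else = demo_other,
    };

    int main(void)
    {
            assert(demo_native_mm_ops.vm_as_alloc_share == NULL);
            assert(demo_native_mm_ops.vm_as_free_share == NULL);
            return 0;
    }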
@@ -40,7 +40,7 @@
  * intializing a VM on a vGPU. This alone is not enough to init a VM. See
  * nvgpu_vm_init().
  */
-int vgpu_vm_init(struct gk20a *g, struct vm_gk20a *vm)
+int vgpu_vm_as_alloc_share(struct gk20a *g, struct vm_gk20a *vm)
 {
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
@@ -62,10 +62,11 @@ int vgpu_vm_init(struct gk20a *g, struct vm_gk20a *vm)
 }
 
 /*
- * Similar to vgpu_vm_init() this is called as part of the cleanup path for
- * VMs. This alone is not enough to remove a VM - see nvgpu_vm_remove().
+ * Similar to vgpu_vm_as_alloc_share() this is called as part of the cleanup
+ * path for VMs. This alone is not enough to remove a VM -
+ * see nvgpu_vm_remove().
  */
-void vgpu_vm_remove(struct vm_gk20a *vm)
+void vgpu_vm_as_free_share(struct vm_gk20a *vm)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct tegra_vgpu_cmd_msg msg;
@@ -1179,6 +1179,8 @@ struct gpu_ops {
 		u32 (*get_flush_retries)(struct gk20a *g,
 				enum nvgpu_flush_op op);
 		u64 (*bar1_map_userd)(struct gk20a *g, struct nvgpu_mem *mem, u32 offset);
+		int (*vm_as_alloc_share)(struct gk20a *g, struct vm_gk20a *vm);
+		void (*vm_as_free_share)(struct vm_gk20a *vm);
 	} mm;
 	/*
 	 * This function is called to allocate secure memory (memory
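The two signatures are deliberately asymmetric: the alloc op takes both g and vm, while the free op takes only vm, because the vGPU implementation above recovers g via gk20a_from_vm(vm). If the NULL checks ever spread to more call sites, they could be folded into small helper wrappers; the sketch below is a hypothetical convenience layer under invented names, not something this commit adds.

    #include <stddef.h>

    /* Stand-ins for the real nvgpu types, just to make the sketch compile. */
    struct demo_gpu;
    struct demo_vm { struct demo_gpu *g; };

    struct demo_mm_ops {
            int  (*vm_as_alloc_share)(struct demo_gpu *g, struct demo_vm *vm);
            void (*vm_as_free_share)(struct demo_vm *vm);
    };

    struct demo_gpu { struct demo_mm_ops mm; };

    /* Hypothetical wrappers so callers never repeat the NULL checks. */
    static int demo_vm_as_alloc_share(struct demo_gpu *g, struct demo_vm *vm)
    {
            if (g->mm.vm_as_alloc_share == NULL)
                    return 0;                    /* native chip: nothing to do */
            return g->mm.vm_as_alloc_share(g, vm);
    }

    static void demo_vm_as_free_share(struct demo_vm *vm)
    {
            struct demo_gpu *g = vm->g;          /* mirrors gk20a_from_vm(vm) */

            if (g->mm.vm_as_free_share != NULL)
                    g->mm.vm_as_free_share(vm);
    }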
@@ -24,8 +24,8 @@
 #define NVGPU_VGPU_VM_H
 
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
-int vgpu_vm_init(struct gk20a *g, struct vm_gk20a *vm);
-void vgpu_vm_remove(struct vm_gk20a *vm);
+int vgpu_vm_as_alloc_share(struct gk20a *g, struct vm_gk20a *vm);
+void vgpu_vm_as_free_share(struct vm_gk20a *vm);
 #endif
 
 #endif /* NVGPU_VGPU_VM_H */