gpu: nvgpu: vf: init gmmu related structure

The VF driver implements GMMU map/unmap on the client side.

- Add a helper function to check whether the nvgpu device is a VF or a
  legacy vgpu.
- Init the pd_cache struct for VF.
- Init platform->phys_addr for IPA2PA translation.

Jira GVSCI-15733

Change-Id: I46c84f0acdd167b9c4bdcec2f1c25f3acd6a0f71
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2863430
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Prathap Kumar Valsan <prathapk@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author:    Richard Zhao
Date:      2023-01-17 13:22:50 -08:00
Committer: mobile promotions
Commit:    a7d358f773 (parent de0e1be1ed)
6 changed files with 49 additions and 10 deletions


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -763,7 +763,7 @@ static int nvgpu_vm_init_attributes(struct mm_gk20a *mm,
 		g->ops.mm.gmmu.get_mmu_levels(g, vm->big_page_size);
 
 #ifdef CONFIG_NVGPU_GR_VIRTUALIZATION
-	if (g->is_virtual && userspace_managed) {
+	if (nvgpu_is_legacy_vgpu(g) && userspace_managed) {
 		nvgpu_err(g, "vGPU: no userspace managed addr space support");
 		return -ENOSYS;
 	}
@@ -807,7 +807,7 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 	/* Initialize the page table data structures. */
 	(void) strncpy(vm->name, name,
 		min(strlen(name), (size_t)(sizeof(vm->name)-1ULL)));
-	if (!g->is_virtual) {
+	if (!nvgpu_is_legacy_vgpu(g)) {
 		err = nvgpu_gmmu_init_page_table(vm);
 		if (err != 0) {
 			goto clean_up_gpu_vm;
@@ -985,7 +985,7 @@ static void nvgpu_vm_remove(struct vm_gk20a *vm)
 		nvgpu_alloc_destroy(&vm->user_lp);
 	}
-	if (!g->is_virtual) {
+	if (!nvgpu_is_legacy_vgpu(g)) {
 		nvgpu_vm_free_entries(vm, &vm->pdb);
 	}
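
For context, the net effect of the vm.c changes above is that only a legacy vgpu skips client-side page-table handling, while native GPUs and VF devices share the same path. A minimal sketch of that dispatch, assuming the gk20a_from_vm() accessor (nvgpu_is_legacy_vgpu() and nvgpu_gmmu_init_page_table() are taken from this change):

	static int vm_init_page_tables_sketch(struct vm_gk20a *vm)
	{
		struct gk20a *g = gk20a_from_vm(vm); /* assumed accessor */

		/* Legacy vgpu: page tables are owned by the GPU server. */
		if (nvgpu_is_legacy_vgpu(g)) {
			return 0;
		}

		/* Native GPU and VF: build GMMU page tables on the client side. */
		return nvgpu_gmmu_init_page_table(vm);
	}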


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -226,6 +226,14 @@ int vgpu_finalize_poweron_common(struct gk20a *g)
 		return err;
 	}
 
+	if (g->ops.mm.pd_cache_init != NULL) {
+		err = g->ops.mm.pd_cache_init(g);
+		if (err != 0) {
+			nvgpu_err(g, "failed to init pd_cache");
+			return err;
+		}
+	}
+
 	err = nvgpu_fifo_init_support(g);
 	if (err != 0) {
 		nvgpu_err(g, "failed to init gk20a fifo");
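
For illustration only: with the pd_cache_init hook now called conditionally at poweron, a VF HAL would be expected to keep it populated, while the legacy vgpu HALs below set it to NULL. The vf_ops_mm table and its name are hypothetical; the fields and nvgpu_pd_cache_init come from the hunks in this change:

	/* Hypothetical VF mm ops: pd_cache_init stays populated, so the
	 * conditional call in vgpu_finalize_poweron_common() initializes
	 * the pd_cache for VF. */
	static const struct gops_mm vf_ops_mm = {
		.init_mm_support = nvgpu_init_mm_support,
		.pd_cache_init = nvgpu_pd_cache_init,
		.mm_suspend = nvgpu_mm_suspend,
		.vm_bind_channel = vgpu_vm_bind_channel,
		.setup_hw = NULL,
	};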


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -812,7 +812,7 @@ static const struct gops_mm_gmmu vgpu_ga10b_ops_mm_gmmu = {
 static const struct gops_mm vgpu_ga10b_ops_mm = {
 	.init_mm_support = nvgpu_init_mm_support,
-	.pd_cache_init = nvgpu_pd_cache_init,
+	.pd_cache_init = NULL,
 	.mm_suspend = nvgpu_mm_suspend,
 	.vm_bind_channel = vgpu_vm_bind_channel,
 	.setup_hw = NULL,


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -780,7 +780,7 @@ static const struct gops_mm_gmmu vgpu_gv11b_ops_mm_gmmu = {
 static const struct gops_mm vgpu_gv11b_ops_mm = {
 	.init_mm_support = nvgpu_init_mm_support,
-	.pd_cache_init = nvgpu_pd_cache_init,
+	.pd_cache_init = NULL,
 	.mm_suspend = nvgpu_mm_suspend,
 	.vm_bind_channel = vgpu_vm_bind_channel,
 	.setup_hw = NULL,


@@ -944,6 +944,31 @@ struct gk20a {
 	bool is_pci_igpu;
 };
 
+/**
+ * @brief Check whether nvgpu device is virtual function (VF)
+ *
+ * @param g [in]	The GPU superstructure.
+ * @return true	nvgpu device is VF
+ * @return false	nvgpu device is not VF
+ */
+static inline bool nvgpu_is_vf(struct gk20a *g)
+{
+	return g->is_virtual && g->func_regs != 0U;
+}
+
+/**
+ * @brief Check whether nvgpu device is legacy vgpu which does not rely on VF.
+ *
+ * @param g [in]	The GPU superstructure.
+ * @return true	nvgpu device is legacy vgpu device, for which gpu context is
+ *		managed on gpu server
+ * @return false	nvgpu device is not legacy vgpu device
+ */
+static inline bool nvgpu_is_legacy_vgpu(struct gk20a *g)
+{
+	return g->is_virtual && g->func_regs == 0U;
+}
+
 /**
  * @brief Check if watchdog and context switch timeouts are enabled.
  *
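
Usage note: together, the two helpers split devices into three flavors based on g->is_virtual and g->func_regs. An illustrative sketch (the function below is hypothetical and nvgpu_log_info() is assumed; the helpers come from the hunk above):

	/* Flavors implied by the helpers above:
	 *   native GPU:  !g->is_virtual
	 *   VF:          g->is_virtual && g->func_regs != 0U
	 *   legacy vgpu: g->is_virtual && g->func_regs == 0U
	 */
	static void log_device_flavor(struct gk20a *g)
	{
		if (nvgpu_is_vf(g)) {
			nvgpu_log_info(g, "VF: GMMU map/unmap on the client side");
		} else if (nvgpu_is_legacy_vgpu(g)) {
			nvgpu_log_info(g, "legacy vgpu: context managed on the GPU server");
		} else {
			nvgpu_log_info(g, "native GPU");
		}
	}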


@@ -1,7 +1,7 @@
 /*
  * Virtualized GPU for Linux
  *
- * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -95,6 +95,7 @@ static void vgpu_init_vars(struct gk20a *g, struct gk20a_platform *platform)
 	nvgpu_mutex_init(&g->clk_arb_enable_lock);
 	nvgpu_mutex_init(&g->cg_pg_lock);
 	nvgpu_rwsem_init(&g->deterministic_busy);
+	nvgpu_rwsem_init(&(g->ipa_pa_cache.ipa_pa_rw_lock));
 	nvgpu_mutex_init(&priv->vgpu_clk_get_freq_lock);
@@ -117,6 +118,11 @@ static void vgpu_init_vars(struct gk20a *g, struct gk20a_platform *platform)
 			platform->unified_memory);
 	nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
 			platform->unify_address_spaces);
+
+	if (nvgpu_is_vf(g)) {
+		/* only VF needs IPA2PA */
+		nvgpu_init_soc_vars(g);
+	}
 }
 
 static int vgpu_init_support(struct platform_device *pdev)