Files
linux-nvgpu/drivers/gpu/nvgpu/os/linux/os_linux.h
Deepak Nibade a1bbcff476 gpu: nvgpu: enumerate dev nodes per GPU instance in MIG mode
In MIG mode, each of the dev nodes should be enumerated for each fGPU.
And for physical instance only the "ctrl" node should be enumerated.

Support this with below set of changes :

- Add struct nvgpu_mig_static_info that describes static GPU instance
  configuration. GPCs are enumerated only during poweron and grmgr unit
  will populate instance information based on number of GPCs.
  For linux, GPU poweron happens only with first gk20a_busy() call and
  instance information is not available during probe() time. Hence this
  static table is a temporary solution until proper solution is
  identified.

- Add nvgpu_default_mig_static_info for iGPU and
  nvgpu_default_pci_mig_static_info for dGPU that describes GPU instance
  partition.

- Add new function nvgpu_prepare_mig_dev_node_class_list() that parses
  the static table and creates one class per instance in MIG mode.
  Non-MIG mode classes are now enumerated in
  nvgpu_prepare_default_dev_node_class_list().

- Add new structure nvgpu_cdev_class_priv_data to store private data for
  each cdev. This will hold instance specific information and pointer to
  private data will be maintained in struct class and also passed as
  private data while creating device node with device_create()

- Add nvgpu_mig_fgpu_devnode() to set dev node paths/names for fGPUs and
  add nvgpu_mig_phys_devnode() to set dev node paths/names for the
  physical instance in MIG mode.

- Add new field mig_physical_node to struct nvgpu_dev_node. This field
  is set if corresponding dev node should be created for physical
  instance in MIG mode. For now set it only for "ctrl" node.

Jira NVGPU-5648

Change-Id: Ic97874eece1fbe0083b3ac4c48e36e06004f1bc2
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2434586
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: Lakshmanan M <lm@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
2020-12-15 14:13:28 -06:00

208 lines
4.8 KiB
C

/*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef NVGPU_OS_LINUX_H
#define NVGPU_OS_LINUX_H
#include <linux/cdev.h>
#include <linux/iommu.h>
#include <linux/hashtable.h>
#include <linux/notifier.h>
#include <linux/version.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/interrupts.h>
#include "cde.h"
#include "sched.h"
struct class;
/*
 * Linux-specific callback tables, filled in per-chip/per-platform.
 * Each anonymous sub-struct groups the hooks for one functional unit;
 * unused hooks are left NULL.
 */
struct nvgpu_os_linux_ops {
	struct {
		/*
		 * Map a surface's block-height/shader parameters to the
		 * horizontal/vertical CDE program indices (*hprog/*vprog).
		 */
		void (*get_program_numbers)(struct gk20a *g,
					    u32 block_height_log2,
					    u32 shader_parameter,
					    int *hprog, int *vprog);
		/* Return true if this chip needs a scatter buffer for CDE. */
		bool (*need_scatter_buffer)(struct gk20a *g);
		/*
		 * Fill scatter_buffer_ptr (scatter_buffer_size bytes) from
		 * the surface described by sgt/surface_size.
		 */
		int (*populate_scatter_buffer)(struct gk20a *g,
					       struct sg_table *sgt,
					       size_t surface_size,
					       void *scatter_buffer_ptr,
					       size_t scatter_buffer_size);
	} cde;	/* Color Decompression Engine support */
	struct {
		int (*init_debugfs)(struct gk20a *g);
	} clk;	/* clock debugfs */
	struct {
		int (*init_debugfs)(struct gk20a *g);
	} therm;	/* thermal debugfs */
	struct {
		int (*init_debugfs)(struct gk20a *g);
	} fecs_trace;	/* FECS context-switch trace debugfs */
	struct {
		int (*init_debugfs)(struct gk20a *g);
	} volt;	/* voltage debugfs */
	struct {
		int (*init_debugfs)(struct gk20a *g);
	} s_param;	/* system parameter debugfs */
};
/*
 * Deferred-work context for servicing dGPU thermal alert interrupts
 * outside of IRQ context.
 */
struct dgpu_thermal_alert {
	struct workqueue_struct *workqueue;	/* queue the alert work runs on */
	struct work_struct work;		/* work item queued on an alert */
	u32 therm_alert_irq;			/* thermal alert interrupt number */
	u32 event_delay;	/* delay between alert events — units not shown here, verify at use site */
};
/*
 * One character device node exposed by the driver.  Instances are
 * linked on nvgpu_os_linux.cdev_list_head.
 */
struct nvgpu_cdev {
	struct cdev cdev;		/* underlying Linux char device */
	struct device *node;		/* device created for this cdev */
	struct class *class;		/* class the node was created under */
	struct nvgpu_list_node list_entry;	/* link in the per-GPU cdev list */
};
/*
 * Recover the containing struct nvgpu_cdev from its embedded list node.
 *
 * @node: pointer to the list_entry member of a struct nvgpu_cdev.
 *
 * Returns the enclosing struct nvgpu_cdev.  Manual container_of via
 * offsetof, consistent with the other *_from_list_entry helpers here.
 */
static inline struct nvgpu_cdev *
nvgpu_cdev_from_list_entry(struct nvgpu_list_node *node)
{
	return (struct nvgpu_cdev *)
		((uintptr_t)node - offsetof(struct nvgpu_cdev, list_entry));
}
/*
 * Per-class private data.  Stored in struct nvgpu_class and also passed
 * as the private data of device nodes created under the class; carries
 * the MIG instance identity the nodes belong to.
 */
struct nvgpu_cdev_class_priv_data {
	char class_name[64];		/* class name, fixed 64-byte buffer */
	u32 local_instance_id;		/* instance id local to this GPU */
	u32 major_instance_id;		/* major part of the instance id */
	u32 minor_instance_id;		/* minor part of the instance id */
	bool pci;			/* true for dGPU (PCI) devices */
};
/*
 * One Linux device class created by the driver (one per GPU instance in
 * MIG mode).  Instances are linked on nvgpu_os_linux.class_list_head.
 */
struct nvgpu_class {
	struct class *class;		/* underlying Linux class */
	struct nvgpu_list_node list_entry;	/* link in the per-GPU class list */
	struct nvgpu_cdev_class_priv_data *priv_data;	/* MIG instance data for this class */
	enum nvgpu_mig_gpu_instance_type instance_type;	/* physical vs fGPU instance */
};
/*
 * Recover the containing struct nvgpu_class from its embedded list node.
 *
 * @node: pointer to the list_entry member of a struct nvgpu_class.
 *
 * Returns the enclosing struct nvgpu_class.  Manual container_of via
 * offsetof, consistent with the other *_from_list_entry helpers here.
 */
static inline struct nvgpu_class *
nvgpu_class_from_list_entry(struct nvgpu_list_node *node)
{
	return (struct nvgpu_class *)
		((uintptr_t)node - offsetof(struct nvgpu_class, list_entry));
}
/*
 * Linux OS wrapper around the common struct gk20a.  One instance per
 * probed GPU; the common code's gk20a is embedded as the FIRST member
 * so nvgpu_os_linux_from_gk20a() can convert back with container_of.
 */
struct nvgpu_os_linux {
	struct gk20a g;			/* common nvgpu state; must stay first */
	struct device *dev;		/* the platform/PCI device we bound to */
	struct dgpu_thermal_alert thermal_alert;	/* dGPU thermal alert work */
	struct nvgpu_interrupts interrupts;

	/* Device-node bookkeeping (one class per GPU instance in MIG mode). */
	struct nvgpu_list_node class_list_head;	/* list of struct nvgpu_class */
	struct nvgpu_list_node cdev_list_head;	/* list of struct nvgpu_cdev */
	u32 num_cdevs;				/* number of cdevs on the list */
	dev_t cdev_region;			/* base of the allocated dev_t range */

	/* see gk20a_ctrl_priv */
	struct nvgpu_list_node ctrl_privs;
	/* guards modifications to the list and its contents */
	struct nvgpu_mutex ctrl_privs_lock;

	struct devfreq *devfreq;		/* devfreq handle for DVFS */
	struct device_dma_parameters dma_parms;

	/* Nonstall interrupt bottom-half handling. */
	atomic_t nonstall_ops;			/* pending nonstall op bitmask */
	struct work_struct nonstall_fn_work;
	struct workqueue_struct *nonstall_work_queue;

	/* Register apertures; *_saved keep originals while remapped (e.g. for railgating). */
	struct resource *reg_mem;
	void __iomem *regs;
	void __iomem *regs_saved;
	u64 regs_bus_addr;

	struct resource *bar1_mem;
	void __iomem *bar1;
	void __iomem *bar1_saved;

	void __iomem *usermode_regs;
	void __iomem *usermode_regs_saved;
	u64 usermode_regs_bus_addr;

	struct nvgpu_os_linux_ops ops;		/* per-chip Linux callbacks */

	struct notifier_block nvgpu_reboot_nb;	/* reboot notifier hook */

#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs;			/* per-GPU debugfs root */
	struct dentry *debugfs_alias;
	struct dentry *debugfs_ltc_enabled;
	struct dentry *debugfs_timeouts_enabled;
	struct dentry *debugfs_disable_bigpage;
	struct dentry *debugfs_runlist_interleave;
	struct dentry *debugfs_allocators;
	struct dentry *debugfs_xve;
	struct dentry *debugfs_kmem;
	struct dentry *debugfs_hal;
	struct dentry *debugfs_ltc;
	struct dentry *debugfs_dump_ctxsw_stats;
#endif
	DECLARE_HASHTABLE(ecc_sysfs_stats_htable, 5);	/* ECC stat sysfs lookup (32 buckets) */
	struct dev_ext_attribute *ecc_attrs;

	struct gk20a_cde_app cde_app;		/* CDE application state */

	struct rw_semaphore busy_lock;		/* serializes busy/idle vs teardown */

	struct nvgpu_mutex dmabuf_priv_list_lock;	/* guards dmabuf_priv_list */
	struct nvgpu_list_node dmabuf_priv_list;

	bool init_done;				/* set once late init completed */

	/** Debugfs knob for forcing syncpt support off in runtime. */
	bool disable_syncpoints;

	bool enable_platform_dbg;		/* platform debug knob */
};
static inline struct nvgpu_os_linux *nvgpu_os_linux_from_gk20a(struct gk20a *g)
{
return container_of(g, struct nvgpu_os_linux, g);
}
static inline struct device *dev_from_gk20a(struct gk20a *g)
{
return nvgpu_os_linux_from_gk20a(g)->dev;
}
/*
 * Total system RAM in MiB.  totalram_pages counts PAGE_SIZE-sized
 * pages, and (10 - (PAGE_SHIFT - 10)) == 20 - PAGE_SHIFT converts a
 * page count to mebibytes.  Kernel 5.0 turned the totalram_pages
 * variable into an accessor function, hence the version split.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
#define totalram_size_in_mb (totalram_pages() >> (10 - (PAGE_SHIFT - 10)))
#else
#define totalram_size_in_mb (totalram_pages >> (10 - (PAGE_SHIFT - 10)))
#endif
#endif