nvidia-oot: add support for hypervisor driver

Add support for the hypervisor driver (tegra_hv), which maps the
hypervisor's IVC info page and exposes inter-VM communication (IVC)
channels and shared mempools to guest drivers.

Bug 3595577
JIRA ESLC-6884

Signed-off-by: Manish Bhardwaj <mbhardwaj@nvidia.com>
Change-Id: I0fc9b5eed45d584bc658c2613b33968bf8a91eaf
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2779389
Reviewed-by: Bitan Biswas <bbiswas@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author: Manish Bhardwaj
Authored: 2022-09-23 07:09:44 +00:00
Committed by: mobile promotions
Parent: 0d01039657
Commit: 6cf5029afb
7 changed files with 1932 additions and 0 deletions


@@ -25,4 +25,5 @@ obj-m += spi/
obj-m += thermal/
obj-m += watchdog/
obj-m += video/tegra/
obj-m += virt/tegra/


@@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Makefile for Hypervisor interface
#
obj-m += tegra_hv.o


@@ -0,0 +1,976 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <soc/tegra/virt/syscalls.h>
#include <soc/tegra/virt/hv-ivc.h>
#include <soc/tegra/ivc_ext.h>
#include "tegra_hv.h"
#define ERR(...) pr_err("tegra_hv: " __VA_ARGS__)
#define INFO(...) pr_info("tegra_hv: " __VA_ARGS__)
struct tegra_hv_data;
static struct property interrupts_prop = {
.name = "interrupts",
};
struct hv_ivc {
struct tegra_hv_data *hvd;
/*
* ivc_devs are stored in an id-indexed array; this field indicates
* a valid array entry.
*/
int valid;
/* channel configuration */
struct tegra_ivc ivc;
const struct tegra_hv_queue_data *qd;
const struct ivc_shared_area *area;
const struct guest_ivc_info *givci;
int other_guestid;
const struct tegra_hv_ivc_ops *cookie_ops;
struct tegra_hv_ivc_cookie cookie;
/* This lock synchronizes the reserved flag. */
struct mutex lock;
int reserved;
char name[16];
int irq;
};
#define cookie_to_ivc_dev(_cookie) \
container_of(_cookie, struct hv_ivc, cookie)
/* Describe all info needed to do IVC to one particular guest */
struct guest_ivc_info {
uintptr_t shmem; /* IO remapped shmem */
size_t length; /* length of shmem */
};
struct hv_mempool {
struct tegra_hv_ivm_cookie ivmk;
const struct ivc_mempool *mpd;
struct mutex lock;
int reserved;
};
struct tegra_hv_data {
const struct ivc_info_page *info;
int guestid;
struct guest_ivc_info *guest_ivc_info;
/* ivc_devs is indexed by queue id */
struct hv_ivc *ivc_devs;
uint32_t max_qid;
/* array with length info->nr_mempools */
struct hv_mempool *mempools;
struct class *hv_class;
struct device_node *dev;
};
/*
* Global HV state for read-only access by tegra_hv_... APIs
*
* This should be accessed only through get_hvd().
*/
static const struct tegra_hv_data *tegra_hv_data;
#ifdef SUPPORTS_TRAP_MSI_NOTIFICATION
struct ivc_notify_info {
// Trap based notification
uintptr_t trap_region_base_va;
uintptr_t trap_region_base_ipa;
uintptr_t trap_region_end_ipa;
uint64_t trap_region_size;
// MSI based notification
uintptr_t msi_region_base_va;
uintptr_t msi_region_base_ipa;
uintptr_t msi_region_end_ipa;
uint64_t msi_region_size;
};
static struct ivc_notify_info ivc_notify;
#endif
bool is_tegra_hypervisor_mode(void)
{
#ifdef CONFIG_OF
return of_property_read_bool(of_chosen,
"nvidia,tegra-hypervisor-mode");
#else
return false;
#endif
}
EXPORT_SYMBOL(is_tegra_hypervisor_mode);
static void ivc_raise_irq(struct tegra_ivc *ivc_channel, void *data)
{
struct hv_ivc *ivc = container_of(ivc_channel, struct hv_ivc, ivc);
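	/*
	 * Notify the peer of this channel. With trap/MSI notification
	 * support, writing the channel's raise_irq token to the mapped
	 * notification address is trapped by the hypervisor (or delivered
	 * directly as an MSI); otherwise fall back to the
	 * HVC_NR_RAISE_IRQ hypercall.
	 */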
#ifdef SUPPORTS_TRAP_MSI_NOTIFICATION
if (WARN_ON(!ivc->cookie.notify_va))
return;
*ivc->cookie.notify_va = ivc->qd->raise_irq;
#else
hyp_raise_irq(ivc->qd->raise_irq, ivc->other_guestid);
#endif
}
static const struct tegra_hv_data *get_hvd(void)
{
if (!tegra_hv_data) {
INFO("%s: not initialized yet\n", __func__);
return ERR_PTR(-EPROBE_DEFER);
} else {
return tegra_hv_data;
}
}
const struct ivc_info_page *tegra_hv_get_ivc_info(void)
{
const struct tegra_hv_data *hvd = get_hvd();
if (IS_ERR(hvd))
return (void *)hvd;
else
return tegra_hv_data->info;
}
EXPORT_SYMBOL(tegra_hv_get_ivc_info);
int tegra_hv_get_vmid(void)
{
const struct tegra_hv_data *hvd = get_hvd();
if (IS_ERR(hvd))
return -1;
else
return hvd->guestid;
}
EXPORT_SYMBOL(tegra_hv_get_vmid);
static void ivc_handle_notification(struct hv_ivc *ivc)
{
struct tegra_hv_ivc_cookie *ivck = &ivc->cookie;
/* This function should only be used when callbacks are specified. */
BUG_ON(!ivc->cookie_ops);
/* there is data in the queue; invoke the rx callback */
if (ivc->cookie_ops->rx_rdy && tegra_ivc_can_read(&ivc->ivc))
ivc->cookie_ops->rx_rdy(ivck);
/* there is space in the queue to write; invoke the tx callback */
if (ivc->cookie_ops->tx_rdy && tegra_ivc_can_write(&ivc->ivc))
ivc->cookie_ops->tx_rdy(ivck);
}
static irqreturn_t ivc_dev_cookie_irq_handler(int irq, void *data)
{
struct hv_ivc *ivcd = data;
ivc_handle_notification(ivcd);
return IRQ_HANDLED;
}
static void ivc_release_irq(struct hv_ivc *ivc)
{
BUG_ON(!ivc);
free_irq(ivc->irq, ivc);
}
static int ivc_request_cookie_irq(struct hv_ivc *ivcd)
{
return request_irq(ivcd->irq, ivc_dev_cookie_irq_handler, 0,
ivcd->name, ivcd);
}
static int tegra_hv_add_ivc(struct tegra_hv_data *hvd,
const struct tegra_hv_queue_data *qd, uint32_t index)
{
struct hv_ivc *ivc;
int ret;
int rx_first;
uintptr_t rx_base, tx_base;
uint32_t i;
struct irq_data *d;
#ifdef SUPPORTS_TRAP_MSI_NOTIFICATION
uint64_t va_offset;
#endif
ivc = &hvd->ivc_devs[qd->id];
BUG_ON(ivc->valid);
ivc->valid = 1;
ivc->hvd = hvd;
ivc->qd = qd;
if (qd->peers[0] == hvd->guestid)
ivc->other_guestid = qd->peers[1];
else if (qd->peers[1] == hvd->guestid)
ivc->other_guestid = qd->peers[0];
else
BUG();
/*
* Locate the guest_ivc_info representing the remote guest accessed
* through this channel.
*/
for (i = 0; i < hvd->info->nr_areas; i++) {
if (hvd->info->areas[i].guest == ivc->other_guestid) {
ivc->givci = &hvd->guest_ivc_info[i];
ivc->area = &hvd->info->areas[i];
break;
}
}
BUG_ON(i == hvd->info->nr_areas);
BUG_ON(ivc->givci->shmem == 0);
mutex_init(&ivc->lock);
if (qd->peers[0] == qd->peers[1]) {
/*
* The queue ids of loopback queues are always consecutive, so
* the even-numbered one receives in the first area.
*/
rx_first = (qd->id & 1) == 0;
} else {
rx_first = hvd->guestid == qd->peers[0];
}
BUG_ON(qd->offset >= ivc->givci->length);
BUG_ON(qd->offset + qd->size * 2 > ivc->givci->length);
if (rx_first) {
rx_base = ivc->givci->shmem + qd->offset;
tx_base = ivc->givci->shmem + qd->offset + qd->size;
} else {
tx_base = ivc->givci->shmem + qd->offset;
rx_base = ivc->givci->shmem + qd->offset + qd->size;
}
ret = snprintf(ivc->name, sizeof(ivc->name), "ivc%u", qd->id);
if (ret < 0) {
return -EINVAL;
}
ivc->irq = of_irq_get(hvd->dev, index);
if (ivc->irq < 0) {
ERR("Unable to get irq for ivc%u\n", qd->id);
return ivc->irq;
}
d = irq_get_irq_data(ivc->irq);
if (!d) {
ERR("Failed to get data for irq %d (ivc%u)\n", ivc->irq,
qd->id);
return -ENODEV;
}
#ifdef SUPPORTS_TRAP_MSI_NOTIFICATION
if (qd->msi_ipa != 0U) {
if (WARN_ON(ivc_notify.msi_region_size == 0UL))
return -EINVAL;
if (WARN_ON(!(qd->msi_ipa >= ivc_notify.msi_region_base_ipa &&
qd->msi_ipa <= ivc_notify.msi_region_end_ipa))) {
return -EINVAL;
}
va_offset = qd->msi_ipa - ivc_notify.msi_region_base_ipa;
ivc->cookie.notify_va =
(uint32_t *)(ivc_notify.msi_region_base_va +
va_offset);
} else if (qd->trap_ipa != 0U) {
if (WARN_ON(ivc_notify.trap_region_size == 0UL))
return -EINVAL;
if (WARN_ON(!(qd->trap_ipa >= ivc_notify.trap_region_base_ipa &&
qd->trap_ipa <= ivc_notify.trap_region_end_ipa))) {
return -EINVAL;
}
va_offset = qd->trap_ipa - ivc_notify.trap_region_base_ipa;
ivc->cookie.notify_va =
(uint32_t *)(ivc_notify.trap_region_base_va +
va_offset);
} else {
if (WARN_ON(ivc->cookie.notify_va == NULL))
return -EINVAL;
}
#endif
INFO("adding ivc%u: rx_base=%lx tx_base = %lx size=%x irq = %d (%lu)\n",
qd->id, rx_base, tx_base, qd->size, ivc->irq, d->hwirq);
tegra_ivc_init(&ivc->ivc, NULL, (void *)rx_base, 0, (void *)tx_base, 0, qd->nframes, qd->frame_size,
ivc_raise_irq, ivc);
/* We may have rebooted, so the channel could be active. */
ret = tegra_ivc_channel_sync(&ivc->ivc);
if (ret != 0)
return ret;
INFO("added %s\n", ivc->name);
return 0;
}
static struct hv_ivc *ivc_device_by_id(const struct tegra_hv_data *hvd, uint32_t id)
{
if (id > hvd->max_qid) {
return NULL;
} else {
struct hv_ivc *ivc = &hvd->ivc_devs[id];
if (ivc->valid)
return ivc;
else
return NULL;
}
}
static void tegra_hv_ivc_cleanup(struct tegra_hv_data *hvd)
{
if (!hvd->ivc_devs)
return;
kfree(hvd->ivc_devs);
hvd->ivc_devs = NULL;
}
static void tegra_hv_cleanup(struct tegra_hv_data *hvd)
{
/*
* Destroying IVC channels in use is not supported. Once it's possible
* for IVC channels to be reserved, we no longer clean up.
*/
BUG_ON(tegra_hv_data != NULL);
kfree(hvd->mempools);
hvd->mempools = NULL;
tegra_hv_ivc_cleanup(hvd);
if (hvd->guest_ivc_info) {
uint32_t i;
BUG_ON(!hvd->info);
for (i = 0; i < hvd->info->nr_areas; i++) {
if (hvd->guest_ivc_info[i].shmem) {
iounmap((void __iomem *)hvd->guest_ivc_info[i].shmem);
hvd->guest_ivc_info[i].shmem = 0;
}
}
kfree(hvd->guest_ivc_info);
hvd->guest_ivc_info = NULL;
iounmap((void __iomem *)hvd->info);
hvd->info = NULL;
}
if (hvd->hv_class) {
class_destroy(hvd->hv_class);
hvd->hv_class = NULL;
}
}
static ssize_t vmid_show(struct class *class,
struct class_attribute *attr, char *buf)
{
const struct tegra_hv_data *hvd = get_hvd();
	if (IS_ERR(hvd))
		return PTR_ERR(hvd);
return snprintf(buf, PAGE_SIZE, "%d\n", hvd->guestid);
}
static CLASS_ATTR_RO(vmid);
static int __init tegra_hv_setup(struct tegra_hv_data *hvd)
{
const int intr_property_size = 3;
uint64_t info_page;
uint32_t i;
int ret;
uint32_t *interrupts_arr;
hvd->dev = of_find_compatible_node(NULL, NULL, "nvidia,tegra-hv");
if (!hvd->dev) {
ERR("could not find hv node\n");
return -ENODEV;
}
ret = hyp_read_gid(&hvd->guestid);
if (ret != 0) {
ERR("Failed to read guest id\n");
return -ENODEV;
}
hvd->hv_class = class_create(THIS_MODULE, "tegra_hv");
if (IS_ERR(hvd->hv_class)) {
ERR("class_create() failed\n");
return PTR_ERR(hvd->hv_class);
}
ret = class_create_file(hvd->hv_class, &class_attr_vmid);
if (ret != 0) {
ERR("failed to create vmid file: %d\n", ret);
return ret;
}
ret = hyp_read_ivc_info(&info_page);
if (ret != 0) {
ERR("failed to obtain IVC info page: %d\n", ret);
return ret;
}
hvd->info = (__force struct ivc_info_page *)ioremap_cache(info_page,
IVC_INFO_PAGE_SIZE);
if (hvd->info == NULL) {
ERR("failed to map IVC info page (%llx)\n", info_page);
return -ENOMEM;
}
#ifdef SUPPORTS_TRAP_MSI_NOTIFICATION
/*
* Map IVC Trap MMIO Notification region
*/
ivc_notify.trap_region_base_ipa = hvd->info->trap_region_base_ipa;
ivc_notify.trap_region_size = hvd->info->trap_region_size;
if (ivc_notify.trap_region_size != 0UL) {
INFO("trap_region_base_ipa %lx: trap_region_size=%llx\n",
ivc_notify.trap_region_base_ipa,
ivc_notify.trap_region_size);
if (WARN_ON(ivc_notify.trap_region_base_ipa == 0UL))
return -EINVAL;
if (WARN_ON(ivc_notify.trap_region_base_va != 0UL))
return -EINVAL;
ivc_notify.trap_region_end_ipa =
ivc_notify.trap_region_base_ipa +
ivc_notify.trap_region_size - 1UL;
ivc_notify.trap_region_base_va =
(uintptr_t)ioremap_cache(
ivc_notify.trap_region_base_ipa,
ivc_notify.trap_region_size);
if (ivc_notify.trap_region_base_va == 0UL) {
ERR("failed to map trap ipa notification page\n");
return -ENOMEM;
}
}
/*
* Map IVC MSI Notification region
*/
ivc_notify.msi_region_base_ipa = hvd->info->msi_region_base_ipa;
ivc_notify.msi_region_size = hvd->info->msi_region_size;
if (ivc_notify.msi_region_size != 0UL) {
INFO("msi_region_base_ipa %lx: msi_region_size=%llx\n",
ivc_notify.msi_region_base_ipa,
ivc_notify.msi_region_size);
if (WARN_ON(ivc_notify.msi_region_base_ipa == 0UL))
return -EINVAL;
if (WARN_ON(ivc_notify.msi_region_base_va != 0UL))
return -EINVAL;
ivc_notify.msi_region_end_ipa = ivc_notify.msi_region_base_ipa +
ivc_notify.msi_region_size - 1UL;
ivc_notify.msi_region_base_va =
(uintptr_t)ioremap_cache(ivc_notify.msi_region_base_ipa,
ivc_notify.msi_region_size);
if (ivc_notify.msi_region_base_va == 0UL) {
ERR("failed to map msi ipa notification page\n");
return -ENOMEM;
}
}
#endif
hvd->guest_ivc_info = kzalloc(hvd->info->nr_areas *
sizeof(*hvd->guest_ivc_info), GFP_KERNEL);
if (hvd->guest_ivc_info == NULL) {
ERR("failed to allocate %u-entry givci\n",
hvd->info->nr_areas);
return -ENOMEM;
}
for (i = 0; i < hvd->info->nr_areas; i++) {
hvd->guest_ivc_info[i].shmem = (uintptr_t)ioremap_cache(
hvd->info->areas[i].pa,
hvd->info->areas[i].size);
if (hvd->guest_ivc_info[i].shmem == 0) {
ERR("can't map area for guest %u (%llx)\n",
hvd->info->areas[i].guest,
hvd->info->areas[i].pa);
return -ENOMEM;
}
hvd->guest_ivc_info[i].length = hvd->info->areas[i].size;
}
/* Do not free this, of_add_property does not copy the structure */
interrupts_arr = kmalloc(hvd->info->nr_queues * sizeof(uint32_t)
* intr_property_size, GFP_KERNEL);
if (interrupts_arr == NULL) {
ERR("failed to allocate array for interrupts property\n");
return -ENOMEM;
}
/*
* Determine the largest queue id in order to allocate a queue id-
* indexed array and device nodes, and create interrupts property
*/
hvd->max_qid = 0;
for (i = 0; i < hvd->info->nr_queues; i++) {
const struct tegra_hv_queue_data *qd =
&ivc_info_queue_array(hvd->info)[i];
if (qd->id > hvd->max_qid)
hvd->max_qid = qd->id;
/* 0 => SPI */
interrupts_arr[(i * intr_property_size)] = (__force uint32_t)cpu_to_be32(0);
interrupts_arr[(i * intr_property_size) + 1] =
(__force uint32_t)cpu_to_be32(qd->irq - 32); /* Id in SPI namespace */
/* 0x1 == low-to-high edge */
interrupts_arr[(i * intr_property_size) + 2] = (__force uint32_t)cpu_to_be32(0x1);
}
interrupts_prop.length =
hvd->info->nr_queues * sizeof(uint32_t) * intr_property_size;
interrupts_prop.value = interrupts_arr;
if (of_add_property(hvd->dev, &interrupts_prop)) {
ERR("failed to add interrupts property\n");
kfree(interrupts_arr);
return -EACCES;
}
hvd->ivc_devs = kzalloc((hvd->max_qid + 1) * sizeof(*hvd->ivc_devs),
GFP_KERNEL);
if (hvd->ivc_devs == NULL) {
ERR("failed to allocate %u-entry ivc_devs array\n",
hvd->info->nr_queues);
return -ENOMEM;
}
/* instantiate the IVC */
for (i = 0; i < hvd->info->nr_queues; i++) {
const struct tegra_hv_queue_data *qd =
&ivc_info_queue_array(hvd->info)[i];
ret = tegra_hv_add_ivc(hvd, qd, i);
if (ret != 0) {
ERR("failed to add queue #%u\n", qd->id);
return ret;
}
}
hvd->mempools =
kzalloc(hvd->info->nr_mempools * sizeof(*hvd->mempools),
GFP_KERNEL);
if (hvd->mempools == NULL) {
ERR("failed to allocate %u-entry mempools array\n",
hvd->info->nr_mempools);
return -ENOMEM;
}
/* Initialize mempools. */
for (i = 0; i < hvd->info->nr_mempools; i++) {
const struct ivc_mempool *mpd =
&ivc_info_mempool_array(hvd->info)[i];
struct tegra_hv_ivm_cookie *ivmk = &hvd->mempools[i].ivmk;
hvd->mempools[i].mpd = mpd;
mutex_init(&hvd->mempools[i].lock);
ivmk->ipa = mpd->pa;
ivmk->size = mpd->size;
ivmk->peer_vmid = mpd->peer_vmid;
INFO("added mempool %u: ipa=%llx size=%llx peer=%u\n",
mpd->id, mpd->pa, mpd->size, mpd->peer_vmid);
}
return 0;
}
static int __init tegra_hv_init(void)
{
struct tegra_hv_data *hvd;
int ret;
if (!is_tegra_hypervisor_mode())
return -ENODEV;
hvd = kzalloc(sizeof(*hvd), GFP_KERNEL);
if (!hvd) {
ERR("failed to allocate hvd\n");
return -ENOMEM;
}
ret = tegra_hv_setup(hvd);
if (ret != 0) {
tegra_hv_cleanup(hvd);
kfree(hvd);
return ret;
}
/*
 * Ensure that all writes to *hvd are visible before the global
 * tegra_hv_data pointer is published to other threads.
 */
smp_wmb();
BUG_ON(tegra_hv_data);
tegra_hv_data = hvd;
INFO("initialized\n");
return 0;
}
static void __exit tegra_hv_exit(void)
{
if (!is_tegra_hypervisor_mode())
return;
tegra_hv_cleanup((struct tegra_hv_data *)tegra_hv_data);
kfree(tegra_hv_data);
tegra_hv_data = NULL;
INFO("de-initialized\n");
}
static int ivc_dump(struct hv_ivc *ivc)
{
INFO("IVC#%d: IRQ=%d(%d) nframes=%d frame_size=%d offset=%d\n",
ivc->qd->id, ivc->irq, ivc->qd->irq,
ivc->qd->nframes, ivc->qd->frame_size, ivc->qd->offset);
return 0;
}
struct tegra_hv_ivc_cookie *tegra_hv_ivc_reserve(struct device_node *dn,
int id, const struct tegra_hv_ivc_ops *ops)
{
const struct tegra_hv_data *hvd = get_hvd();
struct hv_ivc *ivc;
struct tegra_hv_ivc_cookie *ivck;
int ret;
if (IS_ERR(hvd))
return (void *)hvd;
ivc = ivc_device_by_id(hvd, id);
if (ivc == NULL)
return ERR_PTR(-ENODEV);
mutex_lock(&ivc->lock);
if (ivc->reserved) {
ret = -EBUSY;
} else {
ivc->reserved = 1;
ret = 0;
}
mutex_unlock(&ivc->lock);
if (ret != 0)
return ERR_PTR(ret);
ivc->cookie_ops = ops;
ivck = &ivc->cookie;
ivck->irq = ivc->irq;
ivck->peer_vmid = ivc->other_guestid;
ivck->nframes = ivc->qd->nframes;
ivck->frame_size = ivc->qd->frame_size;
if (ivc->cookie_ops) {
ivc_handle_notification(ivc);
/* request our irq */
ret = ivc_request_cookie_irq(ivc);
if (ret) {
mutex_lock(&ivc->lock);
BUG_ON(!ivc->reserved);
ivc->reserved = 0;
mutex_unlock(&ivc->lock);
return ERR_PTR(ret);
}
}
/* return pointer to the cookie */
return ivck;
}
EXPORT_SYMBOL(tegra_hv_ivc_reserve);
void tegra_hv_ivc_notify(struct tegra_hv_ivc_cookie *ivck)
{
struct hv_ivc *ivc;
if (ivck == NULL)
return;
ivc = cookie_to_ivc_dev(ivck);
#ifdef SUPPORTS_TRAP_MSI_NOTIFICATION
if (WARN_ON(!ivc->cookie.notify_va))
return;
*ivc->cookie.notify_va = ivc->qd->raise_irq;
#else
hyp_raise_irq(ivc->qd->raise_irq, ivc->other_guestid);
#endif
}
EXPORT_SYMBOL(tegra_hv_ivc_notify);
int tegra_hv_ivc_get_info(struct tegra_hv_ivc_cookie *ivck, uint64_t *pa,
uint64_t *size)
{
struct hv_ivc *ivc;
int ret;
if (ivck == NULL)
return -EINVAL;
ivc = cookie_to_ivc_dev(ivck);
mutex_lock(&ivc->lock);
if (ivc->reserved) {
*pa = ivc->area->pa;
*size = ivc->area->size;
ret = 0;
} else {
ret = -EINVAL;
}
mutex_unlock(&ivc->lock);
return ret;
}
EXPORT_SYMBOL(tegra_hv_ivc_get_info);
int tegra_hv_ivc_unreserve(struct tegra_hv_ivc_cookie *ivck)
{
struct hv_ivc *ivc;
int ret;
if (ivck == NULL)
return -EINVAL;
ivc = cookie_to_ivc_dev(ivck);
mutex_lock(&ivc->lock);
if (ivc->reserved) {
if (ivc->cookie_ops)
ivc_release_irq(ivc);
ivc->cookie_ops = NULL;
ivc->reserved = 0;
ret = 0;
} else {
ret = -EINVAL;
}
mutex_unlock(&ivc->lock);
return ret;
}
EXPORT_SYMBOL(tegra_hv_ivc_unreserve);
int tegra_hv_ivc_write(struct tegra_hv_ivc_cookie *ivck, const void *buf,
int size)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return tegra_ivc_write(&ivc->ivc, NULL, buf, size);
}
EXPORT_SYMBOL(tegra_hv_ivc_write);
int tegra_hv_ivc_write_user(struct tegra_hv_ivc_cookie *ivck, const void __user *buf,
int size)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return tegra_ivc_write(&ivc->ivc, buf, NULL, size);
}
EXPORT_SYMBOL(tegra_hv_ivc_write_user);
int tegra_hv_ivc_read(struct tegra_hv_ivc_cookie *ivck, void *buf, int size)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return tegra_ivc_read(&ivc->ivc, NULL, buf, size);
}
EXPORT_SYMBOL(tegra_hv_ivc_read);
int tegra_hv_ivc_read_user(struct tegra_hv_ivc_cookie *ivck, void __user *buf, int size)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return tegra_ivc_read(&ivc->ivc, buf, NULL, size);
}
EXPORT_SYMBOL(tegra_hv_ivc_read_user);
int tegra_hv_ivc_read_peek(struct tegra_hv_ivc_cookie *ivck, void *buf,
int off, int count)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return tegra_ivc_read_peek(&ivc->ivc, NULL, buf, off, count);
}
EXPORT_SYMBOL(tegra_hv_ivc_read_peek);
int tegra_hv_ivc_can_read(struct tegra_hv_ivc_cookie *ivck)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return tegra_ivc_can_read(&ivc->ivc);
}
EXPORT_SYMBOL(tegra_hv_ivc_can_read);
int tegra_hv_ivc_can_write(struct tegra_hv_ivc_cookie *ivck)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return tegra_ivc_can_write(&ivc->ivc);
}
EXPORT_SYMBOL(tegra_hv_ivc_can_write);
int tegra_hv_ivc_tx_empty(struct tegra_hv_ivc_cookie *ivck)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return tegra_ivc_empty(&ivc->ivc, ivc->ivc.tx.channel);
}
EXPORT_SYMBOL(tegra_hv_ivc_tx_empty);
uint32_t tegra_hv_ivc_tx_frames_available(struct tegra_hv_ivc_cookie *ivck)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return tegra_ivc_frames_available(&ivc->ivc, ivc->ivc.tx.channel);
}
EXPORT_SYMBOL(tegra_hv_ivc_tx_frames_available);
int tegra_hv_ivc_dump(struct tegra_hv_ivc_cookie *ivck)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return ivc_dump(ivc);
}
EXPORT_SYMBOL(tegra_hv_ivc_dump);
void *tegra_hv_ivc_read_get_next_frame(struct tegra_hv_ivc_cookie *ivck)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return tegra_ivc_read_get_next_frame(&ivc->ivc);
}
EXPORT_SYMBOL(tegra_hv_ivc_read_get_next_frame);
void *tegra_hv_ivc_write_get_next_frame(struct tegra_hv_ivc_cookie *ivck)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return tegra_ivc_write_get_next_frame(&ivc->ivc);
}
EXPORT_SYMBOL(tegra_hv_ivc_write_get_next_frame);
int tegra_hv_ivc_write_advance(struct tegra_hv_ivc_cookie *ivck)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return tegra_ivc_write_advance(&ivc->ivc);
}
EXPORT_SYMBOL(tegra_hv_ivc_write_advance);
int tegra_hv_ivc_read_advance(struct tegra_hv_ivc_cookie *ivck)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return tegra_ivc_read_advance(&ivc->ivc);
}
EXPORT_SYMBOL(tegra_hv_ivc_read_advance);
struct tegra_ivc *tegra_hv_ivc_convert_cookie(struct tegra_hv_ivc_cookie *ivck)
{
return &cookie_to_ivc_dev(ivck)->ivc;
}
EXPORT_SYMBOL(tegra_hv_ivc_convert_cookie);
struct tegra_hv_ivm_cookie *tegra_hv_mempool_reserve(unsigned id)
{
uint32_t i;
struct hv_mempool *mempool;
int reserved;
if (!tegra_hv_data)
return ERR_PTR(-EPROBE_DEFER);
/* Locate a mempool with matching id. */
for (i = 0; i < tegra_hv_data->info->nr_mempools; i++) {
mempool = &tegra_hv_data->mempools[i];
if (mempool->mpd->id == id)
break;
}
if (i == tegra_hv_data->info->nr_mempools)
return ERR_PTR(-ENODEV);
mutex_lock(&mempool->lock);
reserved = mempool->reserved;
mempool->reserved = 1;
mutex_unlock(&mempool->lock);
return reserved ? ERR_PTR(-EBUSY) : &mempool->ivmk;
}
EXPORT_SYMBOL(tegra_hv_mempool_reserve);
int tegra_hv_mempool_unreserve(struct tegra_hv_ivm_cookie *ivmk)
{
int reserved;
struct hv_mempool *mempool = container_of(ivmk, struct hv_mempool,
ivmk);
mutex_lock(&mempool->lock);
reserved = mempool->reserved;
mempool->reserved = 0;
mutex_unlock(&mempool->lock);
return reserved ? 0 : -EINVAL;
}
EXPORT_SYMBOL(tegra_hv_mempool_unreserve);
int tegra_hv_ivc_channel_notified(struct tegra_hv_ivc_cookie *ivck)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
return tegra_ivc_notified(&ivc->ivc);
}
EXPORT_SYMBOL(tegra_hv_ivc_channel_notified);
void tegra_hv_ivc_channel_reset(struct tegra_hv_ivc_cookie *ivck)
{
struct hv_ivc *ivc = cookie_to_ivc_dev(ivck);
if (ivc->cookie_ops) {
ERR("reset unsupported with callbacks");
BUG();
}
tegra_ivc_reset(&ivc->ivc);
}
EXPORT_SYMBOL(tegra_hv_ivc_channel_reset);
module_init(tegra_hv_init);
module_exit(tegra_hv_exit);
MODULE_AUTHOR("Manish Bhardwaj <mbhardwaj@nvidia.com>");
MODULE_DESCRIPTION("Hypervisor Driver");
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef __TEGRA_HV_H__
#define __TEGRA_HV_H__
#include <soc/tegra/virt/syscalls.h>
#define SUPPORTS_TRAP_MSI_NOTIFICATION
#define IVC_INFO_PAGE_SIZE 65536
const struct ivc_info_page *tegra_hv_get_ivc_info(void);
int tegra_hv_get_vmid(void);
#endif /* __TEGRA_HV_H__ */


@@ -0,0 +1,307 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef __TEGRA_HV_IVC_H
#define __TEGRA_HV_IVC_H
#include <linux/of.h>
struct tegra_hv_ivc_cookie {
/* some fields that might be useful */
int irq;
int peer_vmid;
int nframes;
int frame_size;
uint32_t *notify_va; /* address used to notify end-point */
};
struct tegra_hv_ivc_ops {
/* called when data are received */
void (*rx_rdy)(struct tegra_hv_ivc_cookie *ivck);
/* called when space is available to write data */
void (*tx_rdy)(struct tegra_hv_ivc_cookie *ivck);
};
struct tegra_hv_ivm_cookie {
uint64_t ipa;
uint64_t size;
unsigned peer_vmid;
void *reserved;
};
bool is_tegra_hypervisor_mode(void);
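/*
 * Example (illustrative sketch, not part of this patch): guest drivers
 * typically gate their probe path on hypervisor mode, exactly as
 * tegra_hv_init() itself does:
 *
 *	if (!is_tegra_hypervisor_mode())
 *		return -ENODEV;
 */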
/**
* tegra_hv_ivc_reserve - Reserve an IVC queue for use
* @dn: Device node pointer to the queue in the DT
* If NULL, then operate on first HV device
* @id Id number of the queue to use.
* @ops Ops structure or NULL (deprecated)
*
* Reserves the queue for use
*
* Returns a pointer to the ivc_dev to use or an ERR_PTR.
* Note that returning EPROBE_DEFER means that the ivc driver
* hasn't loaded yet and you should try again later in the
* boot sequence.
*
* Note that @ops must be NULL for channels that handle reset.
* A usage sketch appears at the end of this header.
*/
struct tegra_hv_ivc_cookie *tegra_hv_ivc_reserve(
struct device_node *dn, int id,
const struct tegra_hv_ivc_ops *ops);
/**
* tegra_hv_ivc_unreserve - Unreserve an IVC queue used
* @ivck IVC cookie
*
* Unreserves the IVC channel
*
* Returns 0 on success and an error code otherwise
*/
int tegra_hv_ivc_unreserve(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_write - Writes a frame to the IVC queue
* @ivck IVC cookie of the queue
* @buf Pointer to the data to write
* @size Size of the data to write
*
* Writes a number of bytes (as a single frame) to the queue.
*
* Returns size on success and an error code otherwise
*/
int tegra_hv_ivc_write(struct tegra_hv_ivc_cookie *ivck, const void *buf,
int size);
/**
* tegra_hv_ivc_write_user - Writes a frame to the IVC queue
* @ivck IVC cookie of the queue
* @buf Pointer to the userspace data to write
* @size Size of the data to write
*
* Writes a number of bytes (as a single frame) to the queue.
*
* Returns size on success and an error code otherwise
*/
int tegra_hv_ivc_write_user(struct tegra_hv_ivc_cookie *ivck, const void __user *buf,
int size);
/**
* tegra_hv_ivc_read - Reads a frame from the IVC queue
* @ivck IVC cookie of the queue
* @buf Pointer to the data to read
* @size max size of the data to read
*
* Reads a number of bytes (as a single frame) from the queue.
*
* Returns size on success and an error code otherwise
*/
int tegra_hv_ivc_read(struct tegra_hv_ivc_cookie *ivck, void *buf, int size);
/**
* tegra_hv_ivc_read_user - Reads a frame from the IVC queue
* @ivck IVC cookie of the queue
* @buf Pointer to the userspace data to read
* @size max size of the data to read
*
* Reads a number of bytes (as a single frame) from the queue.
*
* Returns size on success and an error code otherwise
*/
int tegra_hv_ivc_read_user(struct tegra_hv_ivc_cookie *ivck, void __user *buf, int size);
/**
* tegra_hv_ivc_can_read - Test whether data is available
* @ivck IVC cookie of the queue
*
* Tests whether there is data available to read.
*
* Returns 1 if data is available in the rx queue, 0 if not
*/
int tegra_hv_ivc_can_read(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_can_write - Test whether data can be written
* @ivck IVC cookie of the queue
*
* Tests whether data can be written.
*
* Returns 1 if data can be written to the tx queue, 0 if not
*/
int tegra_hv_ivc_can_write(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_tx_frames_available - gets number of free entries in tx queue
* @ivck IVC cookie of the queue
*
* Returns the number of unused entries in the tx queue. Assuming the caller
* does not write any additional frames, this number may increase from the
* value returned as the receiver consumes frames.
*
*/
uint32_t tegra_hv_ivc_tx_frames_available(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_tx_empty - Test whether the tx queue is empty
* @ivck IVC cookie of the queue
*
* Tests whether the tx queue is completely empty.
*
* Returns 1 if the queue is empty, zero otherwise
*/
int tegra_hv_ivc_tx_empty(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_set_loopback - Sets (or clears) loopback mode
* @ivck IVC cookie of the queue
* @mode Set loopback on/off (1 = on, 0 = off)
*
* Sets or clears loopback mode accordingly.
*
* When loopback is active, writes are ignored and reads return no data;
* incoming data is copied immediately to the tx queue.
*
* Returns 0 on success, a negative error code otherwise
*/
int tegra_hv_ivc_set_loopback(struct tegra_hv_ivc_cookie *ivck, int mode);
/* debugging aid */
int tegra_hv_ivc_dump(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_read_peek - Peek (copying) data from a received frame
* @ivck IVC cookie of the queue
* @buf Buffer to receive the data
* @off Offset in the frame
* @count Count of bytes to copy
*
* Peek data from a received frame, copying to buf, without removing
* the frame from the queue.
*
* Returns 0 on success, a negative error code otherwise
*/
int tegra_hv_ivc_read_peek(struct tegra_hv_ivc_cookie *ivck,
void *buf, int off, int count);
/**
* tegra_hv_ivc_read_get_next_frame - Peek at the next frame to receive
* @ivck IVC cookie of the queue
*
* Peek at the next frame to be received, without removing it from
* the queue.
*
* Returns a pointer to the frame, or an error encoded pointer.
*/
void *tegra_hv_ivc_read_get_next_frame(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_read_advance - Advance the read queue
* @ivck IVC cookie of the queue
*
* Advance the read queue
*
* Returns 0, or a negative error value if failed.
*/
int tegra_hv_ivc_read_advance(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_write_poke - Poke data to a frame to be transmitted
* @ivck IVC cookie of the queue
* @buf Buffer to the data
* @off Offset in the frame
* @count Count of bytes to copy
*
* Copy data to a transmit frame, copying from buf, without advancing
* the transmit queue.
*
* Returns 0 on success, a negative error code otherwise
*/
int tegra_hv_ivc_write_poke(struct tegra_hv_ivc_cookie *ivck,
const void *buf, int off, int count);
/**
* tegra_hv_ivc_write_get_next_frame - Get the next frame to transmit
* @ivck IVC cookie of the queue
*
* Get access to the next frame.
*
* Returns a pointer to the frame, or an error encoded pointer.
*/
void *tegra_hv_ivc_write_get_next_frame(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_write_advance - Advance the write queue
* @ivck IVC cookie of the queue
*
* Advance the write queue
*
* Returns 0, or a negative error value if failed.
*/
int tegra_hv_ivc_write_advance(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_mempool_reserve - reserve a mempool for use
* @id Id of the requested mempool.
*
* Returns a cookie representing the mempool on success, otherwise an ERR_PTR.
*/
struct tegra_hv_ivm_cookie *tegra_hv_mempool_reserve(unsigned id);
/**
* tegra_hv_mempool_unreserve - release a reserved mempool
* @ck Cookie returned by tegra_hv_mempool_reserve().
*
* Returns 0 on success or a negative error code otherwise.
*/
int tegra_hv_mempool_unreserve(struct tegra_hv_ivm_cookie *ck);
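/*
 * Example (illustrative sketch; mempool id 1 is an assumption, real ids
 * come from the hypervisor's IVC info page):
 *
 *	struct tegra_hv_ivm_cookie *ivmk = tegra_hv_mempool_reserve(1);
 *	void __iomem *base;
 *
 *	if (IS_ERR(ivmk))
 *		return PTR_ERR(ivmk);
 *	base = ioremap(ivmk->ipa, ivmk->size);	// map the shared pool
 *	...
 *	iounmap(base);
 *	tegra_hv_mempool_unreserve(ivmk);
 */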
/**
* tegra_hv_ivc_channel_notified - handle internal messages
* @ivck IVC cookie of the queue
*
* This function must be called following every notification (interrupt or
* callback invocation) on this channel.
*
* Returns 0 if the channel is ready for communication, or -EAGAIN if a channel
* reset is in progress.
*/
int tegra_hv_ivc_channel_notified(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_channel_reset - initiates a reset of the shared memory state
* @ivck IVC cookie of the queue
*
* This function must be called after a channel is reserved and before it is
* used for communication. The channel will be ready for use when a subsequent
* call to tegra_hv_ivc_channel_notified() returns 0.
*/
void tegra_hv_ivc_channel_reset(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_get_info - Get info of Guest shared area
* @ivck IVC cookie of the queue
* @pa IPA of shared area
* @size Size of the shared area
*
* Get info (IPA and size) of Guest shared area
*
* Returns 0 on success and an error code otherwise
*/
int tegra_hv_ivc_get_info(struct tegra_hv_ivc_cookie *ivck, uint64_t *pa,
uint64_t *size);
/**
* tegra_hv_ivc_notify - Notify remote guest
* @ivck IVC cookie of the queue
*
* Notify remote guest
*
*/
void tegra_hv_ivc_notify(struct tegra_hv_ivc_cookie *ivck);
struct tegra_ivc *tegra_hv_ivc_convert_cookie(struct tegra_hv_ivc_cookie *ivck);
#endif /* __TEGRA_HV_IVC_H */
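
A minimal end-to-end sketch of the intended client call flow (illustrative only, not part of this patch; the queue id 42, the polling loop, and the echo logic are assumptions): reserve the channel without callbacks, reset it, wait until tegra_hv_ivc_channel_notified() reports 0, then exchange frames and release the channel.

#include <linux/delay.h>
#include <linux/err.h>
#include <soc/tegra/virt/hv-ivc.h>

static int example_ivc_echo(void)
{
	struct tegra_hv_ivc_cookie *ivck;
	char buf[64];
	int ret = 0;

	/* Hypothetical queue id; real ids come from the IVC info page. */
	ivck = tegra_hv_ivc_reserve(NULL, 42, NULL);
	if (IS_ERR(ivck))
		return PTR_ERR(ivck);	/* -EPROBE_DEFER: retry later */

	/* Reset handshake: the channel is usable once notified() returns 0. */
	tegra_hv_ivc_channel_reset(ivck);
	while (tegra_hv_ivc_channel_notified(ivck) != 0)
		usleep_range(1000, 2000);

	/* Echo one frame back if anything is pending. */
	if (tegra_hv_ivc_can_read(ivck)) {
		ret = tegra_hv_ivc_read(ivck, buf, sizeof(buf));
		if (ret > 0 && tegra_hv_ivc_can_write(ivck))
			ret = tegra_hv_ivc_write(ivck, buf, ret);
	}

	tegra_hv_ivc_unreserve(ivck);
	return ret < 0 ? ret : 0;
}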


@@ -0,0 +1,489 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef __TEGRA_SYSCALLS_H__
#define __TEGRA_SYSCALLS_H__
#include <soc/tegra/virt/tegra_hv_sysmgr.h>
#define HVC_NR_READ_STAT 1
#define HVC_NR_READ_IVC 2
#define HVC_NR_READ_GID 3
#define HVC_NR_RAISE_IRQ 4
#define HVC_NR_READ_NGUESTS 5
#define HVC_NR_READ_IPA_PA 6
#define HVC_NR_READ_GUEST_STATE 7
#define HVC_NR_READ_HYP_INFO 9
#define HVC_NR_GUEST_RESET 10
#define HVC_NR_SYSINFO_IPA 13
#define HVC_NR_TRACE_GET_EVENT_MASK 0x8003U
#define HVC_NR_TRACE_SET_EVENT_MASK 0x8004U
#define GUEST_PRIMARY 0
#define GUEST_IVC_SERVER 0
#define HVC_NR_CPU_FREQ 0xC6000022
#define NGUESTS_MAX 16
#ifndef __ASSEMBLY__
#if defined(__KERNEL__)
#include <linux/types.h>
#endif
struct tegra_hv_queue_data {
uint32_t id; /* IVC id */
uint32_t peers[2];
uint32_t size;
uint32_t nframes;
uint32_t frame_size;
uint32_t offset;
uint16_t irq, raise_irq;
uint64_t trap_ipa; /** @brief IO address used to notify peer endpoint */
uint64_t msi_ipa; /** @brief MSI address used to notify peer endpoint */
};
struct ivc_mempool {
uint64_t pa;
uint64_t size;
uint32_t id;
uint32_t peer_vmid;
};
struct ivc_shared_area {
uint64_t pa;
uint64_t size;
uint32_t guest;
uint16_t free_irq_start;
uint16_t free_irq_count;
};
struct ivc_info_page {
uint32_t nr_queues;
uint32_t nr_areas;
uint32_t nr_mempools;
	uint32_t padding; /**< @brief reserved; keeps sizeof(struct ivc_info_page) aligned to 64 bits */
uint64_t trap_region_base_ipa; /**< @brief MMIO trap region start address */
uint64_t trap_region_size; /**< @brief MMIO trap region size */
uint64_t trap_ipa_stride; /**< @brief MMIO trap IPA stride size */
uint64_t msi_region_base_ipa; /**< @brief MMIO msi region start address */
uint64_t msi_region_size; /**< @brief MMIO msi region size */
uint64_t msi_ipa_stride; /**< @brief MMIO msi IPA stride size */
/* The actual length of this array is nr_areas. */
struct ivc_shared_area areas[];
/*
* Following the shared array is an array of queue data structures with
* an entry per queue that is assigned to the guest. This array is
* terminated by an entry with no frames.
*
* struct tegra_hv_queue_data queue_data[nr_queues];
*/
/*
* Following the queue data array is an array of mempool structures
* with an entry per mempool assigned to the guest.
*
* struct ivc_mempool[nr_mempools];
*/
};
static inline struct ivc_shared_area *ivc_shared_area_addr(
const struct ivc_info_page *info, uint32_t area_num)
{
return ((struct ivc_shared_area *) (((uintptr_t) info) + sizeof(*info)))
+ area_num;
}
static inline const struct tegra_hv_queue_data *ivc_info_queue_array(
const struct ivc_info_page *info)
{
return (struct tegra_hv_queue_data *)&info->areas[info->nr_areas];
}
static inline const struct ivc_mempool *ivc_info_mempool_array(
const struct ivc_info_page *info)
{
return (struct ivc_mempool *)
&ivc_info_queue_array(info)[info->nr_queues];
}
struct hyp_ipa_pa_info {
uint64_t base; /* base of contiguous pa region */
uint64_t offset; /* offset for requested ipa address */
uint64_t size; /* size of pa region */
};
#define HVC_MAX_VCPU 64
struct trapped_access {
uint64_t ipa;
uint32_t size;
int32_t write_not_read;
uint64_t data;
uint32_t guest_id;
};
struct hyp_server_page {
/* guest reset protocol */
uint32_t guest_reset_virq;
/* boot delay offsets per VM needed by monitor partition */
uint32_t boot_delay[NGUESTS_MAX];
/* hypervisor trace log */
uint64_t log_ipa;
uint32_t log_size;
/* secure-hypervisor trace log */
uint64_t secure_log_ipa;
uint32_t secure_log_size;
/* PCT data */
uint64_t pct_ipa;
uint64_t pct_size;
/* check if the VM is a server or a guest */
uint32_t is_server_vm;
/* golden register data */
uint64_t gr_ipa;
uint32_t gr_size;
/* all vm mappings ipa */
uint64_t mappings_ipa;
};
/* For backwards compatibility, alias the old name hyp_info_page to hyp_server_page. */
#define hyp_info_page hyp_server_page
#ifdef CONFIG_ARM64
#define _X3_X17 "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", \
"x13", "x14", "x15", "x16", "x17"
#define _X4_X17 "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", \
"x13", "x14", "x15", "x16", "x17"
static inline int hyp_read_gid(unsigned int *gid)
{
register uint64_t r0 asm("x0");
register uint64_t r1 asm("x1");
asm("hvc %2"
: "=r"(r0), "=r"(r1)
: "i"(HVC_NR_READ_GID)
: "x2", "x3", _X4_X17);
*gid = r1;
return (int)r0;
}
static inline int hyp_read_nguests(unsigned int *nguests)
{
register uint64_t r0 asm("x0");
register uint64_t r1 asm("x1");
asm("hvc %2"
: "=r"(r0), "=r"(r1)
: "i"(HVC_NR_READ_NGUESTS)
: "x2", "x3", _X4_X17);
*nguests = r1;
return (int)r0;
}
static inline int hyp_read_ivc_info(uint64_t *ivc_info_page_pa)
{
register uint64_t r0 asm("x0");
register uint64_t r1 asm("x1");
asm("hvc %2"
: "=r"(r0), "=r"(r1)
: "i"(HVC_NR_READ_IVC)
: "x2", "x3", _X4_X17);
*ivc_info_page_pa = r1;
return (int)r0;
}
static inline int hyp_read_ipa_pa_info(struct hyp_ipa_pa_info *info,
unsigned int guestid, uint64_t ipa)
{
register uint64_t r0 asm("x0") = guestid;
register uint64_t r1 asm("x1") = ipa;
register uint64_t r2 asm("x2");
register uint64_t r3 asm("x3");
asm("hvc %4"
: "+r"(r0), "+r"(r1), "=r"(r2), "=r"(r3)
: "i"(HVC_NR_READ_IPA_PA)
: _X4_X17);
info->base = r1;
info->offset = r2;
info->size = r3;
return (int)r0;
}
static inline int hyp_raise_irq(unsigned int irq, unsigned int vmid)
{
register uint64_t r0 asm("x0") = irq;
register uint64_t r1 asm("x1") = vmid;
asm volatile("hvc %1"
: "+r"(r0)
: "i"(HVC_NR_RAISE_IRQ), "r"(r1)
: "x2", "x3", _X4_X17);
return (int)r0;
}
static inline int hyp_read_guest_state(unsigned int vmid, unsigned int *state)
{
register uint64_t r0 asm("x0") = vmid;
register uint64_t r1 asm("x1");
asm("hvc %2"
: "+r"(r0), "=r"(r1)
: "i"(HVC_NR_READ_GUEST_STATE)
: "x2", _X3_X17);
*state = (unsigned int)r1;
return (int)r0;
}
static inline int hyp_read_hyp_info(uint64_t *hyp_info_page_pa)
{
register uint64_t r0 asm("x0");
register uint64_t r1 asm("x1");
asm("hvc %2"
: "=r"(r0), "=r"(r1)
: "i"(HVC_NR_READ_HYP_INFO)
: "x2", "x3", _X4_X17);
*hyp_info_page_pa = r1;
return (int)r0;
}
static inline int hyp_guest_reset(unsigned int id,
struct hyp_sys_state_info *out)
{
register uint64_t r0 asm("x0") = id;
register uint64_t r1 asm("x1");
register uint64_t r2 asm("x2");
register uint64_t r3 asm("x3");
asm volatile("hvc %4"
: "+r"(r0), "=r"(r1),
"=r"(r2), "=r"(r3)
: "i"(HVC_NR_GUEST_RESET)
: _X4_X17);
if (out != NULL) {
out->sys_transition_mask = (uint32_t)r1;
out->vm_shutdown_mask = (uint32_t)r2;
out->vm_reboot_mask = (uint32_t)r3;
}
return (int)r0;
}
static inline uint64_t hyp_sysinfo_ipa(void)
{
register uint64_t r0 asm("x0");
asm("hvc %1"
: "=r"(r0)
: "i"(HVC_NR_SYSINFO_IPA)
: "x1", "x2", "x3", _X4_X17);
return r0;
}
static inline int hyp_read_freq_feedback(uint64_t *value)
{
register uint64_t r0 asm("x0") = HVC_NR_CPU_FREQ;
register uint64_t r1 asm("x1") = 1U;
asm volatile("hvc #0"
: "+r"(r0), "+r"(r1)
:
: "x2", "x3", _X4_X17);
if (r0 == 1 && value != NULL)
*value = r1;
return (int16_t)r0;
}
static inline int hyp_read_freq_request(uint64_t *value)
{
register uint64_t r0 asm("x0") = HVC_NR_CPU_FREQ;
register uint64_t r1 asm("x1") = 0U;
asm volatile("hvc #0"
: "+r"(r0), "+r"(r1)
:
: "x2", "x3", _X4_X17);
if (r0 == 1 && value != NULL)
*value = r1;
return (int16_t)r0;
}
static inline int hyp_write_freq_request(uint64_t value)
{
register uint64_t r0 asm("x0") = HVC_NR_CPU_FREQ;
register uint64_t r1 asm("x1") = 2U;
register uint64_t r2 asm("x2") = value;
asm volatile("hvc #0"
: "+r"(r0)
: "r"(r1), "r"(r2)
: "x3", _X4_X17);
return (int16_t)r0;
}
static inline int hyp_pct_cpu_id_read_freq_feedback(uint8_t cpu_id,
uint64_t *value)
{
register uint64_t r0 asm("x0") = HVC_NR_CPU_FREQ;
register uint64_t r1 asm("x1") = 4U;
register uint64_t r2 asm("x2") = cpu_id;
asm volatile("hvc #0"
: "+r"(r0), "+r"(r1)
: "r"(r2)
: "x3", _X4_X17);
if (r0 == 1 && value != NULL)
*value = r1;
return (int16_t)r0;
}
static inline int hyp_pct_cpu_id_read_freq_request(uint8_t cpu_id,
uint64_t *value)
{
register uint64_t r0 asm("x0") = HVC_NR_CPU_FREQ;
register uint64_t r1 asm("x1") = 3U;
register uint64_t r2 asm("x2") = cpu_id;
asm volatile("hvc #0"
: "+r"(r0), "+r"(r1)
: "r"(r2)
: "x3", _X4_X17);
if (r0 == 1 && value != NULL)
*value = r1;
return (int16_t)r0;
}
static inline int hyp_pct_cpu_id_write_freq_request(uint8_t cpu_id,
uint64_t value)
{
register uint64_t r0 asm("x0") = HVC_NR_CPU_FREQ;
register uint64_t r1 asm("x1") = 5U;
register uint64_t r2 asm("x2") = value;
register uint64_t r3 asm("x3") = cpu_id;
asm volatile("hvc #0"
: "+r"(r0)
: "r"(r1), "r"(r2), "r"(r3)
: _X4_X17);
return (int16_t)r0;
}
static inline uint8_t hyp_get_cpu_count(void)
{
register uint64_t r0 asm("x0") = HVC_NR_CPU_FREQ;
register uint64_t r1 asm("x1") = 6U;
asm volatile("hvc #0"
: "+r"(r0), "+r"(r1)
:
: "x2", "x3", _X4_X17);
if (r0 == 1)
return r1;
return 0;
}
static __attribute__((always_inline)) inline void hyp_call44(uint16_t id,
uint64_t args[4])
{
register uint64_t x0 asm("x0") = args[0];
register uint64_t x1 asm("x1") = args[1];
register uint64_t x2 asm("x2") = args[2];
register uint64_t x3 asm("x3") = args[3];
asm volatile("HVC %[imm16]"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
:
[imm16] "i"(((uint32_t)id)));
args[0] = x0;
args[1] = x1;
args[2] = x2;
args[3] = x3;
}
static inline int hyp_trace_get_mask(uint64_t *value)
{
uint64_t args[4] = { 0U, 0U, 0U, 0U };
hyp_call44(HVC_NR_TRACE_GET_EVENT_MASK, args);
if (args[0] == 0U)
*value = args[1];
return (int) args[0];
}
static inline int hyp_trace_set_mask(uint64_t mask)
{
uint64_t args[4] = { mask, 0U, 0U, 0U };
hyp_call44(HVC_NR_TRACE_SET_EVENT_MASK, args);
return (int) args[0];
}
#undef _X3_X17
#undef _X4_X17
#else
int hyp_read_gid(unsigned int *gid);
int hyp_read_nguests(unsigned int *nguests);
int hyp_read_ivc_info(uint64_t *ivc_info_page_pa);
int hyp_read_ipa_pa_info(struct hyp_ipa_pa_info *info, int guestid,
uint64_t ipa);
int hyp_raise_irq(unsigned int irq, unsigned int vmid);
uint64_t hyp_sysinfo_ipa(void);
/* ASM prototypes */
extern int hvc_read_gid(void *);
extern int hvc_read_ivc_info(int *);
extern int hvc_read_ipa_pa_info(void *, int guestid, uint64_t ipa);
extern int hvc_read_nguests(void *);
extern int hvc_raise_irq(unsigned int irq, unsigned int vmid);
#endif /* CONFIG_ARM64 */
#endif /* !__ASSEMBLY__ */
#endif /* __TEGRA_SYSCALLS_H__ */
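
A small usage sketch (illustrative only; the function name and log text are assumptions): the hypercall wrappers above are called exactly the way tegra_hv_setup() calls hyp_read_gid().

#include <linux/errno.h>
#include <linux/printk.h>
#include <soc/tegra/virt/syscalls.h>

static int example_query_hypervisor(void)
{
	unsigned int gid, nguests;

	if (hyp_read_gid(&gid) != 0)		/* HVC_NR_READ_GID */
		return -ENODEV;
	if (hyp_read_nguests(&nguests) != 0)	/* HVC_NR_READ_NGUESTS */
		return -ENODEV;
	pr_info("running as guest %u of %u\n", gid, nguests);
	return 0;
}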


@@ -0,0 +1,133 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef _TEGRA_HV_SYSMGR_H
#define _TEGRA_HV_SYSMGR_H
#include <linux/types.h>
#define SYSMGR_IVCMSG_SIZE_MAX 64
enum hv_sysmgr_msg_type {
HV_SYSMGR_MSG_TYPE_GUEST_EVENT = 1,
HV_SYSMGR_MSG_TYPE_VM_PM_CTL_CMD = 2,
HV_SYSMGR_MSG_TYPE_INVALID
};
enum hv_sysmgr_cmd_id {
HV_SYSMGR_CMD_NORMAL_SHUTDOWN = 0x0,
HV_SYSMGR_CMD_NORMAL_REBOOT = 0x1,
HV_SYSMGR_CMD_NORMAL_SUSPEND = 0x2,
HV_SYSMGR_CMD_NORMAL_RESUME = 0x3,
HV_SYSMGR_CMD_INVALID = 0xFFFFFFFF,
};
enum hv_sysmgr_resp_id {
HV_SYSMGR_RESP_ACCEPTED = 0x0,
HV_SYSMGR_RESP_UNKNOWN_COMMAND = 0xF,
};
/* This struct is carried in the client_data payload of struct hv_sysmgr_message */
struct hv_sysmgr_command {
uint32_t cmd_id;
uint32_t resp_id;
} __packed;
struct hv_sysmgr_message {
/* msg class */
uint32_t msg_type;
/* id of open socket */
uint32_t socket_id;
/* client data area. Payload */
uint8_t client_data[SYSMGR_IVCMSG_SIZE_MAX];
} __packed;
/*
* QUERY_SYSTEM_STATE COMMAND DATA LAYOUT
*/
struct hyp_sys_state_info {
/* Indicates System State Transition */
uint32_t sys_transition_mask;
/* Indicates which VM shutdown request is pending */
uint32_t vm_shutdown_mask;
/* Indicates which VM reboot request is pending */
uint32_t vm_reboot_mask;
/* Indicates which VM suspend request is pending */
uint32_t vm_suspend_phase_1_mask;
uint32_t vm_suspend_phase_2_mask;
/* Indicates which VM resume request is pending */
uint32_t vm_resume_mask;
};
/*
* Power management call IDs used by SYSMGR to manage LOCAL/GLOBAL EVENTS
*/
enum system_function_id {
INVALID_FUNC,
/*
* This is used to get reboot/shutdown masks per VM from hypervisor.
* Hypervisor updates state fields on a PSCI event from the VM.
*/
QUERY_SYSTEM_STATE,
GUEST_SHUTDOWN_INIT,
GUEST_SHUTDOWN_COMPLETE,
GUEST_REBOOT_INIT,
GUEST_REBOOT_CONTINUE,
GUEST_REBOOT_COMPLETE,
SYSTEM_SHUTDOWN_INIT,
SYSTEM_SHUTDOWN_COMPLETE,
SYSTEM_REBOOT_INIT,
SYSTEM_REBOOT_COMPLETE,
GUEST_SUSPEND_REQ,
GUEST_SUSPEND_INIT,
GUEST_SUSPEND_COMPLETE,
GUEST_RESUME_INIT,
GUEST_RESUME_COMPLETE,
GUEST_PAUSE,
SYSTEM_SUSPEND_INIT,
SYSTEM_SUSPEND_COMPLETE,
MAX_FUNC_ID,
};
typedef enum {
VM_STATE_BOOT,
VM_STATE_HALT,
VM_STATE_UNHALT,
VM_STATE_REBOOT,
VM_STATE_SHUTDOWN,
VM_STATE_SUSPEND,
VM_STATE_RESUME,
VM_STATE_INVALID,
VM_STATE_MAX
} vm_state;
#define CREATE_CMD(func_id, vmid) (((func_id) << 24U) | (vmid))
#define QUERY_CMD CREATE_CMD(QUERY_SYSTEM_STATE, 0)
#define GUEST_SHUTDOWN_INIT_CMD(vmid) CREATE_CMD(GUEST_SHUTDOWN_INIT, vmid)
#define GUEST_SHUTDOWN_COMPLETE_CMD(vmid) \
CREATE_CMD(GUEST_SHUTDOWN_COMPLETE, vmid)
#define GUEST_REBOOT_INIT_CMD(vmid) CREATE_CMD(GUEST_REBOOT_INIT, vmid)
#define GUEST_REBOOT_CONTINUE_CMD(vmid) CREATE_CMD(GUEST_REBOOT_CONTINUE, vmid)
#define GUEST_REBOOT_COMPLETE_CMD(vmid) CREATE_CMD(GUEST_REBOOT_COMPLETE, vmid)
#define SYS_SHUTDOWN_INIT_CMD CREATE_CMD(SYSTEM_SHUTDOWN_INIT, 0)
#define SYS_SHUTDOWN_COMPLETE_CMD CREATE_CMD(SYSTEM_SHUTDOWN_COMPLETE, 0)
#define SYS_REBOOT_INIT_CMD CREATE_CMD(SYSTEM_REBOOT_INIT, 0)
#define SYS_REBOOT_COMPLETE_CMD CREATE_CMD(SYSTEM_REBOOT_COMPLETE, 0)
#define GUEST_SUSPEND_REQ_CMD(vmid) CREATE_CMD(GUEST_SUSPEND_REQ, vmid)
#define GUEST_SUSPEND_INIT_CMD(vmid) CREATE_CMD(GUEST_SUSPEND_INIT, vmid)
#define GUEST_SUSPEND_COMPLETE_CMD(vmid) \
	CREATE_CMD(GUEST_SUSPEND_COMPLETE, vmid)
#define GUEST_RESUME_INIT_CMD(vmid) CREATE_CMD(GUEST_RESUME_INIT, vmid)
#define GUEST_RESUME_COMPLETE_CMD(vmid) CREATE_CMD(GUEST_RESUME_COMPLETE, vmid)
#define GUEST_PAUSE_CMD(vmid) CREATE_CMD(GUEST_PAUSE, vmid)
#define SYS_SUSPEND_INIT_CMD CREATE_CMD(SYSTEM_SUSPEND_INIT, 0)
#define SYS_SUSPEND_COMPLETE_CMD CREATE_CMD(SYSTEM_SUSPEND_COMPLETE, 0)
#endif /* _TEGRA_HV_SYSMGR_H */
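
A worked example of the command encoding above (illustrative only; vmid 3 is arbitrary): CREATE_CMD() packs the function id into bits 31..24 and the vmid into the low 24 bits, so a command decodes with a shift and a mask.

/* Illustrative sketch; assumes this header is included. */
static inline void example_decode_cmd(void)
{
	uint32_t cmd = GUEST_REBOOT_INIT_CMD(3);

	uint32_t func_id = cmd >> 24;		/* == GUEST_REBOOT_INIT */
	uint32_t vmid = cmd & 0x00FFFFFFU;	/* == 3 */

	(void)func_id;
	(void)vmid;
}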