nvidia-oot: add support for hypervisor driver

Add support for the hypervisor driver.

Bug 3595577
JIRA ESLC-6884

Signed-off-by: Manish Bhardwaj <mbhardwaj@nvidia.com>
Change-Id: I0fc9b5eed45d584bc658c2613b33968bf8a91eaf
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2779389
Reviewed-by: Bitan Biswas <bbiswas@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author: Manish Bhardwaj
Date: 2022-09-23 07:09:44 +00:00
Committed-by: mobile promotions
parent 0d01039657
commit 6cf5029afb
7 changed files with 1932 additions and 0 deletions


@@ -0,0 +1,307 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef __TEGRA_HV_IVC_H
#define __TEGRA_HV_IVC_H
#include <linux/of.h>
struct tegra_hv_ivc_cookie {
/* some fields that might be useful */
int irq;
int peer_vmid;
int nframes;
int frame_size;
uint32_t *notify_va; /* address used to notify end-point */
};
struct tegra_hv_ivc_ops {
/* called when data are received */
void (*rx_rdy)(struct tegra_hv_ivc_cookie *ivck);
/* called when space is available to write data */
void (*tx_rdy)(struct tegra_hv_ivc_cookie *ivck);
};
struct tegra_hv_ivm_cookie {
uint64_t ipa;
uint64_t size;
unsigned peer_vmid;
void *reserved;
};
bool is_tegra_hypervisor_mode(void);
/**
* tegra_hv_ivc_reserve - Reserve an IVC queue for use
* @dn Device node pointer to the queue in the DT;
*     if NULL, operate on the first HV device
* @id Id number of the queue to use
* @ops Ops structure or NULL (deprecated)
*
* Reserves the queue for use
*
* Returns a pointer to the ivc_dev to use or an ERR_PTR.
* Note that returning EPROBE_DEFER means that the ivc driver
* hasn't loaded yet and you should try again later in the
* boot sequence.
*
* Note that @ops must be NULL for channels that handle reset.
*/
struct tegra_hv_ivc_cookie *tegra_hv_ivc_reserve(
struct device_node *dn, int id,
const struct tegra_hv_ivc_ops *ops);
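/*
 * Illustrative sketch (not part of the API above): how a client driver
 * might reserve a queue during probe. The queue id 10 is an assumption for
 * the example; real ids come from the device tree. A NULL ops pointer
 * leaves the channel reset-capable, and PTR_ERR() may yield -EPROBE_DEFER
 * if the hypervisor driver has not loaded yet.
 */
static inline int example_ivc_probe(struct device_node *dn)
{
	struct tegra_hv_ivc_cookie *ivck;

	if (!is_tegra_hypervisor_mode())
		return -ENODEV;

	ivck = tegra_hv_ivc_reserve(dn, 10, NULL);
	if (IS_ERR(ivck))
		return PTR_ERR(ivck); /* may be -EPROBE_DEFER */

	/* ... use the channel, then tegra_hv_ivc_unreserve(ivck) ... */
	return 0;
}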
/**
* tegra_hv_ivc_unreserve - Unreserve an IVC queue
* @ivck IVC cookie
*
* Unreserves the IVC channel
*
* Returns 0 on success and an error code otherwise
*/
int tegra_hv_ivc_unreserve(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_write - Writes a frame to the IVC queue
* @ivck IVC cookie of the queue
* @buf Pointer to the data to write
* @size Size of the data to write
*
* Writes a number of bytes (as a single frame) to the queue.
*
* Returns size on success and an error code otherwise
*/
int tegra_hv_ivc_write(struct tegra_hv_ivc_cookie *ivck, const void *buf,
int size);
/**
* tegra_hv_ivc_write_user - Writes a frame to the IVC queue from userspace
* @ivck IVC cookie of the queue
* @buf Pointer to the userspace data to write
* @size Size of the data to write
*
* Writes a number of bytes (as a single frame) to the queue.
*
* Returns size on success and an error code otherwise
*/
int tegra_hv_ivc_write_user(struct tegra_hv_ivc_cookie *ivck, const void __user *buf,
int size);
/**
* tegra_hv_ivc_read - Reads a frame from the IVC queue
* @ivck IVC cookie of the queue
* @buf Pointer to the data to read
* @size max size of the data to read
*
* Reads a number of bytes (as a single frame) from the queue.
*
* Returns size on success and an error code otherwise
*/
int tegra_hv_ivc_read(struct tegra_hv_ivc_cookie *ivck, void *buf, int size);
/**
* tegra_hv_ivc_read_user - Reads a frame from the IVC queue into userspace
* @ivck IVC cookie of the queue
* @buf Pointer to the userspace data to read
* @size max size of the data to read
*
* Reads a number of bytes (as a single frame) from the queue.
*
* Returns size on success and an error code otherwise
*/
int tegra_hv_ivc_read_user(struct tegra_hv_ivc_cookie *ivck, void __user *buf, int size);
/**
* tegra_hv_ivc_can_read - Test whether data are available
* @ivck IVC cookie of the queue
*
* Test whether data to read are available
*
* Returns 1 if data are available in the rx queue, 0 if not
*/
int tegra_hv_ivc_can_read(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_can_write - Test whether data can be written
* @ivck IVC cookie of the queue
*
* Test whether data can be written
*
* Returns 1 if data can be written to the tx queue, 0 if not
*/
int tegra_hv_ivc_can_write(struct tegra_hv_ivc_cookie *ivck);
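/*
 * Illustrative sketch: a polled copy loop built on the can_read/can_write
 * tests above. The stack buffer size is an assumption for the example.
 */
static inline void example_ivc_echo(struct tegra_hv_ivc_cookie *ivck)
{
	char buf[64];
	int n;

	while (tegra_hv_ivc_can_read(ivck) && tegra_hv_ivc_can_write(ivck)) {
		n = tegra_hv_ivc_read(ivck, buf, sizeof(buf));
		if (n < 0)
			break;
		if (tegra_hv_ivc_write(ivck, buf, n) != n)
			break;
	}
}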
/**
* tegra_hv_ivc_tx_frames_available - gets number of free entries in tx queue
* @ivck IVC cookie of the queue
*
* Returns the number of unused entries in the tx queue. Assuming the caller
* does not write any additional frames, this number may increase from the
* value returned as the receiver consumes frames.
*
*/
uint32_t tegra_hv_ivc_tx_frames_available(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_tx_empty - Test whether the tx queue is empty
* @ivck IVC cookie of the queue
*
* Test whether the tx queue is completely empty
*
* Returns 1 if the queue is empty, 0 otherwise
*/
int tegra_hv_ivc_tx_empty(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_set_loopback - Sets (or clears) loopback mode
* @ivck IVC cookie of the queue
* @mode Set loopback on/off (1 = on, 0 = off)
*
* Sets or clears loopback mode accordingly.
*
* When loopback is active, any writes are ignored and reads
* do not return data; incoming data are copied immediately
* to the tx queue.
*
* Returns 0 on success, a negative error code otherwise
*/
int tegra_hv_ivc_set_loopback(struct tegra_hv_ivc_cookie *ivck, int mode);
/* debugging aid */
int tegra_hv_ivc_dump(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_read_peek - Peek (copying) data from a received frame
* @ivck IVC cookie of the queue
* @buf Buffer to receive the data
* @off Offset in the frame
* @count Count of bytes to copy
*
* Peek data from a received frame, copying to buf, without removing
* the frame from the queue.
*
* Returns 0 on success, a negative error code otherwise
*/
int tegra_hv_ivc_read_peek(struct tegra_hv_ivc_cookie *ivck,
void *buf, int off, int count);
/**
* tegra_hv_ivc_read_get_next_frame - Peek at the next frame to receive
* @ivck IVC cookie of the queue
*
* Peek at the next frame to be received, without removing it from
* the queue.
*
* Returns a pointer to the frame, or an error encoded pointer.
*/
void *tegra_hv_ivc_read_get_next_frame(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_read_advance - Advance the read queue
* @ivck IVC cookie of the queue
*
* Advance the read queue
*
* Returns 0, or a negative error value if failed.
*/
int tegra_hv_ivc_read_advance(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_write_poke - Poke data into a frame to be transmitted
* @ivck IVC cookie of the queue
* @buf Buffer to the data
* @off Offset in the frame
* @count Count of bytes to copy
*
* Copy data to a transmit frame, copying from buf, without advancing
* the transmit queue.
*
* Returns 0 on success, a negative error code otherwise
*/
int tegra_hv_ivc_write_poke(struct tegra_hv_ivc_cookie *ivck,
const void *buf, int off, int count);
/**
* tegra_hv_ivc_write_get_next_frame - Get the next frame to transmit
* @ivck IVC cookie of the queue
*
* Get access to the next frame.
*
* Returns a pointer to the frame, or an error encoded pointer.
*/
void *tegra_hv_ivc_write_get_next_frame(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_write_advance - Advance the write queue
* @ivck IVC cookie of the queue
*
* Advance the write queue
*
* Returns 0, or a negative error value if failed.
*/
int tegra_hv_ivc_write_advance(struct tegra_hv_ivc_cookie *ivck);
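/*
 * Illustrative sketch: the zero-copy transmit pattern built from the
 * frame-access calls above. The frame is filled in place and only made
 * visible to the peer by the advance call. IS_ERR()/PTR_ERR() and memcpy()
 * come from the usual kernel headers.
 */
static inline int example_ivc_send_inplace(struct tegra_hv_ivc_cookie *ivck,
					   const void *data, int len)
{
	void *frame;

	if (len > ivck->frame_size)
		return -EINVAL;

	frame = tegra_hv_ivc_write_get_next_frame(ivck);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	memcpy(frame, data, len);		  /* fill the frame in place */
	return tegra_hv_ivc_write_advance(ivck); /* publish it to the peer */
}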
/**
* tegra_hv_mempool_reserve - reserve a mempool for use
* @id Id of the requested mempool.
*
* Returns a cookie representing the mempool on success, otherwise an ERR_PTR.
*/
struct tegra_hv_ivm_cookie *tegra_hv_mempool_reserve(unsigned id);
/**
* tegra_hv_mempool_unreserve - release a reserved mempool
* @ck Cookie returned by tegra_hv_mempool_reserve().
*
* Returns 0 on success or a negative error code otherwise.
*/
int tegra_hv_mempool_unreserve(struct tegra_hv_ivm_cookie *ck);
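/*
 * Illustrative sketch: reserving a mempool and mapping it into the kernel.
 * The use of memremap() is an assumption for the example; the cookie only
 * reports the IPA and size of the pool.
 */
static inline void *example_map_mempool(unsigned id)
{
	struct tegra_hv_ivm_cookie *ivm = tegra_hv_mempool_reserve(id);

	if (IS_ERR(ivm))
		return NULL;

	return memremap(ivm->ipa, ivm->size, MEMREMAP_WB);
}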
/**
* tegra_hv_ivc_channel_notified - handle internal messages
* @ivck IVC cookie of the queue
*
* This function must be called following every notification (interrupt or
* callback invocation).
*
* Returns 0 if the channel is ready for communication, or -EAGAIN if a channel
* reset is in progress.
*/
int tegra_hv_ivc_channel_notified(struct tegra_hv_ivc_cookie *ivck);
/**
* tegra_hv_ivc_channel_reset - initiates a reset of the shared memory state
* @ivck IVC cookie of the queue
*
* This function must be called after a channel is reserved and before it is
* used for communication. The channel will be ready for use when a
* subsequent call to tegra_hv_ivc_channel_notified() returns 0.
*/
void tegra_hv_ivc_channel_reset(struct tegra_hv_ivc_cookie *ivck);
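/*
 * Illustrative sketch: the reset handshake described above. After
 * tegra_hv_ivc_reserve(), the client calls tegra_hv_ivc_channel_reset()
 * once, then treats every notification as a state-change event until
 * tegra_hv_ivc_channel_notified() reports 0. The irq-handler shape is an
 * assumption for the example (irqreturn_t is from <linux/interrupt.h>).
 */
static inline irqreturn_t example_ivc_irq(int irq, void *data)
{
	struct tegra_hv_ivc_cookie *ivck = data;

	if (tegra_hv_ivc_channel_notified(ivck) != 0)
		return IRQ_HANDLED;	/* reset still in progress */

	/* channel is up: normal rx/tx processing goes here */
	return IRQ_HANDLED;
}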
/**
* tegra_hv_ivc_get_info - Get info of Guest shared area
* @ivck IVC cookie of the queue
* @pa IPA of shared area
* @size Size of the shared area
*
* Get info (IPA and size) of Guest shared area
*
* Returns 0 on success and an error code otherwise
*/
int tegra_hv_ivc_get_info(struct tegra_hv_ivc_cookie *ivck, uint64_t *pa,
uint64_t *size);
/**
* tegra_hv_ivc_notify - Notify remote guest
* @ivck IVC cookie of the queue
*
* Notify remote guest
*
*/
void tegra_hv_ivc_notify(struct tegra_hv_ivc_cookie *ivck);
struct tegra_ivc *tegra_hv_ivc_convert_cookie(struct tegra_hv_ivc_cookie *ivck);
#endif /* __TEGRA_HV_IVC_H */


@@ -0,0 +1,489 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef __TEGRA_SYSCALLS_H__
#define __TEGRA_SYSCALLS_H__
#include <soc/tegra/virt/tegra_hv_sysmgr.h>
#define HVC_NR_READ_STAT 1
#define HVC_NR_READ_IVC 2
#define HVC_NR_READ_GID 3
#define HVC_NR_RAISE_IRQ 4
#define HVC_NR_READ_NGUESTS 5
#define HVC_NR_READ_IPA_PA 6
#define HVC_NR_READ_GUEST_STATE 7
#define HVC_NR_READ_HYP_INFO 9
#define HVC_NR_GUEST_RESET 10
#define HVC_NR_SYSINFO_IPA 13
#define HVC_NR_TRACE_GET_EVENT_MASK 0x8003U
#define HVC_NR_TRACE_SET_EVENT_MASK 0x8004U
#define GUEST_PRIMARY 0
#define GUEST_IVC_SERVER 0
#define HVC_NR_CPU_FREQ 0xC6000022
#define NGUESTS_MAX 16
#ifndef __ASSEMBLY__
#if defined(__KERNEL__)
#include <linux/types.h>
#endif
struct tegra_hv_queue_data {
uint32_t id; /* IVC id */
uint32_t peers[2];
uint32_t size;
uint32_t nframes;
uint32_t frame_size;
uint32_t offset;
uint16_t irq, raise_irq;
uint64_t trap_ipa; /** @brief IO address used to notify peer endpoint */
uint64_t msi_ipa; /** @brief MSI address used to notify peer endpoint */
};
struct ivc_mempool {
uint64_t pa;
uint64_t size;
uint32_t id;
uint32_t peer_vmid;
};
struct ivc_shared_area {
uint64_t pa;
uint64_t size;
uint32_t guest;
uint16_t free_irq_start;
uint16_t free_irq_count;
};
struct ivc_info_page {
uint32_t nr_queues;
uint32_t nr_areas;
uint32_t nr_mempools;
uint32_t padding; /**< @brief reserved for internal use */
// IMPORTANT: padding is needed to align
// sizeof(struct ivc_info_page) to 64 bits
uint64_t trap_region_base_ipa; /**< @brief MMIO trap region start address */
uint64_t trap_region_size; /**< @brief MMIO trap region size */
uint64_t trap_ipa_stride; /**< @brief MMIO trap IPA stride size */
uint64_t msi_region_base_ipa; /**< @brief MMIO msi region start address */
uint64_t msi_region_size; /**< @brief MMIO msi region size */
uint64_t msi_ipa_stride; /**< @brief MMIO msi IPA stride size */
/* The actual length of this array is nr_areas. */
struct ivc_shared_area areas[];
/*
* Following the shared array is an array of queue data structures with
* an entry per queue that is assigned to the guest. This array is
* terminated by an entry with no frames.
*
* struct tegra_hv_queue_data queue_data[nr_queues];
*/
/*
* Following the queue data array is an array of mempool structures
* with an entry per mempool assigned to the guest.
*
* struct ivc_mempool[nr_mempools];
*/
};
static inline struct ivc_shared_area *ivc_shared_area_addr(
const struct ivc_info_page *info, uint32_t area_num)
{
return ((struct ivc_shared_area *) (((uintptr_t) info) + sizeof(*info)))
+ area_num;
}
static inline const struct tegra_hv_queue_data *ivc_info_queue_array(
const struct ivc_info_page *info)
{
return (struct tegra_hv_queue_data *)&info->areas[info->nr_areas];
}
static inline const struct ivc_mempool *ivc_info_mempool_array(
const struct ivc_info_page *info)
{
return (struct ivc_mempool *)
&ivc_info_queue_array(info)[info->nr_queues];
}
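/*
 * Illustrative sketch: walking the info page with the accessors above to
 * find a queue by id. The info pointer is assumed to come from mapping the
 * PA returned by hyp_read_ivc_info().
 */
static inline const struct tegra_hv_queue_data *example_find_queue(
		const struct ivc_info_page *info, uint32_t id)
{
	const struct tegra_hv_queue_data *qd = ivc_info_queue_array(info);
	uint32_t i;

	for (i = 0; i < info->nr_queues; i++)
		if (qd[i].id == id)
			return &qd[i];
	return NULL;
}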
struct hyp_ipa_pa_info {
uint64_t base; /* base of contiguous pa region */
uint64_t offset; /* offset for requested ipa address */
uint64_t size; /* size of pa region */
};
#define HVC_MAX_VCPU 64
struct trapped_access {
uint64_t ipa;
uint32_t size;
int32_t write_not_read;
uint64_t data;
uint32_t guest_id;
};
struct hyp_server_page {
/* guest reset protocol */
uint32_t guest_reset_virq;
/* boot delay offsets per VM needed by monitor partition */
uint32_t boot_delay[NGUESTS_MAX];
/* hypervisor trace log */
uint64_t log_ipa;
uint32_t log_size;
/* secure-hypervisor trace log */
uint64_t secure_log_ipa;
uint32_t secure_log_size;
/* PCT data */
uint64_t pct_ipa;
uint64_t pct_size;
/* check if the VM is a server or a guest */
uint32_t is_server_vm;
/* golden register data */
uint64_t gr_ipa;
uint32_t gr_size;
/* all vm mappings ipa */
uint64_t mappings_ipa;
};
/* For backwards compatibility, alias the old name hyp_info_page to hyp_server_page. */
#define hyp_info_page hyp_server_page
#ifdef CONFIG_ARM64
#define _X3_X17 "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", \
"x13", "x14", "x15", "x16", "x17"
#define _X4_X17 "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", \
"x13", "x14", "x15", "x16", "x17"
static inline int hyp_read_gid(unsigned int *gid)
{
register uint64_t r0 asm("x0");
register uint64_t r1 asm("x1");
asm("hvc %2"
: "=r"(r0), "=r"(r1)
: "i"(HVC_NR_READ_GID)
: "x2", "x3", _X4_X17);
*gid = r1;
return (int)r0;
}
static inline int hyp_read_nguests(unsigned int *nguests)
{
register uint64_t r0 asm("x0");
register uint64_t r1 asm("x1");
asm("hvc %2"
: "=r"(r0), "=r"(r1)
: "i"(HVC_NR_READ_NGUESTS)
: "x2", "x3", _X4_X17);
*nguests = r1;
return (int)r0;
}
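/*
 * Illustrative sketch: the wrappers return the hypervisor status in x0 and
 * the payload in x1, so callers check the int result before trusting the
 * out-parameter. The pr_info() logging is an assumption for the example.
 */
static inline void example_identify_guest(void)
{
	unsigned int gid, nguests;

	if (hyp_read_gid(&gid) == 0 && hyp_read_nguests(&nguests) == 0)
		pr_info("running as guest %u of %u\n", gid, nguests);
}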
static inline int hyp_read_ivc_info(uint64_t *ivc_info_page_pa)
{
register uint64_t r0 asm("x0");
register uint64_t r1 asm("x1");
asm("hvc %2"
: "=r"(r0), "=r"(r1)
: "i"(HVC_NR_READ_IVC)
: "x2", "x3", _X4_X17);
*ivc_info_page_pa = r1;
return (int)r0;
}
static inline int hyp_read_ipa_pa_info(struct hyp_ipa_pa_info *info,
unsigned int guestid, uint64_t ipa)
{
register uint64_t r0 asm("x0") = guestid;
register uint64_t r1 asm("x1") = ipa;
register uint64_t r2 asm("x2");
register uint64_t r3 asm("x3");
asm("hvc %4"
: "+r"(r0), "+r"(r1), "=r"(r2), "=r"(r3)
: "i"(HVC_NR_READ_IPA_PA)
: _X4_X17);
info->base = r1;
info->offset = r2;
info->size = r3;
return (int)r0;
}
static inline int hyp_raise_irq(unsigned int irq, unsigned int vmid)
{
register uint64_t r0 asm("x0") = irq;
register uint64_t r1 asm("x1") = vmid;
asm volatile("hvc %1"
: "+r"(r0)
: "i"(HVC_NR_RAISE_IRQ), "r"(r1)
: "x2", "x3", _X4_X17);
return (int)r0;
}
static inline int hyp_read_guest_state(unsigned int vmid, unsigned int *state)
{
register uint64_t r0 asm("x0") = vmid;
register uint64_t r1 asm("x1");
asm("hvc %2"
: "+r"(r0), "=r"(r1)
: "i"(HVC_NR_READ_GUEST_STATE)
: "x2", _X3_X17);
*state = (unsigned int)r1;
return (int)r0;
}
static inline int hyp_read_hyp_info(uint64_t *hyp_info_page_pa)
{
register uint64_t r0 asm("x0");
register uint64_t r1 asm("x1");
asm("hvc %2"
: "=r"(r0), "=r"(r1)
: "i"(HVC_NR_READ_HYP_INFO)
: "x2", "x3", _X4_X17);
*hyp_info_page_pa = r1;
return (int)r0;
}
static inline int hyp_guest_reset(unsigned int id,
struct hyp_sys_state_info *out)
{
register uint64_t r0 asm("x0") = id;
register uint64_t r1 asm("x1");
register uint64_t r2 asm("x2");
register uint64_t r3 asm("x3");
asm volatile("hvc %4"
: "+r"(r0), "=r"(r1),
"=r"(r2), "=r"(r3)
: "i"(HVC_NR_GUEST_RESET)
: _X4_X17);
if (out != NULL) {
out->sys_transition_mask = (uint32_t)r1;
out->vm_shutdown_mask = (uint32_t)r2;
out->vm_reboot_mask = (uint32_t)r3;
}
return (int)r0;
}
static inline uint64_t hyp_sysinfo_ipa(void)
{
register uint64_t r0 asm("x0");
asm("hvc %1"
: "=r"(r0)
: "i"(HVC_NR_SYSINFO_IPA)
: "x1", "x2", "x3", _X4_X17);
return r0;
}
static inline int hyp_read_freq_feedback(uint64_t *value)
{
register uint64_t r0 asm("x0") = HVC_NR_CPU_FREQ;
register uint64_t r1 asm("x1") = 1U;
asm volatile("hvc #0"
: "+r"(r0), "+r"(r1)
:
: "x2", "x3", _X4_X17);
if (r0 == 1 && value != NULL)
*value = r1;
return (int16_t)r0;
}
static inline int hyp_read_freq_request(uint64_t *value)
{
register uint64_t r0 asm("x0") = HVC_NR_CPU_FREQ;
register uint64_t r1 asm("x1") = 0U;
asm volatile("hvc #0"
: "+r"(r0), "+r"(r1)
:
: "x2", "x3", _X4_X17);
if (r0 == 1 && value != NULL)
*value = r1;
return (int16_t)r0;
}
static inline int hyp_write_freq_request(uint64_t value)
{
register uint64_t r0 asm("x0") = HVC_NR_CPU_FREQ;
register uint64_t r1 asm("x1") = 2U;
register uint64_t r2 asm("x2") = value;
asm volatile("hvc #0"
: "+r"(r0)
: "r"(r1), "r"(r2)
: "x3", _X4_X17);
return (int16_t)r0;
}
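/*
 * Illustrative sketch: combining the HVC_NR_CPU_FREQ sub-calls above into a
 * write-then-confirm cycle. The status conventions used here (negative on
 * failure for the write, 1 on success for the read, as the read wrappers
 * above only publish a value when x0 comes back as 1) are partly
 * assumptions inferred from those wrappers.
 */
static inline int example_set_and_confirm_freq(uint64_t target)
{
	uint64_t fb = 0;

	if (hyp_write_freq_request(target) < 0)
		return -EIO;
	if (hyp_read_freq_feedback(&fb) != 1)
		return -EIO;
	return (fb == target) ? 0 : -EAGAIN;
}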
static inline int hyp_pct_cpu_id_read_freq_feedback(uint8_t cpu_id,
uint64_t *value)
{
register uint64_t r0 asm("x0") = HVC_NR_CPU_FREQ;
register uint64_t r1 asm("x1") = 4U;
register uint64_t r2 asm("x2") = cpu_id;
asm volatile("hvc #0"
: "+r"(r0), "+r"(r1)
: "r"(r2)
: "x3", _X4_X17);
if (r0 == 1 && value != NULL)
*value = r1;
return (int16_t)r0;
}
static inline int hyp_pct_cpu_id_read_freq_request(uint8_t cpu_id,
uint64_t *value)
{
register uint64_t r0 asm("x0") = HVC_NR_CPU_FREQ;
register uint64_t r1 asm("x1") = 3U;
register uint64_t r2 asm("x2") = cpu_id;
asm volatile("hvc #0"
: "+r"(r0), "+r"(r1)
: "r"(r2)
: "x3", _X4_X17);
if (r0 == 1 && value != NULL)
*value = r1;
return (int16_t)r0;
}
static inline int hyp_pct_cpu_id_write_freq_request(uint8_t cpu_id,
uint64_t value)
{
register uint64_t r0 asm("x0") = HVC_NR_CPU_FREQ;
register uint64_t r1 asm("x1") = 5U;
register uint64_t r2 asm("x2") = value;
register uint64_t r3 asm("x3") = cpu_id;
asm volatile("hvc #0"
: "+r"(r0)
: "r"(r1), "r"(r2), "r"(r3)
: _X4_X17);
return (int16_t)r0;
}
static inline uint8_t hyp_get_cpu_count(void)
{
register uint64_t r0 asm("x0") = HVC_NR_CPU_FREQ;
register uint64_t r1 asm("x1") = 6U;
asm volatile("hvc #0"
: "+r"(r0), "+r"(r1)
:
: "x2", "x3", _X4_X17);
if (r0 == 1)
return r1;
return 0;
}
static __attribute__((always_inline)) inline void hyp_call44(uint16_t id,
uint64_t args[4])
{
register uint64_t x0 asm("x0") = args[0];
register uint64_t x1 asm("x1") = args[1];
register uint64_t x2 asm("x2") = args[2];
register uint64_t x3 asm("x3") = args[3];
asm volatile("HVC %[imm16]"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
:
[imm16] "i"(((uint32_t)id)));
args[0] = x0;
args[1] = x1;
args[2] = x2;
args[3] = x3;
}
static inline int hyp_trace_get_mask(uint64_t *value)
{
uint64_t args[4] = { 0U, 0U, 0U, 0U };
hyp_call44(HVC_NR_TRACE_GET_EVENT_MASK, args);
if (args[0] == 0U)
*value = args[1];
return (int) args[0];
}
static inline int hyp_trace_set_mask(uint64_t mask)
{
uint64_t args[4] = { mask, 0U, 0U, 0U };
hyp_call44(HVC_NR_TRACE_SET_EVENT_MASK, args);
return (int) args[0];
}
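/*
 * Illustrative sketch: a read-modify-write of the trace event mask using
 * the two helpers above. Which bit enables which event class is defined by
 * the hypervisor and is not assumed here.
 */
static inline int example_enable_trace_bit(uint64_t bit)
{
	uint64_t mask = 0;
	int ret = hyp_trace_get_mask(&mask);

	if (ret != 0)
		return ret;
	return hyp_trace_set_mask(mask | bit);
}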
#undef _X3_X17
#undef _X4_X17
#else
int hyp_read_gid(unsigned int *gid);
int hyp_read_nguests(unsigned int *nguests);
int hyp_read_ivc_info(uint64_t *ivc_info_page_pa);
int hyp_read_ipa_pa_info(struct hyp_ipa_pa_info *info, int guestid,
uint64_t ipa);
int hyp_raise_irq(unsigned int irq, unsigned int vmid);
uint64_t hyp_sysinfo_ipa(void);
/* ASM prototypes */
extern int hvc_read_gid(void *);
extern int hvc_read_ivc_info(int *);
extern int hvc_read_ipa_pa_info(void *, int guestid, uint64_t ipa);
extern int hvc_read_nguests(void *);
extern int hvc_raise_irq(unsigned int irq, unsigned int vmid);
#endif /* CONFIG_ARM64 */
#endif /* !__ASSEMBLY__ */
#endif /* __TEGRA_SYSCALLS_H__ */


@@ -0,0 +1,133 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef _TEGRA_HV_SYSMGR_H
#define _TEGRA_HV_SYSMGR_H
#include <linux/types.h>
#define SYSMGR_IVCMSG_SIZE_MAX 64
enum hv_sysmgr_msg_type {
HV_SYSMGR_MSG_TYPE_GUEST_EVENT = 1,
HV_SYSMGR_MSG_TYPE_VM_PM_CTL_CMD = 2,
HV_SYSMGR_MSG_TYPE_INVALID
};
enum hv_sysmgr_cmd_id {
HV_SYSMGR_CMD_NORMAL_SHUTDOWN = 0x0,
HV_SYSMGR_CMD_NORMAL_REBOOT = 0x1,
HV_SYSMGR_CMD_NORMAL_SUSPEND = 0x2,
HV_SYSMGR_CMD_NORMAL_RESUME = 0x3,
HV_SYSMGR_CMD_INVALID = 0xFFFFFFFF,
};
enum hv_sysmgr_resp_id {
HV_SYSMGR_RESP_ACCEPTED = 0x0,
HV_SYSMGR_RESP_UNKNOWN_COMMAND = 0xF,
};
/* This struct comes as payload of hv_pm_ctl_message */
struct hv_sysmgr_command {
uint32_t cmd_id;
uint32_t resp_id;
} __packed;
struct hv_sysmgr_message {
/* msg class */
uint32_t msg_type;
/* id of open socket */
uint32_t socket_id;
/* client data area. Payload */
uint8_t client_data[SYSMGR_IVCMSG_SIZE_MAX];
} __packed;
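/*
 * Illustrative sketch: composing a PM-control message from the two structs
 * above. The command payload travels in client_data; the socket id is an
 * assumption for the example, and resp_id is left for the responder to fill.
 */
static inline void example_fill_suspend_msg(struct hv_sysmgr_message *msg,
					    uint32_t socket_id)
{
	struct hv_sysmgr_command cmd = {
		.cmd_id  = HV_SYSMGR_CMD_NORMAL_SUSPEND,
		.resp_id = 0,	/* filled in by the responder */
	};

	msg->msg_type  = HV_SYSMGR_MSG_TYPE_VM_PM_CTL_CMD;
	msg->socket_id = socket_id;
	memcpy(msg->client_data, &cmd, sizeof(cmd));
}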
/*
* QUERY_SYSTEM_STATE COMMAND DATA LAYOUT
*/
struct hyp_sys_state_info {
/* Indicates System State Transition */
uint32_t sys_transition_mask;
/* Indicates which VM shutdown request is pending */
uint32_t vm_shutdown_mask;
/* Indicates which VM reboot request is pending */
uint32_t vm_reboot_mask;
/* Indicates which VM suspend request is pending */
uint32_t vm_suspend_phase_1_mask;
uint32_t vm_suspend_phase_2_mask;
/* Indicates which VM resume request is pending */
uint32_t vm_resume_mask;
};
/*
* Power management call IDs used by SYSMGR to manage LOCAL/GLOBAL EVENTS
*/
enum system_function_id {
INVALID_FUNC,
/*
* This is used to get reboot/shutdown masks per VM from hypervisor.
* Hypervisor updates state fields on a PSCI event from the VM.
*/
QUERY_SYSTEM_STATE,
GUEST_SHUTDOWN_INIT,
GUEST_SHUTDOWN_COMPLETE,
GUEST_REBOOT_INIT,
GUEST_REBOOT_CONTINUE,
GUEST_REBOOT_COMPLETE,
SYSTEM_SHUTDOWN_INIT,
SYSTEM_SHUTDOWN_COMPLETE,
SYSTEM_REBOOT_INIT,
SYSTEM_REBOOT_COMPLETE,
GUEST_SUSPEND_REQ,
GUEST_SUSPEND_INIT,
GUEST_SUSPEND_COMPLETE,
GUEST_RESUME_INIT,
GUEST_RESUME_COMPLETE,
GUEST_PAUSE,
SYSTEM_SUSPEND_INIT,
SYSTEM_SUSPEND_COMPLETE,
MAX_FUNC_ID,
};
typedef enum {
VM_STATE_BOOT,
VM_STATE_HALT,
VM_STATE_UNHALT,
VM_STATE_REBOOT,
VM_STATE_SHUTDOWN,
VM_STATE_SUSPEND,
VM_STATE_RESUME,
VM_STATE_INVALID,
VM_STATE_MAX
} vm_state;
#define CREATE_CMD(func_id, vmid) (((func_id) << 24U) | (vmid))
#define QUERY_CMD CREATE_CMD(QUERY_SYSTEM_STATE, 0)
#define GUEST_SHUTDOWN_INIT_CMD(vmid) CREATE_CMD(GUEST_SHUTDOWN_INIT, vmid)
#define GUEST_SHUTDOWN_COMPLETE_CMD(vmid) \
CREATE_CMD(GUEST_SHUTDOWN_COMPLETE, vmid)
#define GUEST_REBOOT_INIT_CMD(vmid) CREATE_CMD(GUEST_REBOOT_INIT, vmid)
#define GUEST_REBOOT_CONTINUE_CMD(vmid) CREATE_CMD(GUEST_REBOOT_CONTINUE, vmid)
#define GUEST_REBOOT_COMPLETE_CMD(vmid) CREATE_CMD(GUEST_REBOOT_COMPLETE, vmid)
#define SYS_SHUTDOWN_INIT_CMD CREATE_CMD(SYSTEM_SHUTDOWN_INIT, 0)
#define SYS_SHUTDOWN_COMPLETE_CMD CREATE_CMD(SYSTEM_SHUTDOWN_COMPLETE, 0)
#define SYS_REBOOT_INIT_CMD CREATE_CMD(SYSTEM_REBOOT_INIT, 0)
#define SYS_REBOOT_COMPLETE_CMD CREATE_CMD(SYSTEM_REBOOT_COMPLETE, 0)
#define GUEST_SUSPEND_REQ_CMD(vmid) CREATE_CMD(GUEST_SUSPEND_REQ, vmid)
#define GUEST_SUSPEND_INIT_CMD(vmid) CREATE_CMD(GUEST_SUSPEND_INIT, vmid)
#define GUEST_SUSPEND_COMPLETE_CMD(vmid) \
	CREATE_CMD(GUEST_SUSPEND_COMPLETE, vmid)
#define GUEST_RESUME_INIT_CMD(vmid) CREATE_CMD(GUEST_RESUME_INIT, vmid)
#define GUEST_RESUME_COMPLETE_CMD(vmid) CREATE_CMD(GUEST_RESUME_COMPLETE, vmid)
#define GUEST_PAUSE_CMD(vmid) CREATE_CMD(GUEST_PAUSE, vmid)
#define SYS_SUSPEND_INIT_CMD CREATE_CMD(SYSTEM_SUSPEND_INIT, 0)
#define SYS_SUSPEND_COMPLETE_CMD CREATE_CMD(SYSTEM_SUSPEND_COMPLETE, 0)
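/*
 * Illustrative sketch: CREATE_CMD() packs the function id into the top byte
 * and the vmid into the low bits, so GUEST_REBOOT_INIT_CMD(2) expands to
 * (GUEST_REBOOT_INIT << 24U) | 2 == 0x04000002. How the command word is
 * delivered to the system manager is outside this header.
 */
static inline uint32_t example_reboot_cmd(uint32_t vmid)
{
	return GUEST_REBOOT_INIT_CMD(vmid);
}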
#endif /* _TEGRA_HV_SYSMGR_H */