Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-23 01:31:30 +03:00)
VSC: fix lcpu to vcpu conversion logic
- fix the lcpu to vcpu conversion logic by also considering missing clusters or holes
- if the conversion is not successful, do not pin the VSCD threads to an lcpu
- populate the lcpu to vcpu table only once

Bug 4372079

Change-Id: I8379496da1b7f85d984e1bac1680f830740fea09
Signed-off-by: Manish Bhardwaj <mbhardwaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3015784
Reviewed-by: Suresh Venkatachalam <skathirampat@nvidia.com>
Reviewed-by: Vipin Kumar <vipink@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3260665
Reviewed-by: Sumeet Gupta <sumeetg@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
committed by Jon Hunter
parent b918dd5111
commit dc071fdd4c
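For context, a stand-alone sketch of the hole-aware lookup this change introduces: the table is indexed by logical CPU slot across all clusters, holes are marked with U32_MAX, and the requested lcpu_affinity is matched against an "adjusted" count of populated slots only. This is illustrative only; CPUS_PER_CLUSTER, MAX_NUM_CLUSTERS and the sparse topology below are made-up example values, not the driver's real configuration.

/*
 * Illustrative user-space sketch of the hole-aware lcpu -> vcpu lookup.
 * Only the U32_MAX sentinel and the "adjusted lcpu" counting mirror the
 * new convert_lcpu_to_vcpu() in the diff; the geometry is an assumption.
 */
#include <stdint.h>
#include <stdio.h>

#define CPUS_PER_CLUSTER 4U   /* assumed value for the example */
#define MAX_NUM_CLUSTERS 3U   /* assumed value for the example */
#define TABLE_SIZE (CPUS_PER_CLUSTER * MAX_NUM_CLUSTERS)

/* indexed by (cluster * CPUS_PER_CLUSTER + core); UINT32_MAX marks a hole */
static uint32_t lcpu_to_vcpus[TABLE_SIZE];

static uint32_t convert_lcpu_to_vcpu(uint32_t lcpu_affinity)
{
	uint32_t cnt, vcpu = UINT32_MAX;
	int adj_lcpu = -1;

	/* walk the whole table, counting only slots that are populated */
	for (cnt = 0; cnt < TABLE_SIZE; cnt++) {
		if (lcpu_to_vcpus[cnt] != UINT32_MAX) {
			adj_lcpu++;	/* adjusted lcpu skips holes */
			if ((uint32_t)adj_lcpu == lcpu_affinity) {
				vcpu = lcpu_to_vcpus[cnt];
				break;
			}
		}
	}
	return vcpu;	/* UINT32_MAX means no mapping was found */
}

int main(void)
{
	uint32_t lcpu;

	/* mark every slot as a hole, then populate a sparse example:
	 * cluster 0 is absent, clusters 1 and 2 are present. */
	for (lcpu = 0; lcpu < TABLE_SIZE; lcpu++)
		lcpu_to_vcpus[lcpu] = UINT32_MAX;
	for (lcpu = CPUS_PER_CLUSTER; lcpu < TABLE_SIZE; lcpu++)
		lcpu_to_vcpus[lcpu] = lcpu - CPUS_PER_CLUSTER;

	printf("lcpu 2 -> vcpu %u\n", convert_lcpu_to_vcpu(2));   /* resolves past the hole */
	printf("lcpu 99 -> vcpu %u\n", convert_lcpu_to_vcpu(99)); /* no mapping: UINT32_MAX */
	return 0;
}

When the lookup returns U32_MAX, the driver leaves the worker thread unbound instead of pinning it, matching the kthread_create() fallback in the vblk_init_device() hunk below.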
@@ -6,6 +6,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
+#include <linux/limits.h>
 #include <linux/sched.h>
 #include <linux/kernel.h> /* printk() */
 #include <linux/pm.h>
@@ -64,6 +65,11 @@ static uint32_t total_instance_id;
 
 static int vblk_major;
 
+static uint32_t lcpu_to_vcpus[CPUS_PER_CLUSTER * MAX_NUM_CLUSTERS];
+atomic_t vcpu_init_info;
+static DEFINE_MUTEX(vcpu_lock);
+static struct semaphore mpidr_sem;
+
 static inline uint64_t _arch_counter_get_cntvct(void)
 {
 	uint64_t cval;
@@ -97,58 +103,22 @@ exit:
 	return req;
 }
 
-static uint64_t read_mpidr(void)
+static uint32_t convert_lcpu_to_vcpu(struct vblk_dev *vblkdev, uint32_t lcpu_affinity)
 {
-	uint64_t mpidr;
-	__asm volatile("MRS %0, MPIDR_EL1 " : "=r"(mpidr) :: "memory");
-	return mpidr;
-}
-
-static uint64_t read_mpidr_cluster(uint64_t mpidr)
-{
-	return (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
-}
-
-static uint64_t read_mpidr_core(uint64_t mpidr)
-{
-	return (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
-}
-
-static long get_cpu_info(void *data)
-{
-	struct vblk_dev *vblkdev = (struct vblk_dev *)data;
-
-	vblkdev->g_mpidr = read_mpidr();
-	vblkdev->g_cluster = read_mpidr_cluster(vblkdev->g_mpidr);
-	vblkdev->g_core = read_mpidr_core(vblkdev->g_mpidr);
-
-	up(&vblkdev->mpidr_sem);
-
-	return 0;
-}
-
-static uint32_t convert_lcpu_to_vcpu(struct vblk_dev *vblkdev, uint32_t lcpu)
-{
-	uint32_t num_cores = num_present_cpus();
-	uint32_t l_cluster, l_core;
-	uint32_t cnt, vcpu;
+	uint32_t cnt, vcpu = U32_MAX;
+	int adj_lcpu = -1;
 
-	/* get cluster and core in cluster from lcpu */
-	l_cluster = lcpu / MAX_NUM_CLUSTERS;
-	l_core = lcpu % CPUS_PER_CLUSTER;
-
-	for (cnt = 0; cnt < num_cores; cnt++) {
-		down(&vblkdev->mpidr_sem);
-		work_on_cpu(cnt, get_cpu_info, vblkdev);
-		down(&vblkdev->mpidr_sem);
-
-		if (vblkdev->g_cluster == l_cluster && vblkdev->g_core == l_core) {
-			vcpu = cnt;
-			up(&vblkdev->mpidr_sem);
-			break;
+	/* search for vcpu corresponding to lcpu_affinity */
+	for (cnt = 0; cnt < MAX_NUM_CLUSTERS * CPUS_PER_CLUSTER; cnt++) {
+		if (lcpu_to_vcpus[cnt] != U32_MAX) {
+			/* calculating adjusted lcpu */
+			adj_lcpu++;
+
+			if (adj_lcpu == lcpu_affinity) {
+				vcpu = lcpu_to_vcpus[cnt];
+				break;
+			}
 		}
-
-		up(&vblkdev->mpidr_sem);
 	}
 
 	return vcpu;
@@ -1603,8 +1573,6 @@ static void vblk_init_device(struct work_struct *ws)
 		lcpu_affinity = 2;
 	}
 
-	/* convert lcpu to vcpu */
-	vblkdev->vcpu_affinity = convert_lcpu_to_vcpu(vblkdev, lcpu_affinity);
 	ret = snprintf(vblk_comm, VBLK_DEV_THREAD_NAME_LEN - 4, "vblkdev%d:%d",
 			vblkdev->devnum, vblkdev->config.priority);
 	if (ret < 0) {
@@ -1612,11 +1580,23 @@ static void vblk_init_device(struct work_struct *ws)
 		mutex_unlock(&vblkdev->ivc_lock);
 		return;
 	}
-	strncat(vblk_comm, ":%u", 3);
 
-	/* create partition specific worker thread */
-	vblkdev->vblk_kthread = kthread_create_on_cpu(&vblk_request_worker, vblkdev,
-			vblkdev->vcpu_affinity, vblk_comm);
+	/* convert lcpu to vcpu */
+	vblkdev->vcpu_affinity = convert_lcpu_to_vcpu(vblkdev, lcpu_affinity);
+	if (vblkdev->vcpu_affinity != U32_MAX) {
+		strncat(vblk_comm, ":%u", 3);
+
+		/* create partition specific worker thread */
+		vblkdev->vblk_kthread = kthread_create_on_cpu(&vblk_request_worker, vblkdev,
+				vblkdev->vcpu_affinity, vblk_comm);
+	} else {
+		/* create partition specific worker thread.
+		 * If the conversion is not successful
+		 * do not bound kthread to any cpu
+		 */
+		dev_info(vblkdev->device, "vsc kthread not bound to any cpu\n");
+		vblkdev->vblk_kthread = kthread_create(&vblk_request_worker, vblkdev, vblk_comm);
+	}
 	if (IS_ERR(vblkdev->vblk_kthread)) {
 		dev_err(vblkdev->device, "Cannot allocate vblk worker thread\n");
 		mutex_unlock(&vblkdev->ivc_lock);
@@ -1670,6 +1650,66 @@ static void tegra_create_timers(struct vblk_dev *vblkdev)
 
 }
 
+static uint64_t read_mpidr(void)
+{
+	uint64_t mpidr;
+	__asm volatile("MRS %0, MPIDR_EL1 " : "=r"(mpidr) :: "memory");
+	return mpidr;
+}
+
+static uint64_t read_mpidr_cluster(uint64_t mpidr)
+{
+	return (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
+}
+
+static uint64_t read_mpidr_core(uint64_t mpidr)
+{
+	return (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+}
+
+static long get_cpu_info(void *data)
+{
+	uint64_t l_mpidr, l_cluster, l_core;
+	uint32_t lcpu;
+
+	l_mpidr = read_mpidr();
+	l_cluster = read_mpidr_cluster(l_mpidr);
+	l_core = read_mpidr_core(l_mpidr);
+
+	lcpu = l_cluster * CPUS_PER_CLUSTER + l_core;
+	lcpu_to_vcpus[lcpu] = smp_processor_id();
+
+	up(&mpidr_sem);
+
+	return 0;
+}
+
+static void populate_lcpu_to_vcpu_info(struct vblk_dev *vblkdev)
+{
+	uint32_t num_vcpus = num_present_cpus();
+	uint32_t cnt, lcpu;
+
+	/* initialize all clusters including holes */
+	for (lcpu = 0; lcpu < MAX_NUM_CLUSTERS * CPUS_PER_CLUSTER; lcpu++)
+		lcpu_to_vcpus[lcpu] = U32_MAX;
+
+	/* queuing API on each present vcpus serially
+	 * by down and up semaphore operation
+	 */
+	for (cnt = 0; cnt < num_vcpus; cnt++) {
+		down(&mpidr_sem);
+		work_on_cpu(cnt, get_cpu_info, vblkdev);
+	}
+
+	/* down and up operation to make sure get_cpu_info API
+	 * gets exuected on last vcpu successfully before exiting function
+	 */
+	down(&mpidr_sem);
+	up(&mpidr_sem);
+
+	atomic_inc(&vcpu_init_info);
+}
+
 static int tegra_hv_vblk_probe(struct platform_device *pdev)
 {
 	static struct device_node *vblk_node;
@@ -1736,7 +1776,7 @@ static int tegra_hv_vblk_probe(struct platform_device *pdev)
 	spin_lock_init(&vblkdev->queue_lock);
 	mutex_init(&vblkdev->ioctl_lock);
 	mutex_init(&vblkdev->ivc_lock);
-	sema_init(&vblkdev->mpidr_sem, 1);
+	sema_init(&mpidr_sem, 1);
 
 	INIT_WORK(&vblkdev->init, vblk_init_device);
 
@@ -1746,6 +1786,11 @@ static int tegra_hv_vblk_probe(struct platform_device *pdev)
 	/* Create timers for each request going to storage server*/
 	tegra_create_timers(vblkdev);
 
+	mutex_lock(&vcpu_lock);
+	if (atomic_read(&vcpu_init_info) == 0)
+		populate_lcpu_to_vcpu_info(vblkdev);
+	mutex_unlock(&vcpu_lock);
+
 	if (devm_request_irq(vblkdev->device, vblkdev->ivck->irq,
 			ivc_irq_handler, 0, "vblk", vblkdev)) {
 		dev_err(dev, "Failed to request irq %d\n", vblkdev->ivck->irq);
@@ -97,13 +97,6 @@ struct vblk_dev {
 	uint32_t ivc_id;
 	uint32_t ivm_id;
 	uint32_t vcpu_affinity;
-
-	/* members for converting lcpu to vcpu */
-	struct semaphore mpidr_sem;
-	uint64_t g_mpidr;
-	uint64_t g_cluster;
-	uint64_t g_core;
-
 	struct tegra_hv_ivc_cookie *ivck;
 	struct tegra_hv_ivm_cookie *ivmk;
 	uint32_t devnum;