misc: mods: update MODS kernel driver to 3.96

Summary: The latest MODS kernel driver has been
released in Perforce. Sync the driver in Git so
that it is at parity with the Perforce version.

Change-Id: Ic3f1ab372574af7b61aa9736b33fb38a8c720ada
Signed-off-by: Ellis Roberts <ellisr@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2261293
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Chris Dragan <kdragan@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
root
2019-12-12 12:11:02 -08:00
committed by Laxman Dewangan
parent 726aeb070b
commit 49a69b4202
4 changed files with 340 additions and 59 deletions

View File

@@ -25,7 +25,7 @@
/* Driver version: encoded as 0xMmm (major in the high byte, minor as two
 * BCD digits), e.g. 3.96 -> 0x396.
 */
#define MODS_DRIVER_VERSION_MAJOR 3
#define MODS_DRIVER_VERSION_MINOR 96
#define MODS_DRIVER_VERSION ((MODS_DRIVER_VERSION_MAJOR << 8) | \
			     ((MODS_DRIVER_VERSION_MINOR/10) << 4) | \
			     (MODS_DRIVER_VERSION_MINOR%10))
@@ -391,6 +391,12 @@ struct MODS_PCI_BUS_ADD_DEVICES {
__u32 bus;
};
/* MODS_ESC_PCI_BUS_RESCAN */
/* Ioctl argument: identifies the PCI bus (domain:bus) to rescan. */
struct MODS_PCI_BUS_RESCAN {
	/* IN: PCI domain (segment) number */
	__u16 domain;
	/* IN: PCI bus number within the domain */
	__u16 bus;
};
/* MODS_ESC_PCI_MAP_RESOURCE */
struct MODS_PCI_MAP_RESOURCE {
/* IN */
@@ -794,11 +800,27 @@ struct MODS_CLOCK_ENABLED {
__u32 enable_count;
};
#define MAX_CPU_MASKS_3 128 /* CPU indices can be at most 4096 apart */

/* MODS_ESC_DEVICE_NUMA_INFO_3 */
/* Reports the NUMA node of a PCI device plus the CPU mask of that node.
 * The mask is sparse: node_cpu_mask[0] corresponds to 32-bit word
 * first_cpu_mask_offset of the full system CPU mask.
 */
struct MODS_DEVICE_NUMA_INFO_3 {
	/* IN */
	struct mods_pci_dev_2 pci_device;
	/* OUT */
	__s32 node;		/* NUMA node of the device, or -1 if unknown */
	__u32 node_count;	/* number of possible NUMA nodes */
	__u32 cpu_count;	/* number of possible CPUs */
	__u32 first_cpu_mask_offset; /* index (in 32-bit words) of the first
				      * nonzero word of node_cpu_mask
				      */
	__u32 node_cpu_mask[MAX_CPU_MASKS_3]; /* 32 CPUs per word, starting at
					       * the offset above
					       */
};
#if defined(CONFIG_PPC64) || defined(PPC64LE)
#define MAX_CPU_MASKS 64 /* 32 masks of 32bits = 2048 CPUs max */
#else
#define MAX_CPU_MASKS 32 /* 32 masks of 32bits = 1024 CPUs max */
#endif
/* MODS_ESC_DEVICE_NUMA_INFO_2 */
struct MODS_DEVICE_NUMA_INFO_2 {
/* IN */
@@ -1208,6 +1230,7 @@ struct MODS_MSR {
_IOWR(MODS_IOC_MAGIC, 21, struct MODS_IRQ)
#define MODS_ESC_SET_MEMORY_TYPE \
_IOW(MODS_IOC_MAGIC, 22, struct MODS_MEMORY_TYPE)
/* Deprecated */
#define MODS_ESC_PCI_BUS_ADD_DEVICES \
_IOW(MODS_IOC_MAGIC, 23, struct MODS_PCI_BUS_ADD_DEVICES)
#define MODS_ESC_REGISTER_IRQ \
@@ -1443,5 +1466,9 @@ struct MODS_MSR {
_IOWR(MODS_IOC_MAGIC, 124, struct MODS_ALLOC_PAGES_2)
#define MODS_ESC_MERGE_PAGES \
_IOWR(MODS_IOC_MAGIC, 125, struct MODS_MERGE_PAGES)
/* NUMA info v3: supports sparse CPU masks via first_cpu_mask_offset */
#define MODS_ESC_DEVICE_NUMA_INFO_3 \
	_IOWR(MODS_IOC_MAGIC, 126, struct MODS_DEVICE_NUMA_INFO_3)
/* Rescan a PCI bus identified by domain:bus */
#define MODS_ESC_PCI_BUS_RESCAN \
	_IOW(MODS_IOC_MAGIC, 127, struct MODS_PCI_BUS_RESCAN)
#endif /* _MODS_H_ */

View File

@@ -308,12 +308,6 @@ struct mods_priv {
#define MODS_PGPROT_UC pgprot_noncached
#define MODS_PGPROT_WC pgprot_writecombine
/* VMA */
#define MODS_VMA_PGOFF(vma) ((vma)->vm_pgoff)
#define MODS_VMA_SIZE(vma) ((vma)->vm_end - (vma)->vm_start)
#define MODS_VMA_OFFSET(vma) (((u64)(vma)->vm_pgoff) << PAGE_SHIFT)
#define MODS_VMA_FILE(vma) ((vma)->vm_file)
/* Xen adds a translation layer between the physical address
* and real system memory address space.
*
@@ -487,6 +481,8 @@ int esc_mods_pci_read_2(struct mods_client *client, struct MODS_PCI_READ_2 *p);
int esc_mods_pci_write(struct mods_client *client, struct MODS_PCI_WRITE *p);
int esc_mods_pci_write_2(struct mods_client *client,
struct MODS_PCI_WRITE_2 *p);
int esc_mods_pci_bus_rescan(struct mods_client *client,
struct MODS_PCI_BUS_RESCAN *p);
int esc_mods_pci_bus_add_dev(struct mods_client *client,
struct MODS_PCI_BUS_ADD_DEVICES *p);
int esc_mods_pci_bus_remove_dev(struct mods_client *client,
@@ -499,6 +495,8 @@ int esc_mods_device_numa_info(struct mods_client *client,
struct MODS_DEVICE_NUMA_INFO *p);
int esc_mods_device_numa_info_2(struct mods_client *client,
struct MODS_DEVICE_NUMA_INFO_2 *p);
int esc_mods_device_numa_info_3(struct mods_client *client,
struct MODS_DEVICE_NUMA_INFO_3 *p);
int esc_mods_get_iommu_state(struct mods_client *client,
struct MODS_GET_IOMMU_STATE *state);
int esc_mods_get_iommu_state_2(struct mods_client *client,

View File

@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/random.h>
#include <linux/sched.h>
@@ -498,9 +499,35 @@ static void mods_unregister_mapping(struct mods_client *client,
return;
}
}
LOG_EXT();
}
#ifdef CONFIG_HAVE_IOREMAP_PROT
/* Look up the registered mapping whose VMA start address equals
 * virtual_address.  Returns NULL when no such mapping exists.
 * The caller is expected to hold client->mtx (see mods_krnl_vma_access).
 */
static struct SYS_MAP_MEMORY *mods_find_mapping(struct mods_client *client,
						u64 virtual_address)
{
	struct list_head *iter;

	LOG_ENT();

	list_for_each(iter, &client->mem_map_list) {
		struct SYS_MAP_MEMORY *map_mem =
			list_entry(iter, struct SYS_MAP_MEMORY, list);

		if (map_mem->virtual_addr != virtual_address)
			continue;

		LOG_EXT();
		return map_mem;
	}

	LOG_EXT();
	return NULL;
}
#endif
static void mods_unregister_all_mappings(struct mods_client *client)
{
struct SYS_MAP_MEMORY *p_map_mem;
@@ -629,7 +656,7 @@ static void mods_krnl_vma_open(struct vm_area_struct *vma)
mods_debug_printk(DEBUG_MEM_DETAILED,
"open vma, virt 0x%lx, phys 0x%llx\n",
vma->vm_start,
(u64)(MODS_VMA_PGOFF(vma) << PAGE_SHIFT));
(u64)vma->vm_pgoff << PAGE_SHIFT);
priv = vma->vm_private_data;
if (priv)
@@ -669,7 +696,134 @@ static void mods_krnl_vma_close(struct vm_area_struct *vma)
LOG_EXT();
}
#ifdef CONFIG_HAVE_IOREMAP_PROT
/* ptrace/gdb access callback for MODS mappings (vm_ops->access).
 *
 * Copies up to @len bytes between @buf and the memory backing @vma at
 * user virtual address @addr.  Accesses are clamped to a single page.
 * Returns the number of bytes copied on success or a negative errno.
 *
 * System-memory allocations (p_mem_info set) support read and write;
 * device memory is read-only through this path.
 */
static int mods_krnl_vma_access(struct vm_area_struct *vma,
				unsigned long addr,
				void *buf,
				int len,
				int write)
{
	int err = OK;
	struct mods_vm_private_data *priv = vma->vm_private_data;
	struct mods_client *client;
	struct SYS_MAP_MEMORY *p_map_mem;
	u64 map_offs;

	LOG_ENT();

	if (!priv) {
		LOG_EXT();
		return -EINVAL;
	}

	mods_debug_printk(DEBUG_MEM_DETAILED,
			  "access vma, virt 0x%lx, phys 0x%llx\n",
			  vma->vm_start,
			  (u64)vma->vm_pgoff << PAGE_SHIFT);

	client = priv->client;

	if (unlikely(mutex_lock_interruptible(&client->mtx))) {
		LOG_EXT();
		return -EINTR;
	}

	/* The requested range must lie entirely inside a registered mapping */
	p_map_mem = mods_find_mapping(client, vma->vm_start);
	if (unlikely(!p_map_mem || addr < p_map_mem->virtual_addr ||
		     addr + len > p_map_mem->virtual_addr +
				  p_map_mem->mapping_length)) {
		mutex_unlock(&client->mtx);
		LOG_EXT();
		return -ENOMEM;
	}

	map_offs = addr - vma->vm_start;

	if (p_map_mem->p_mem_info) {
		/* System memory allocated by the driver: walk the physical
		 * chunks to locate the one containing map_offs.
		 */
		struct MODS_MEM_INFO *p_mem_info = p_map_mem->p_mem_info;
		struct MODS_PHYS_CHUNK *chunk;
		struct MODS_PHYS_CHUNK *end_chunk;

		chunk = &p_mem_info->pages[0];
		end_chunk = chunk + p_mem_info->num_chunks;

		for ( ; chunk < end_chunk; chunk++) {
			const u32 chunk_size = PAGE_SIZE << chunk->order;

			if (!chunk->p_page) {
				/* Unused tail entry: offset is out of range */
				chunk = end_chunk;
				break;
			}

			if (map_offs < chunk_size)
				break;

			map_offs -= chunk_size;
		}

		if (unlikely(chunk >= end_chunk))
			err = -ENOMEM;
		else {
			void *ptr;
			struct page *p_page = chunk->p_page +
					      (map_offs >> PAGE_SHIFT);

			/* Clamp the access to a single page */
			map_offs &= ~PAGE_MASK;
			if (map_offs + len > PAGE_SIZE)
				len = PAGE_SIZE - map_offs;

			ptr = kmap(p_page);
			if (ptr) {
				char *bptr = (char *)ptr + map_offs;

				if (write)
					memcpy(bptr, buf, len);
				else
					memcpy(buf, bptr, len);

				/* Fix: kunmap() takes the struct page *, not
				 * the kernel virtual address returned by
				 * kmap().  Passing ptr here was a latent bug
				 * on CONFIG_HIGHMEM configurations.
				 */
				kunmap(p_page);
				err = len;
			} else
				err = -ENOMEM;
		}
	} else if (!write) {
		/* Device memory: transiently map one page and read from it */
		char __iomem *ptr;
		u64 pa;

		map_offs += (u64)vma->vm_pgoff << PAGE_SHIFT;
		pa = map_offs & PAGE_MASK;
		map_offs &= ~PAGE_MASK;
		if (map_offs + len > PAGE_SIZE)
			len = PAGE_SIZE - map_offs;

		ptr = ioremap(pa, PAGE_SIZE);
		if (ptr) {
			memcpy_fromio(buf, ptr + map_offs, len);
			iounmap(ptr);
			err = len;
		} else
			err = -ENOMEM;
	} else
		/* Writing to device memory from gdb is not supported */
		err = -ENOMEM;

	mutex_unlock(&client->mtx);
	LOG_EXT();
	return err;
}
#endif
/* VMA callbacks installed on mappings created by this driver */
static const struct vm_operations_struct mods_krnl_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	/* Allows ptrace/gdb to peek into MODS mappings */
	.access = mods_krnl_vma_access,
#endif
	.open = mods_krnl_vma_open,
	.close = mods_krnl_vma_close
};
@@ -832,12 +986,13 @@ static int mods_krnl_mmap(struct file *fp, struct vm_area_struct *vma)
static int mods_krnl_map_inner(struct mods_client *client,
struct vm_area_struct *vma)
{
u64 req_pa = MODS_VMA_OFFSET(vma);
const u64 req_pa = (u64)vma->vm_pgoff << PAGE_SHIFT;
struct MODS_MEM_INFO *p_mem_info = mods_find_alloc(client, req_pa);
u32 req_pages = MODS_VMA_SIZE(vma) >> PAGE_SHIFT;
const u64 vma_size = (u64)(vma->vm_end - vma->vm_start);
const u32 req_pages = vma_size >> PAGE_SHIFT;
if ((req_pa & ~PAGE_MASK) != 0 ||
(MODS_VMA_SIZE(vma) & ~PAGE_MASK) != 0) {
if ((req_pa & ~PAGE_MASK) != 0 ||
(vma_size & ~PAGE_MASK) != 0) {
mods_error_printk("requested mapping is not page-aligned\n");
return -EINVAL;
}
@@ -922,14 +1077,11 @@ static int mods_krnl_map_inner(struct mods_client *client,
have_pages -= map_pages;
}
/* MODS_VMA_OFFSET(vma) can change so it can't be used
* to register the mapping
*/
mods_register_mapping(client,
p_mem_info,
chunks[first].dma_addr,
vma->vm_start,
MODS_VMA_SIZE(vma));
vma_size);
} else {
/* device memory */
@@ -938,19 +1090,18 @@ static int mods_krnl_map_inner(struct mods_client *client,
"map dev: phys 0x%llx, virt 0x%lx, size 0x%lx, %s\n",
req_pa,
(unsigned long)vma->vm_start,
(unsigned long)MODS_VMA_SIZE(vma),
mods_get_prot_str_for_range(client, req_pa,
MODS_VMA_SIZE(vma)));
(unsigned long)vma_size,
mods_get_prot_str_for_range(client, req_pa, vma_size));
if (io_remap_pfn_range(
vma,
vma->vm_start,
req_pa>>PAGE_SHIFT,
MODS_VMA_SIZE(vma),
vma_size,
mods_get_prot_for_range(
client,
req_pa,
MODS_VMA_SIZE(vma),
vma_size,
vma->vm_page_prot))) {
mods_error_printk("failed to map device memory\n");
return -EAGAIN;
@@ -960,7 +1111,7 @@ static int mods_krnl_map_inner(struct mods_client *client,
NULL,
req_pa,
vma->vm_start,
MODS_VMA_SIZE(vma));
vma_size);
}
return OK;
}
@@ -1484,6 +1635,12 @@ static long mods_krnl_ioctl(struct file *fp,
MODS_PCI_WRITE_2);
break;
case MODS_ESC_PCI_BUS_RESCAN:
MODS_IOCTL_NORETVAL(MODS_ESC_PCI_BUS_RESCAN,
esc_mods_pci_bus_rescan,
MODS_PCI_BUS_RESCAN);
break;
case MODS_ESC_PCI_BUS_ADD_DEVICES:
MODS_IOCTL_NORETVAL(MODS_ESC_PCI_BUS_ADD_DEVICES,
esc_mods_pci_bus_add_dev,
@@ -1524,6 +1681,12 @@ static long mods_krnl_ioctl(struct file *fp,
MODS_DEVICE_NUMA_INFO_2);
break;
case MODS_ESC_DEVICE_NUMA_INFO_3:
MODS_IOCTL(MODS_ESC_DEVICE_NUMA_INFO_3,
esc_mods_device_numa_info_3,
MODS_DEVICE_NUMA_INFO_3);
break;
case MODS_ESC_GET_IOMMU_STATE:
MODS_IOCTL(MODS_ESC_GET_IOMMU_STATE,
esc_mods_get_iommu_state,

View File

@@ -498,32 +498,45 @@ int esc_mods_pci_write(struct mods_client *client,
int esc_mods_pci_bus_add_dev(struct mods_client *client,
struct MODS_PCI_BUS_ADD_DEVICES *scan)
{
struct MODS_PCI_BUS_RESCAN rescan = { 0, scan->bus };
return esc_mods_pci_bus_rescan(client, &rescan);
}
/* MODS_ESC_PCI_BUS_RESCAN: rescan the PCI bus identified by domain:bus
 * to pick up hot-plugged devices.  Returns OK on success, -EINVAL if the
 * bus does not exist or rescanning is unsupported by this kernel.
 *
 * Note: the diff residue interleaved the old (domain-0-only) and new
 * implementations; this is the reconstructed new implementation, which
 * consistently uses the `rescan` argument.
 */
int esc_mods_pci_bus_rescan(struct mods_client *client,
			    struct MODS_PCI_BUS_RESCAN *rescan)
{
#ifndef MODS_HASNT_PCI_RESCAN_BUS
	struct pci_bus *bus;
	int err = OK;

	LOG_ENT();

	mods_info_printk("scanning pci bus %04x:%02x\n",
			 rescan->domain, rescan->bus);

	bus = pci_find_bus(rescan->domain, rescan->bus);

	if (likely(bus)) {
#ifndef MODS_HASNT_PCI_LOCK_RESCAN_REMOVE
		pci_lock_rescan_remove();
#endif
		pci_rescan_bus(bus);
#ifndef MODS_HASNT_PCI_LOCK_RESCAN_REMOVE
		pci_unlock_rescan_remove();
#endif
	} else {
		mods_error_printk("bus %04x:%02x not found\n",
				  rescan->domain, rescan->bus);
		err = -EINVAL;
	}

	LOG_EXT();
	return err;
#else
	return -EINVAL;
#endif
}
int esc_mods_pci_hot_reset(struct mods_client *client,
@@ -660,8 +673,8 @@ int esc_mods_pio_write(struct mods_client *client, struct MODS_PIO_WRITE *p)
return OK;
}
int esc_mods_device_numa_info_2(struct mods_client *client,
struct MODS_DEVICE_NUMA_INFO_2 *p)
int esc_mods_device_numa_info_3(struct mods_client *client,
struct MODS_DEVICE_NUMA_INFO_3 *p)
{
struct pci_dev *dev;
int err;
@@ -682,55 +695,135 @@ int esc_mods_device_numa_info_2(struct mods_client *client,
p->node = dev_to_node(&dev->dev);
if (p->node != -1) {
const unsigned long *maskp
= cpumask_bits(cpumask_of_node(p->node));
unsigned int i, word, bit, maskidx;
u32 first_offset = ~0U;
unsigned int i;
const unsigned long *maskp;
if (((nr_cpumask_bits + 31) / 32) > MAX_CPU_MASKS) {
mods_error_printk("too many CPUs (%d) for mask bits\n",
nr_cpumask_bits);
pci_dev_put(dev);
LOG_EXT();
return -EINVAL;
maskp = cpumask_bits(cpumask_of_node(p->node));
memset(&p->node_cpu_mask, 0, sizeof(p->node_cpu_mask));
for (i = 0; i < nr_cpumask_bits; i += 32) {
const u32 word = i / BITS_PER_LONG;
const u32 bit = i % BITS_PER_LONG;
const u32 cur_mask = (u32)(maskp[word] >> bit);
u32 mask_idx;
if (first_offset == ~0U) {
if (cur_mask) {
first_offset = i / 32;
p->first_cpu_mask_offset = first_offset;
} else
continue;
}
mask_idx = (i / 32) - first_offset;
if (cur_mask && mask_idx >= MAX_CPU_MASKS_3) {
mods_error_printk("too many CPUs (%d) for mask bits\n",
nr_cpumask_bits);
pci_dev_put(dev);
LOG_EXT();
return -EINVAL;
}
if (mask_idx < MAX_CPU_MASKS_3)
p->node_cpu_mask[mask_idx] = cur_mask;
}
for (i = 0, maskidx = 0;
i < nr_cpumask_bits;
i += 32, maskidx++) {
word = i / BITS_PER_LONG;
bit = i % BITS_PER_LONG;
p->node_cpu_mask[maskidx]
= (maskp[word] >> bit) & 0xFFFFFFFFUL;
}
if (first_offset == ~0U)
p->first_cpu_mask_offset = 0;
}
p->node_count = num_possible_nodes();
p->cpu_count = num_possible_cpus();
p->cpu_count = num_possible_cpus();
pci_dev_put(dev);
LOG_EXT();
return OK;
}
int esc_mods_device_numa_info_2(struct mods_client *client,
struct MODS_DEVICE_NUMA_INFO_2 *p)
{
int err;
struct MODS_DEVICE_NUMA_INFO_3 numa_info = { {0} };
numa_info.pci_device = p->pci_device;
err = esc_mods_device_numa_info_3(client, &numa_info);
if (likely(!err)) {
int i;
p->node = numa_info.node;
p->node_count = numa_info.node_count;
p->cpu_count = numa_info.cpu_count;
memset(&p->node_cpu_mask, 0, sizeof(p->node_cpu_mask));
for (i = 0; i < MAX_CPU_MASKS_3; i++) {
const u32 cur_mask = numa_info.node_cpu_mask[i];
const u32 dst = i +
numa_info.first_cpu_mask_offset;
if (cur_mask && dst >= MAX_CPU_MASKS) {
mods_error_printk("too many CPUs (%d) for mask bits\n",
nr_cpumask_bits);
err = -EINVAL;
break;
}
if (dst < MAX_CPU_MASKS)
p->node_cpu_mask[dst]
= numa_info.node_cpu_mask[i];
}
}
return err;
}
int esc_mods_device_numa_info(struct mods_client *client,
struct MODS_DEVICE_NUMA_INFO *p)
{
int err;
int i;
struct MODS_DEVICE_NUMA_INFO_2 numa_info = { {0} };
struct MODS_DEVICE_NUMA_INFO_3 numa_info = { {0} };
numa_info.pci_device.domain = 0;
numa_info.pci_device.bus = p->pci_device.bus;
numa_info.pci_device.device = p->pci_device.device;
numa_info.pci_device.function = p->pci_device.function;
err = esc_mods_device_numa_info_2(client, &numa_info);
err = esc_mods_device_numa_info_3(client, &numa_info);
if (likely(!err)) {
int i;
p->node = numa_info.node;
p->node_count = numa_info.node_count;
p->cpu_count = numa_info.cpu_count;
for (i = 0; i < MAX_CPU_MASKS; i++)
p->node_cpu_mask[i] = numa_info.node_cpu_mask[i];
memset(&p->node_cpu_mask, 0, sizeof(p->node_cpu_mask));
for (i = 0; i < MAX_CPU_MASKS_3; i++) {
const u32 cur_mask = numa_info.node_cpu_mask[i];
const u32 dst = i +
numa_info.first_cpu_mask_offset;
if (cur_mask && dst >= MAX_CPU_MASKS) {
mods_error_printk("too many CPUs (%d) for mask bits\n",
nr_cpumask_bits);
err = -EINVAL;
break;
}
if (dst < MAX_CPU_MASKS)
p->node_cpu_mask[dst]
= numa_info.node_cpu_mask[i];
}
}
return err;