misc: mods: update MODS driver from Perforce

Bug 2142482

Change-Id: I340ffd337a963e95b59a2496686039f339be860f
Signed-off-by: Chris Dragan <kdragan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1754568
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Chris Dragan
2018-06-19 07:36:08 -07:00
committed by Laxman Dewangan
parent bf5cb93fe0
commit e5ec8349f7
6 changed files with 691 additions and 1210 deletions

View File

@@ -24,7 +24,7 @@
/* Driver version */
#define MODS_DRIVER_VERSION_MAJOR 3
#define MODS_DRIVER_VERSION_MINOR 86
#define MODS_DRIVER_VERSION_MINOR 87
#define MODS_DRIVER_VERSION ((MODS_DRIVER_VERSION_MAJOR << 8) | \
((MODS_DRIVER_VERSION_MINOR/10) << 4) | \
(MODS_DRIVER_VERSION_MINOR%10))
@@ -161,6 +161,17 @@ struct MODS_DMA_MAP_MEMORY {
struct mods_pci_dev_2 pci_device;
};
/* MODS_ESC_PCI_SET_DMA_MASK */
struct MODS_PCI_DMA_MASK {
/* IN */
struct mods_pci_dev_2 pci_device;
__u32 num_bits;
};
#define MODS_SWIOTLB_DISABLED 0
#define MODS_SWIOTLB_ACTIVE 1
#define MODS_SWIOTLB_INDETERMINATE 2
/* MODS_ESC_GET_IOMMU_STATE */
struct MODS_GET_IOMMU_STATE {
/* IN */
@@ -507,6 +518,7 @@ struct mods_mask_info {
__u64 and_mask; /*and mask for clearing bits in this register */
__u64 or_mask; /*or value for setting bit in this register */
};
struct MODS_SET_IRQ_MULTIMASK {
/* IN */
__u64 aperture_addr; /* physical address of aperture */
@@ -518,7 +530,6 @@ struct MODS_SET_IRQ_MULTIMASK {
__u8 irq_type; /* irq type */
};
/* MODS_ESC_SET_IRQ_MASK_2 */
struct MODS_SET_IRQ_MASK_2 {
/* IN */
@@ -785,6 +796,37 @@ struct MODS_TEGRA_DC_CONFIG_POSSIBLE {
__u8 possible;
};
#define MODS_TEGRA_DC_SETUP_SD_LUT_SIZE 9
#define MODS_TEGRA_DC_SETUP_BLTF_SIZE 16
/* MODS_ESC_TEGRA_DC_SETUP_SD */
struct MODS_TEGRA_DC_SETUP_SD {
/* IN */
__u8 head;
__u8 enable;
__u8 use_vid_luma;
__u8 csc_r;
__u8 csc_g;
__u8 csc_b;
__u8 aggressiveness;
__u8 bin_width_log2;
__u32 lut[MODS_TEGRA_DC_SETUP_SD_LUT_SIZE];
__u32 bltf[MODS_TEGRA_DC_SETUP_BLTF_SIZE];
__u32 klimit;
__u32 soft_clipping_threshold;
__u32 smooth_k_inc;
__u8 k_init_bias;
__u32 win_x;
__u32 win_y;
__u32 win_w;
__u32 win_h;
};
/* MODS_ESC_DMABUF_GET_PHYSICAL_ADDRESS */
struct MODS_DMABUF_GET_PHYSICAL_ADDRESS {
/* IN */
@@ -1132,6 +1174,8 @@ struct MODS_SET_NUM_VF {
#define MODS_ESC_TEGRA_DC_CONFIG_POSSIBLE \
_IOWR(MODS_IOC_MAGIC, 47, \
struct MODS_TEGRA_DC_CONFIG_POSSIBLE)
#define MODS_ESC_TEGRA_DC_SETUP_SD \
_IOW(MODS_IOC_MAGIC, 48, struct MODS_TEGRA_DC_SETUP_SD)
#define MODS_ESC_DMABUF_GET_PHYSICAL_ADDRESS \
_IOWR(MODS_IOC_MAGIC, 49, \
struct MODS_DMABUF_GET_PHYSICAL_ADDRESS)
@@ -1290,5 +1334,9 @@ struct MODS_SET_NUM_VF {
_IOW(MODS_IOC_MAGIC, 116, struct MODS_SET_NUM_VF)
#define MODS_ESC_SET_TOTAL_VF \
_IOW(MODS_IOC_MAGIC, 117, struct MODS_SET_NUM_VF)
#define MODS_ESC_PCI_SET_DMA_MASK \
_IOW(MODS_IOC_MAGIC, 118, struct MODS_PCI_DMA_MASK)
#define MODS_ESC_GET_IOMMU_STATE_2 \
_IOWR(MODS_IOC_MAGIC, 119, struct MODS_GET_IOMMU_STATE)
#endif /* _MODS_H_ */

View File

@@ -55,10 +55,10 @@
struct en_dev_entry {
struct pci_dev *dev;
struct en_dev_entry *next;
__u32 irqs_allocated;
__u32 irq_flags;
__u32 nvecs;
struct msix_entry *msix_entries;
u32 irq_flags;
u32 nvecs;
u8 client_id;
};
struct mem_type {
@@ -67,26 +67,41 @@ struct mem_type {
u32 type;
};
/* file private data */
struct mods_file_private_data {
struct list_head *mods_alloc_list;
struct list_head *mods_mapping_list;
struct list_head *mods_pci_res_map_list;
struct irq_q_data {
u32 time;
struct pci_dev *dev;
u32 irq;
u32 irq_index;
};
struct irq_q_info {
struct irq_q_data data[MODS_MAX_IRQS];
u32 head;
u32 tail;
};
/* The driver can be opened simultaneously multiple times, from the same or from
* different processes. This structure tracks data specific to each open fd.
*/
struct mods_client {
struct list_head irq_list;
struct list_head mem_alloc_list;
struct list_head mem_map_list;
#if defined(CONFIG_PPC64)
struct list_head *mods_ppc_tce_bypass_list;
struct list_head *mods_nvlink_sysmem_trained_list;
struct list_head ppc_tce_bypass_list;
struct list_head nvlink_sysmem_trained_list;
#endif
wait_queue_head_t interrupt_event;
struct irq_q_info irq_queue;
spinlock_t irq_lock;
struct en_dev_entry *enabled_devices;
int mods_id;
struct mem_type mem_type;
struct mutex mtx;
int mods_fb_suspended[FB_MAX];
u32 access_token;
u8 client_id;
};
#define MODS_PRIV struct mods_file_private_data *
/* VM private data */
struct mods_vm_private_data {
struct file *fp;
@@ -133,7 +148,7 @@ struct MODS_MEM_INFO {
* memory was allocated on
*/
struct list_head *dma_map_list;
struct list_head dma_map_list;
struct list_head list;
@@ -191,7 +206,7 @@ struct NVL_TRAINED {
#define IRQ_MAX (256+PCI_IRQ_MAX)
#define PCI_IRQ_MAX 15
#define MODS_CHANNEL_MAX 32
#define MODS_MAX_CLIENTS 32
#define IRQ_VAL_POISON 0xfafbfcfdU
@@ -230,19 +245,6 @@ struct NVL_TRAINED {
#define mods_warning_printk(fmt, args...)\
pr_info("mods warning: " fmt, ##args)
struct irq_q_data {
u32 time;
struct pci_dev *dev;
u32 irq;
u32 irq_index;
};
struct irq_q_info {
struct irq_q_data data[MODS_MAX_IRQS];
u32 head;
u32 tail;
};
struct irq_mask_info {
u32 *dev_irq_mask_reg; /*IRQ mask register, read-only reg*/
u32 *dev_irq_state; /* IRQ status register*/
@@ -257,7 +259,7 @@ struct dev_irq_map {
u32 apic_irq;
u32 entry;
u8 type;
u8 channel;
u8 client_id;
u8 mask_info_cnt;
struct irq_mask_info mask_info[MODS_IRQ_MAX_MASKS];
struct pci_dev *dev;
@@ -265,16 +267,14 @@ struct dev_irq_map {
};
struct mods_priv {
/* map info from pci irq to apic irq */
struct list_head irq_head[MODS_CHANNEL_MAX];
/* Bitmap for each allocated client id. */
unsigned long client_flags;
/* bits map for each allocated id. Each mods has an id. */
/* the design is to take into account multi mods. */
unsigned long channel_flags;
/* Client structures */
struct mods_client clients[MODS_MAX_CLIENTS];
/* fifo loop queue */
struct irq_q_info rec_info[MODS_CHANNEL_MAX];
spinlock_t lock;
/* Mutex for guarding interrupt logic and PCI device enablement */
struct mutex mtx;
};
#ifndef MODS_HAS_SET_MEMORY
@@ -362,9 +362,15 @@ struct mods_priv {
#define MODS_ACPI_HANDLE(dev) DEVICE_ACPI_HANDLE(dev)
#endif
/* FILE */
#define MODS_GET_FILE_PRIVATE_ID(fp) (((struct mods_file_private_data *)(fp) \
->private_data)->mods_id)
static inline u8 get_client_id(struct file *fp)
{
return ((struct mods_client *)(fp->private_data))->client_id;
}
static inline int is_client_id_valid(u8 client_id)
{
return client_id > 0 && client_id <= MODS_MAX_CLIENTS;
}
/* ************************************************************************* */
/* ** MODULE WIDE FUNCTIONS */
@@ -373,11 +379,11 @@ struct mods_priv {
/* irq */
void mods_init_irq(void);
void mods_cleanup_irq(void);
unsigned char mods_alloc_channel(void);
void mods_free_channel(unsigned char channel);
void mods_irq_dev_clr_pri(unsigned char id);
void mods_irq_dev_set_pri(unsigned char id, void *pri);
int mods_irq_event_check(unsigned char channel);
struct mutex *mods_get_irq_mutex(void);
struct mods_client *mods_alloc_client(void);
void mods_free_client_interrupts(struct mods_client *client);
void mods_free_client(u8 client_id);
int mods_irq_event_check(u8 client_id);
/* mem */
const char *mods_get_prot_str(u32 mem_type);
@@ -392,13 +398,9 @@ int mods_unregister_all_nvlink_sysmem_trained(struct file *fp);
#endif
#ifdef CONFIG_PCI
int mods_enable_device(struct mods_file_private_data *priv,
struct pci_dev *pdev);
struct en_dev_entry *mods_enable_device(struct mods_client *client,
struct pci_dev *dev);
void mods_disable_device(struct pci_dev *pdev);
int mods_unregister_all_pci_res_mappings(struct file *fp);
#define MODS_UNREGISTER_PCI_MAP(fp) mods_unregister_all_pci_res_mappings(fp)
#else
#define MODS_UNREGISTER_PCI_MAP(fp) 0
#endif
/* clock */
@@ -489,12 +491,12 @@ int esc_mods_device_numa_info(struct file *fp,
struct MODS_DEVICE_NUMA_INFO *p);
int esc_mods_device_numa_info_2(struct file *fp,
struct MODS_DEVICE_NUMA_INFO_2 *p);
int esc_mods_pci_map_resource(struct file *fp,
struct MODS_PCI_MAP_RESOURCE *p);
int esc_mods_pci_unmap_resource(struct file *fp,
struct MODS_PCI_UNMAP_RESOURCE *p);
int esc_mods_get_iommu_state(struct file *pfile,
struct MODS_GET_IOMMU_STATE *state);
int esc_mods_get_iommu_state_2(struct file *pfile,
struct MODS_GET_IOMMU_STATE *state);
int esc_mods_pci_set_dma_mask(struct file *pfile,
struct MODS_PCI_DMA_MASK *dma_mask);
#endif
/* irq */
#if defined(MODS_TEGRA) && defined(CONFIG_OF_IRQ) && defined(CONFIG_OF)
@@ -510,11 +512,6 @@ int esc_mods_unregister_irq_2(struct file *fp,
struct MODS_REGISTER_IRQ_2 *p);
int esc_mods_query_irq(struct file *fp, struct MODS_QUERY_IRQ *p);
int esc_mods_query_irq_2(struct file *fp, struct MODS_QUERY_IRQ_2 *p);
int esc_mods_set_irq_mask(struct file *fp, struct MODS_SET_IRQ_MASK *p);
int esc_mods_set_irq_mask_2(struct file *fp,
struct MODS_SET_IRQ_MASK_2 *p);
int esc_mods_set_irq_multimask(struct file *fp,
struct MODS_SET_IRQ_MULTIMASK *p);
int esc_mods_irq_handled(struct file *fp, struct MODS_REGISTER_IRQ *p);
int esc_mods_irq_handled_2(struct file *fp,
struct MODS_REGISTER_IRQ_2 *p);

View File

File diff suppressed because it is too large Load Diff

View File

@@ -126,7 +126,7 @@ struct pci_driver mods_pci_driver = {
* used to avoid globalization of variables *
***********************************************/
static int debug = -0x80000000;
static int debug;
static int multi_instance = MODS_MULTI_INSTANCE_DEFAULT_VALUE;
static u32 access_token = MODS_ACCESS_TOKEN_NONE;
@@ -290,9 +290,9 @@ static int mods_set_access_token(u32 tok)
static int mods_check_access_token(struct file *fp)
{
MODS_PRIV private_data = fp->private_data;
struct mods_client *client = fp->private_data;
if (private_data->access_token != mods_get_access_token())
if (client->access_token != mods_get_access_token())
return -EFAULT;
return OK;
@@ -307,6 +307,8 @@ static int __init mods_init_module(void)
LOG_ENT();
mods_init_irq();
rc = misc_register(&mods_dev);
if (rc < 0)
return -EBUSY;
@@ -317,8 +319,6 @@ static int __init mods_init_module(void)
return -EBUSY;
#endif
mods_init_irq();
#if defined(MODS_HAS_CLOCK)
mods_init_clock_api();
#endif
@@ -402,26 +402,23 @@ MODULE_PARM_DESC(ppc_tce_bypass,
/********************
* HELPER FUNCTIONS *
********************/
static int id_is_valid(unsigned char channel)
{
if (channel <= 0 || channel > MODS_CHANNEL_MAX)
return -EINVAL;
return OK;
}
static void mods_disable_all_devices(struct mods_file_private_data *priv)
static void mods_disable_all_devices(struct mods_client *client)
{
#ifdef CONFIG_PCI
while (priv->enabled_devices != 0) {
struct en_dev_entry *old = priv->enabled_devices;
if (unlikely(mutex_lock_interruptible(mods_get_irq_mutex())))
return;
while (client->enabled_devices != 0) {
struct en_dev_entry *old = client->enabled_devices;
mods_disable_device(old->dev);
priv->enabled_devices = old->next;
client->enabled_devices = old->next;
kfree(old);
}
mutex_unlock(mods_get_irq_mutex());
#else
WARN_ON(priv->enabled_devices != 0);
WARN_ON(client->enabled_devices != 0);
#endif
}
@@ -438,7 +435,7 @@ static int mods_register_mapping(
u64 mapping_length)
{
struct SYS_MAP_MEMORY *p_map_mem;
MODS_PRIV private_data = fp->private_data;
struct mods_client *client = fp->private_data;
LOG_ENT();
@@ -454,7 +451,7 @@ static int mods_register_mapping(
p_map_mem->mapping_length = mapping_length;
p_map_mem->p_mem_info = p_mem_info;
list_add(&p_map_mem->list, private_data->mods_mapping_list);
list_add(&p_map_mem->list, &client->mem_map_list);
mods_debug_printk(DEBUG_MEM_DETAILED,
"map alloc %p as %p: phys 0x%llx, virt 0x%llx, size 0x%llx\n",
@@ -467,10 +464,9 @@ static int mods_register_mapping(
static void mods_unregister_mapping(struct file *fp, u64 virtual_address)
{
struct SYS_MAP_MEMORY *p_map_mem;
MODS_PRIV private_data = fp->private_data;
struct list_head *head = private_data->mods_mapping_list;
struct list_head *iter;
struct mods_client *client = fp->private_data;
struct list_head *head = &client->mem_map_list;
struct list_head *iter;
LOG_ENT();
@@ -493,11 +489,10 @@ static void mods_unregister_mapping(struct file *fp, u64 virtual_address)
static void mods_unregister_all_mappings(struct file *fp)
{
struct SYS_MAP_MEMORY *p_map_mem;
MODS_PRIV private_data = fp->private_data;
struct list_head *head = private_data->mods_mapping_list;
struct list_head *iter;
struct list_head *tmp;
struct mods_client *client = fp->private_data;
struct list_head *head = &client->mem_map_list;
struct list_head *iter;
struct list_head *tmp;
LOG_ENT();
@@ -531,12 +526,12 @@ static pgprot_t mods_get_prot(u32 mem_type, pgprot_t prot)
static pgprot_t mods_get_prot_for_range(struct file *fp, u64 dma_addr,
u64 size, pgprot_t prot)
{
MODS_PRIV private_data = fp->private_data;
struct mods_client *client = fp->private_data;
if ((dma_addr == private_data->mem_type.dma_addr) &&
(size == private_data->mem_type.size)) {
if ((dma_addr == client->mem_type.dma_addr) &&
(size == client->mem_type.size)) {
return mods_get_prot(private_data->mem_type.type, prot);
return mods_get_prot(client->mem_type.type, prot);
}
return prot;
}
@@ -562,12 +557,12 @@ static const char *mods_get_prot_str_for_range(struct file *fp,
u64 dma_addr,
u64 size)
{
MODS_PRIV private_data = fp->private_data;
struct mods_client *client = fp->private_data;
if ((dma_addr == private_data->mem_type.dma_addr) &&
(size == private_data->mem_type.size)) {
if ((dma_addr == client->mem_type.dma_addr) &&
(size == client->mem_type.size)) {
return mods_get_prot_str(private_data->mem_type.type);
return mods_get_prot_str(client->mem_type.type);
}
return "default";
}
@@ -639,11 +634,11 @@ static void mods_krnl_vma_close(struct vm_area_struct *vma)
struct mods_vm_private_data *vma_private_data
= MODS_VMA_PRIVATE(vma);
if (atomic_dec_and_test(&vma_private_data->usage_count)) {
MODS_PRIV private_data =
struct mods_client *client =
vma_private_data->fp->private_data;
if (unlikely(mutex_lock_interruptible(
&private_data->mtx))) {
&client->mtx))) {
LOG_EXT();
return;
}
@@ -657,7 +652,7 @@ static void mods_krnl_vma_close(struct vm_area_struct *vma)
MODS_VMA_PRIVATE(vma) = NULL;
kfree(vma_private_data);
mutex_unlock(&private_data->mtx);
mutex_unlock(&client->mtx);
}
}
LOG_EXT();
@@ -670,126 +665,18 @@ static const struct vm_operations_struct mods_krnl_vm_ops = {
static int mods_krnl_open(struct inode *ip, struct file *fp)
{
struct list_head *mods_alloc_list;
struct list_head *mods_mapping_list;
struct list_head *mods_pci_res_map_list;
#if defined(CONFIG_PPC64)
struct list_head *mods_ppc_tce_bypass_list;
struct list_head *mods_nvlink_sysmem_trained_list;
#endif
struct mods_file_private_data *private_data;
int id = 0;
int i = 0;
struct mods_client *client;
LOG_ENT();
mods_alloc_list = kmalloc(sizeof(struct list_head),
GFP_KERNEL | __GFP_NORETRY);
if (unlikely(!mods_alloc_list)) {
LOG_EXT();
return -ENOMEM;
}
mods_mapping_list = kmalloc(sizeof(struct list_head),
GFP_KERNEL | __GFP_NORETRY);
if (unlikely(!mods_mapping_list)) {
kfree(mods_alloc_list);
LOG_EXT();
return -ENOMEM;
}
mods_pci_res_map_list = kmalloc(sizeof(struct list_head),
GFP_KERNEL | __GFP_NORETRY);
if (unlikely(!mods_pci_res_map_list)) {
kfree(mods_alloc_list);
kfree(mods_mapping_list);
LOG_EXT();
return -ENOMEM;
}
#if defined(CONFIG_PPC64)
mods_ppc_tce_bypass_list =
kmalloc(sizeof(struct list_head), GFP_KERNEL | __GFP_NORETRY);
if (unlikely(!mods_ppc_tce_bypass_list)) {
kfree(mods_alloc_list);
kfree(mods_mapping_list);
kfree(mods_pci_res_map_list);
LOG_EXT();
return -ENOMEM;
}
mods_nvlink_sysmem_trained_list =
kmalloc(sizeof(struct list_head), GFP_KERNEL | __GFP_NORETRY);
if (unlikely(!mods_nvlink_sysmem_trained_list)) {
kfree(mods_alloc_list);
kfree(mods_mapping_list);
kfree(mods_pci_res_map_list);
kfree(mods_ppc_tce_bypass_list);
LOG_EXT();
return -ENOMEM;
}
#endif
private_data = kmalloc(sizeof(*private_data),
GFP_KERNEL | __GFP_NORETRY);
if (unlikely(!private_data)) {
kfree(mods_alloc_list);
kfree(mods_mapping_list);
kfree(mods_pci_res_map_list);
#if defined(CONFIG_PPC64)
kfree(mods_ppc_tce_bypass_list);
kfree(mods_nvlink_sysmem_trained_list);
#endif
LOG_EXT();
return -ENOMEM;
}
id = mods_alloc_channel();
if (id_is_valid(id) != OK) {
client = mods_alloc_client();
if (client == NULL) {
mods_error_printk("too many clients\n");
kfree(mods_alloc_list);
kfree(mods_mapping_list);
kfree(mods_pci_res_map_list);
#if defined(CONFIG_PPC64)
kfree(mods_ppc_tce_bypass_list);
kfree(mods_nvlink_sysmem_trained_list);
#endif
kfree(private_data);
LOG_EXT();
return -EBUSY;
}
private_data->mods_id = id;
mods_irq_dev_set_pri(id, private_data);
INIT_LIST_HEAD(mods_alloc_list);
INIT_LIST_HEAD(mods_mapping_list);
INIT_LIST_HEAD(mods_pci_res_map_list);
private_data->mods_alloc_list = mods_alloc_list;
private_data->mods_mapping_list = mods_mapping_list;
private_data->mods_pci_res_map_list = mods_pci_res_map_list;
#if defined(CONFIG_PPC64)
INIT_LIST_HEAD(mods_ppc_tce_bypass_list);
INIT_LIST_HEAD(mods_nvlink_sysmem_trained_list);
private_data->mods_ppc_tce_bypass_list = mods_ppc_tce_bypass_list;
private_data->mods_nvlink_sysmem_trained_list
= mods_nvlink_sysmem_trained_list;
#endif
private_data->enabled_devices = 0;
private_data->mem_type.dma_addr = 0;
private_data->mem_type.size = 0;
private_data->mem_type.type = 0;
mutex_init(&private_data->mtx);
for (i = 0; i < FB_MAX; i++)
private_data->mods_fb_suspended[i] = 0;
init_waitqueue_head(&private_data->interrupt_event);
private_data->access_token = MODS_ACCESS_TOKEN_NONE;
fp->private_data = private_data;
fp->private_data = client;
mods_info_printk("driver opened\n");
LOG_EXT();
@@ -798,30 +685,26 @@ static int mods_krnl_open(struct inode *ip, struct file *fp)
static int mods_krnl_close(struct inode *ip, struct file *fp)
{
MODS_PRIV private_data = fp->private_data;
unsigned char id = MODS_GET_FILE_PRIVATE_ID(fp);
int ret = OK;
struct mods_client *client = fp->private_data;
u8 client_id = client->client_id;
int ret = OK;
LOG_ENT();
WARN_ON(id_is_valid(id) != OK);
if (id_is_valid(id) != OK) {
WARN_ON(!is_client_id_valid(client_id));
if (!is_client_id_valid(client_id)) {
LOG_EXT();
return -EINVAL;
}
mods_resume_console(fp);
mods_free_client_interrupts(client);
mods_free_channel(id);
mods_irq_dev_clr_pri(id);
mods_resume_console(fp);
mods_unregister_all_mappings(fp);
ret = mods_unregister_all_alloc(fp);
if (ret)
mods_error_printk("failed to free all memory\n");
ret = MODS_UNREGISTER_PCI_MAP(fp);
if (ret)
mods_error_printk("failed to free pci mappings\n");
#if defined(CONFIG_PPC64)
ret = mods_unregister_all_ppc_tce_bypass(fp);
@@ -833,16 +716,9 @@ static int mods_krnl_close(struct inode *ip, struct file *fp)
mods_error_printk("failed to free nvlink trained\n");
#endif
mods_disable_all_devices(private_data);
mods_disable_all_devices(client);
kfree(private_data->mods_alloc_list);
kfree(private_data->mods_mapping_list);
kfree(private_data->mods_pci_res_map_list);
#if defined(CONFIG_PPC64)
kfree(private_data->mods_ppc_tce_bypass_list);
kfree(private_data->mods_nvlink_sysmem_trained_list);
#endif
kfree(private_data);
mods_free_client(client_id);
mods_info_printk("driver closed\n");
LOG_EXT();
@@ -852,8 +728,8 @@ static int mods_krnl_close(struct inode *ip, struct file *fp)
static unsigned int mods_krnl_poll(struct file *fp, poll_table *wait)
{
unsigned int mask = 0;
MODS_PRIV private_data = fp->private_data;
unsigned char id = MODS_GET_FILE_PRIVATE_ID(fp);
struct mods_client *client = fp->private_data;
u8 client_id = get_client_id(fp);
int access_tok_ret = mods_check_access_token(fp);
if (access_tok_ret < 0)
@@ -861,10 +737,10 @@ static unsigned int mods_krnl_poll(struct file *fp, poll_table *wait)
if (!(fp->f_flags & O_NONBLOCK)) {
mods_debug_printk(DEBUG_ISR_DETAILED, "poll wait\n");
poll_wait(fp, &private_data->interrupt_event, wait);
poll_wait(fp, &client->interrupt_event, wait);
}
/* if any interrupts pending then check intr, POLLIN on irq */
mask |= mods_irq_event_check(id);
mask |= mods_irq_event_check(client_id);
mods_debug_printk(DEBUG_ISR_DETAILED, "poll mask 0x%x\n", mask);
return mask;
}
@@ -903,13 +779,13 @@ static int mods_krnl_mmap(struct file *fp, struct vm_area_struct *vma)
{
int ret = OK;
MODS_PRIV private_data = fp->private_data;
struct mods_client *client = fp->private_data;
if (unlikely(mutex_lock_interruptible(&private_data->mtx)))
if (unlikely(mutex_lock_interruptible(&client->mtx)))
ret = -EINTR;
else {
ret = mods_krnl_map_inner(fp, vma);
mutex_unlock(&private_data->mtx);
mutex_unlock(&client->mtx);
}
LOG_EXT();
return ret;
@@ -1162,14 +1038,14 @@ static int esc_mods_suspend_console(struct file *pfile)
#if defined(CONFIG_FB) && defined(MODS_HAS_CONSOLE_LOCK)
if (num_registered_fb) {
/* tell the os to block fb accesses */
MODS_PRIV private_data = pfile->private_data;
struct mods_client *client = pfile->private_data;
int i = 0;
for (i = 0; i < num_registered_fb; i++) {
console_lock();
if (registered_fb[i]->state != FBINFO_STATE_SUSPENDED) {
fb_set_suspend(registered_fb[i], 1);
private_data->mods_fb_suspended[i] = 1;
client->mods_fb_suspended[i] = 1;
}
console_unlock();
}
@@ -1207,14 +1083,14 @@ static int mods_resume_console(struct file *pfile)
#if defined(CONFIG_FB) && defined(MODS_HAS_CONSOLE_LOCK)
if (num_registered_fb) {
MODS_PRIV private_data = pfile->private_data;
struct mods_client *client = pfile->private_data;
int i = 0;
for (i = 0; i < num_registered_fb; i++) {
console_lock();
if (private_data->mods_fb_suspended[i]) {
if (client->mods_fb_suspended[i]) {
fb_set_suspend(registered_fb[i], 0);
private_data->mods_fb_suspended[i] = 0;
client->mods_fb_suspended[i] = 0;
}
console_unlock();
}
@@ -1258,9 +1134,9 @@ static int esc_mods_acquire_access_token(struct file *pfile,
if (ret < 0) {
mods_error_printk("unable to set access token!\n");
} else {
MODS_PRIV private_data = pfile->private_data;
struct mods_client *client = pfile->private_data;
private_data->access_token = ptoken->token;
client->access_token = ptoken->token;
}
LOG_EXT();
@@ -1286,9 +1162,9 @@ static int esc_mods_release_access_token(struct file *pfile,
if (ret < 0) {
mods_error_printk("unable to clear access token!\n");
} else {
MODS_PRIV private_data = pfile->private_data;
struct mods_client *client = pfile->private_data;
private_data->access_token = MODS_ACCESS_TOKEN_NONE;
client->access_token = MODS_ACCESS_TOKEN_NONE;
}
LOG_EXT();
@@ -1304,9 +1180,9 @@ static int esc_mods_verify_access_token(struct file *pfile,
LOG_ENT();
if (ptoken->token == mods_get_access_token()) {
MODS_PRIV private_data = pfile->private_data;
struct mods_client *client = pfile->private_data;
private_data->access_token = ptoken->token;
client->access_token = ptoken->token;
ret = OK;
} else
mods_error_printk("invalid access token\n");
@@ -1596,22 +1472,23 @@ static long mods_krnl_ioctl(struct file *fp,
MODS_DEVICE_NUMA_INFO_2);
break;
case MODS_ESC_PCI_MAP_RESOURCE:
MODS_IOCTL(MODS_ESC_PCI_MAP_RESOURCE,
esc_mods_pci_map_resource,
MODS_PCI_MAP_RESOURCE);
break;
case MODS_ESC_PCI_UNMAP_RESOURCE:
MODS_IOCTL(MODS_ESC_PCI_UNMAP_RESOURCE,
esc_mods_pci_unmap_resource,
MODS_PCI_UNMAP_RESOURCE);
break;
case MODS_ESC_GET_IOMMU_STATE:
MODS_IOCTL(MODS_ESC_GET_IOMMU_STATE,
esc_mods_get_iommu_state,
MODS_GET_IOMMU_STATE);
break;
case MODS_ESC_GET_IOMMU_STATE_2:
MODS_IOCTL(MODS_ESC_GET_IOMMU_STATE_2,
esc_mods_get_iommu_state_2,
MODS_GET_IOMMU_STATE);
break;
case MODS_ESC_PCI_SET_DMA_MASK:
MODS_IOCTL(MODS_ESC_PCI_SET_DMA_MASK,
esc_mods_pci_set_dma_mask,
MODS_PCI_DMA_MASK);
break;
#endif
case MODS_ESC_ALLOC_PAGES:
@@ -1767,23 +1644,6 @@ static long mods_krnl_ioctl(struct file *fp,
esc_mods_query_irq_2, MODS_QUERY_IRQ_2);
break;
case MODS_ESC_SET_IRQ_MASK:
MODS_IOCTL_NORETVAL(MODS_ESC_SET_IRQ_MASK,
esc_mods_set_irq_mask, MODS_SET_IRQ_MASK);
break;
case MODS_ESC_SET_IRQ_MASK_2:
MODS_IOCTL_NORETVAL(MODS_ESC_SET_IRQ_MASK_2,
esc_mods_set_irq_mask_2,
MODS_SET_IRQ_MASK_2);
break;
case MODS_ESC_SET_IRQ_MULTIMASK:
MODS_IOCTL_NORETVAL(MODS_ESC_SET_IRQ_MULTIMASK,
esc_mods_set_irq_multimask,
MODS_SET_IRQ_MULTIMASK);
break;
case MODS_ESC_IRQ_HANDLED:
MODS_IOCTL_NORETVAL(MODS_ESC_IRQ_HANDLED,
esc_mods_irq_handled, MODS_REGISTER_IRQ);

View File

@@ -77,7 +77,7 @@ static int mods_dma_unmap_and_free(struct MODS_MEM_INFO *p_mem_info,
struct list_head *head;
struct list_head *iter;
head = p_mem_info->dma_map_list;
head = &p_mem_info->dma_map_list;
list_for_each(iter, head) {
p_dma_map = list_entry(iter, struct MODS_DMA_MAP, list);
@@ -115,13 +115,10 @@ static int mods_dma_unmap_and_free(struct MODS_MEM_INFO *p_mem_info,
int mods_dma_unmap_all(struct MODS_MEM_INFO *p_mem_info,
struct pci_dev *p_pci_dev)
{
struct list_head *head = p_mem_info->dma_map_list;
struct list_head *head = &p_mem_info->dma_map_list;
struct list_head *iter;
struct list_head *tmp;
if (!p_mem_info->dma_map_list)
return OK;
list_for_each_safe(iter, tmp, head) {
struct MODS_DMA_MAP *p_dma_map;
int ret;
@@ -182,17 +179,7 @@ static int mods_create_dma_map(struct MODS_MEM_INFO *p_mem_info,
struct pci_dev *p_pci_dev)
{
struct MODS_DMA_MAP *p_dma_map;
int list_allocated = 0;
u32 alloc_size;
if (!p_mem_info->dma_map_list) {
p_mem_info->dma_map_list = kmalloc(sizeof(struct list_head),
GFP_KERNEL | __GFP_NORETRY);
if (unlikely(!p_mem_info->dma_map_list))
return -ENOMEM;
INIT_LIST_HEAD(p_mem_info->dma_map_list);
list_allocated = 1;
}
u32 alloc_size;
alloc_size = sizeof(*p_dma_map) +
(p_mem_info->max_chunks - 1) *
@@ -201,18 +188,14 @@ static int mods_create_dma_map(struct MODS_MEM_INFO *p_mem_info,
p_dma_map = kmalloc(alloc_size, GFP_KERNEL | __GFP_NORETRY);
if (unlikely(!p_dma_map)) {
mods_error_printk("failed to allocate device map data\n");
if (list_allocated) {
kfree(p_mem_info->dma_map_list);
p_mem_info->dma_map_list = NULL;
return -ENOMEM;
}
return -ENOMEM;
}
memset(p_dma_map, 0, alloc_size);
p_dma_map->dev = p_pci_dev;
mods_dma_map_pages(p_mem_info, p_dma_map);
list_add(&p_dma_map->list, p_mem_info->dma_map_list);
list_add(&p_dma_map->list, &p_mem_info->dma_map_list);
return OK;
}
@@ -230,7 +213,7 @@ static struct MODS_MAP_CHUNK *mods_find_dma_map_chunk(
struct list_head *iter;
int i;
head = p_mem_info->dma_map_list;
head = &p_mem_info->dma_map_list;
if (!head)
return NULL;
@@ -485,12 +468,12 @@ failed:
static int mods_register_alloc(struct file *fp,
struct MODS_MEM_INFO *p_mem_info)
{
MODS_PRIV private_data = fp->private_data;
struct mods_client *client = fp->private_data;
if (unlikely(mutex_lock_interruptible(&private_data->mtx)))
if (unlikely(mutex_lock_interruptible(&client->mtx)))
return -EINTR;
list_add(&p_mem_info->list, private_data->mods_alloc_list);
mutex_unlock(&private_data->mtx);
list_add(&p_mem_info->list, &client->mem_alloc_list);
mutex_unlock(&client->mtx);
return OK;
}
@@ -498,16 +481,16 @@ static int mods_unregister_and_free(struct file *fp,
struct MODS_MEM_INFO *p_del_mem)
{
struct MODS_MEM_INFO *p_mem_info;
MODS_PRIV private_data = fp->private_data;
struct mods_client *client = fp->private_data;
struct list_head *head;
struct list_head *iter;
mods_debug_printk(DEBUG_MEM_DETAILED, "free %p\n", p_del_mem);
if (unlikely(mutex_lock_interruptible(&private_data->mtx)))
if (unlikely(mutex_lock_interruptible(&client->mtx)))
return -EINTR;
head = private_data->mods_alloc_list;
head = &client->mem_alloc_list;
list_for_each(iter, head) {
p_mem_info = list_entry(iter, struct MODS_MEM_INFO, list);
@@ -515,7 +498,7 @@ static int mods_unregister_and_free(struct file *fp,
if (p_del_mem == p_mem_info) {
list_del(iter);
mutex_unlock(&private_data->mtx);
mutex_unlock(&client->mtx);
mods_dma_unmap_all(p_mem_info, NULL);
mods_restore_cache(p_mem_info);
@@ -527,7 +510,7 @@ static int mods_unregister_and_free(struct file *fp,
}
}
mutex_unlock(&private_data->mtx);
mutex_unlock(&client->mtx);
mods_error_printk("failed to unregister allocation %p\n",
p_del_mem);
@@ -536,10 +519,10 @@ static int mods_unregister_and_free(struct file *fp,
int mods_unregister_all_alloc(struct file *fp)
{
MODS_PRIV private_data = fp->private_data;
struct list_head *head = private_data->mods_alloc_list;
struct list_head *iter;
struct list_head *tmp;
struct mods_client *client = fp->private_data;
struct list_head *head = &client->mem_alloc_list;
struct list_head *iter;
struct list_head *tmp;
list_for_each_safe(iter, tmp, head) {
struct MODS_MEM_INFO *p_mem_info;
@@ -587,8 +570,8 @@ int mods_get_alloc_offset(struct MODS_MEM_INFO *p_mem_info,
struct MODS_MEM_INFO *mods_find_alloc(struct file *fp, u64 phys_addr)
{
MODS_PRIV private_data = fp->private_data;
struct list_head *plist_head = private_data->mods_alloc_list;
struct mods_client *client = fp->private_data;
struct list_head *plist_head = &client->mem_alloc_list;
struct list_head *plist_iter;
struct MODS_MEM_INFO *p_mem_info;
u64 offset;
@@ -732,9 +715,10 @@ int esc_mods_device_alloc_pages_2(struct file *fp,
p_mem_info->addr_bits = p->address_bits;
p_mem_info->num_pages = num_pages;
p_mem_info->numa_node = numa_node_id();
p_mem_info->dma_map_list = NULL;
p_mem_info->dev = NULL;
INIT_LIST_HEAD(&p_mem_info->dma_map_list);
if (p->pci_device.bus || p->pci_device.device) {
unsigned int devfn = PCI_DEVFN(p->pci_device.device,
p->pci_device.function);
@@ -799,13 +783,11 @@ int esc_mods_device_alloc_pages_2(struct file *fp,
mods_debug_printk(DEBUG_MEM_DETAILED, "alloc %p\n", p_mem_info);
ret = mods_register_alloc(fp, p_mem_info);
LOG_EXT();
return ret;
failed:
if (p_mem_info) {
kfree(p_mem_info->dma_map_list);
if (ret)
kfree(p_mem_info);
}
LOG_EXT();
return ret;
}
@@ -876,7 +858,7 @@ int esc_mods_free_pages(struct file *fp, struct MODS_FREE_PAGES *p)
int esc_mods_set_mem_type(struct file *fp, struct MODS_MEMORY_TYPE *p)
{
struct MODS_MEM_INFO *p_mem_info;
MODS_PRIV private_data = fp->private_data;
struct mods_client *client = fp->private_data;
LOG_ENT();
@@ -892,25 +874,25 @@ int esc_mods_set_mem_type(struct file *fp, struct MODS_MEMORY_TYPE *p)
return -EINVAL;
}
if (unlikely(mutex_lock_interruptible(&private_data->mtx))) {
if (unlikely(mutex_lock_interruptible(&client->mtx))) {
LOG_EXT();
return -EINTR;
}
p_mem_info = mods_find_alloc(fp, p->physical_address);
if (p_mem_info) {
mutex_unlock(&private_data->mtx);
mutex_unlock(&client->mtx);
mods_error_printk("cannot set mem type on phys addr 0x%llx\n",
p->physical_address);
LOG_EXT();
return -EINVAL;
}
private_data->mem_type.dma_addr = p->physical_address;
private_data->mem_type.size = p->size;
private_data->mem_type.type = p->type;
client->mem_type.dma_addr = p->physical_address;
client->mem_type.size = p->size;
client->mem_type.type = p->type;
mutex_unlock(&private_data->mtx);
mutex_unlock(&client->mtx);
LOG_EXT();
return OK;
@@ -1071,18 +1053,18 @@ int esc_mods_virtual_to_phys(struct file *fp,
struct MODS_VIRTUAL_TO_PHYSICAL *p)
{
struct MODS_GET_PHYSICAL_ADDRESS get_phys_addr;
MODS_PRIV private_data = fp->private_data;
struct list_head *head;
struct list_head *iter;
struct mods_client *client = fp->private_data;
struct list_head *head;
struct list_head *iter;
LOG_ENT();
if (unlikely(mutex_lock_interruptible(&private_data->mtx))) {
if (unlikely(mutex_lock_interruptible(&client->mtx))) {
LOG_EXT();
return -EINTR;
}
head = private_data->mods_mapping_list;
head = &client->mem_map_list;
list_for_each(iter, head) {
struct SYS_MAP_MEMORY *p_map_mem;
@@ -1103,7 +1085,7 @@ int esc_mods_virtual_to_phys(struct file *fp,
if (!p_map_mem->p_mem_info) {
p->physical_address = p_map_mem->dma_addr
+ virt_offs;
mutex_unlock(&private_data->mtx);
mutex_unlock(&client->mtx);
mods_debug_printk(DEBUG_MEM_DETAILED,
"get phys: map %p virt 0x%llx -> 0x%llx\n",
@@ -1123,7 +1105,7 @@ int esc_mods_virtual_to_phys(struct file *fp,
(u64)(size_t)p_map_mem->p_mem_info;
get_phys_addr.offset = virt_offs + phys_offs;
mutex_unlock(&private_data->mtx);
mutex_unlock(&client->mtx);
ret = esc_mods_get_phys_addr(fp, &get_phys_addr);
if (ret != OK)
@@ -1140,7 +1122,7 @@ int esc_mods_virtual_to_phys(struct file *fp,
}
}
mutex_unlock(&private_data->mtx);
mutex_unlock(&client->mtx);
mods_error_printk("invalid virtual address\n");
return -EINVAL;
@@ -1150,7 +1132,7 @@ int esc_mods_phys_to_virtual(struct file *fp,
struct MODS_PHYSICAL_TO_VIRTUAL *p)
{
struct SYS_MAP_MEMORY *p_map_mem;
MODS_PRIV private_data = fp->private_data;
struct mods_client *client = fp->private_data;
struct list_head *head;
struct list_head *iter;
u64 offset;
@@ -1158,12 +1140,12 @@ int esc_mods_phys_to_virtual(struct file *fp,
LOG_ENT();
if (unlikely(mutex_lock_interruptible(&private_data->mtx))) {
if (unlikely(mutex_lock_interruptible(&client->mtx))) {
LOG_EXT();
return -EINTR;
}
head = private_data->mods_mapping_list;
head = &client->mem_map_list;
list_for_each(iter, head) {
p_map_mem = list_entry(iter, struct SYS_MAP_MEMORY, list);
@@ -1180,7 +1162,7 @@ int esc_mods_phys_to_virtual(struct file *fp,
- p_map_mem->dma_addr;
p->virtual_address = p_map_mem->virtual_addr
+ offset;
mutex_unlock(&private_data->mtx);
mutex_unlock(&client->mtx);
mods_debug_printk(DEBUG_MEM_DETAILED,
"get virt: map %p phys 0x%llx -> 0x%llx\n",
@@ -1207,7 +1189,7 @@ int esc_mods_phys_to_virtual(struct file *fp,
p->virtual_address = p_map_mem->virtual_addr
+ offset - map_offset;
mutex_unlock(&private_data->mtx);
mutex_unlock(&client->mtx);
mods_debug_printk(DEBUG_MEM_DETAILED,
"get virt: map %p phys 0x%llx -> 0x%llx\n",
p_map_mem, p->physical_address, p->virtual_address);
@@ -1216,7 +1198,7 @@ int esc_mods_phys_to_virtual(struct file *fp,
return OK;
}
}
mutex_unlock(&private_data->mtx);
mutex_unlock(&client->mtx);
mods_error_printk("phys addr 0x%llx is not mapped\n",
p->physical_address);
return -EINVAL;
@@ -1402,9 +1384,9 @@ static void clear_entry_cache_mappings
int esc_mods_flush_cpu_cache_range(struct file *fp,
struct MODS_FLUSH_CPU_CACHE_RANGE *p)
{
MODS_PRIV private_data = fp->private_data;
struct list_head *head;
struct list_head *iter;
struct mods_client *client = fp->private_data;
struct list_head *head;
struct list_head *iter;
if (irqs_disabled() || in_interrupt() ||
p->virt_addr_start > p->virt_addr_end ||
@@ -1414,12 +1396,12 @@ int esc_mods_flush_cpu_cache_range(struct file *fp,
return -EINVAL;
}
if (unlikely(mutex_lock_interruptible(&private_data->mtx))) {
if (unlikely(mutex_lock_interruptible(&client->mtx))) {
LOG_EXT();
return -EINTR;
}
head = private_data->mods_mapping_list;
head = &client->mem_map_list;
list_for_each(iter, head) {
struct SYS_MAP_MEMORY *p_map_mem
@@ -1455,7 +1437,7 @@ int esc_mods_flush_cpu_cache_range(struct file *fp,
virt_end);
}
}
mutex_unlock(&private_data->mtx);
mutex_unlock(&client->mtx);
return OK;
}

View File

@@ -25,84 +25,6 @@
#include <linux/dma-mapping.h>
#endif
/************************
* PCI HELPER FUNCTIONS *
************************/
/*
 * Remove one PCI resource mapping from the per-client list and unmap it
 * from the local device's IOMMU.
 *
 * fp        - MODS client file; private_data carries the mapping list.
 * p_del_map - entry to remove; expected to be on
 *             private_data->mods_pci_res_map_list.
 *
 * Returns OK on success, -EINTR if interrupted while taking the client
 * mutex, -EINVAL if the entry is not found on the list.  When the kernel
 * lacks pci_map_resource() support this is a no-op returning OK.
 */
static int mods_free_pci_res_map(struct file *fp,
struct MODS_PCI_RES_MAP_INFO *p_del_map)
{
#if defined(MODS_HAS_PCI_MAP_RESOURCE)
	struct MODS_PCI_RES_MAP_INFO *p_res_map;
	MODS_PRIV private_data = fp->private_data;
	struct list_head *head;
	struct list_head *iter;

	mods_debug_printk(DEBUG_PCI,
		"free pci resource map %p\n",
		p_del_map);

	if (unlikely(mutex_lock_interruptible(&private_data->mtx)))
		return -EINTR;

	head = private_data->mods_pci_res_map_list;

	list_for_each(iter, head) {
		p_res_map =
		list_entry(iter, struct MODS_PCI_RES_MAP_INFO, list);

		if (p_del_map == p_res_map) {
			list_del(iter);
			/* Entry is off the list, so it is safe to drop the
			 * lock before the (potentially slow) unmap below. */
			mutex_unlock(&private_data->mtx);

			pci_unmap_resource(p_res_map->dev,
			p_res_map->va,
			p_res_map->page_count * PAGE_SIZE,
			PCI_DMA_BIDIRECTIONAL);

			mods_debug_printk(DEBUG_PCI,
			"unmapped pci resource at 0x%llx from %u:%u:%u.%u\n",
			p_res_map->va,
			pci_domain_nr(p_res_map->dev->bus),
			p_res_map->dev->bus->number,
			PCI_SLOT(p_res_map->dev->devfn),
			PCI_FUNC(p_res_map->dev->devfn));

			kfree(p_res_map);
			return OK;
		}
	}

	mutex_unlock(&private_data->mtx);

	mods_error_printk("failed to unregister pci resource mapping %p\n",
	p_del_map);
	return -EINVAL;
#else
	return OK;
#endif
}
/* Tear down every PCI resource mapping owned by this client.
 * Propagates the first error reported by the per-entry free routine;
 * returns OK once the list has been fully drained. */
int mods_unregister_all_pci_res_mappings(struct file *fp)
{
	MODS_PRIV private_data = fp->private_data;
	struct list_head *head = private_data->mods_pci_res_map_list;
	struct list_head *pos;
	struct list_head *next;

	/* Safe iteration: mods_free_pci_res_map deletes entries. */
	list_for_each_safe(pos, next, head) {
		struct MODS_PCI_RES_MAP_INFO *p_map =
			list_entry(pos, struct MODS_PCI_RES_MAP_INFO, list);
		int err = mods_free_pci_res_map(fp, p_map);

		if (err)
			return err;
	}

	return OK;
}
/************************
* PCI ESCAPE FUNCTIONS *
************************/
@@ -134,19 +56,6 @@ static int mods_find_pci_dev(struct file *pfile,
p->pci_device.device = PCI_SLOT(dev->devfn);
p->pci_device.function = PCI_FUNC(dev->devfn);
#if defined(CONFIG_PPC64)
/* Enable device on the PCI bus */
if (mods_enable_device(pfile->private_data, dev)) {
mods_error_printk(
"unable to enable dev %04x:%02x:%02x.%x\n",
(unsigned int)p->pci_device.domain,
(unsigned int)p->pci_device.bus,
(unsigned int)p->pci_device.device,
(unsigned int)p->pci_device.function);
return -EINVAL;
}
#endif
return OK;
}
@@ -201,19 +110,6 @@ static int mods_find_pci_class_code(struct file *pfile,
p->pci_device.device = PCI_SLOT(dev->devfn);
p->pci_device.function = PCI_FUNC(dev->devfn);
#if defined(CONFIG_PPC64)
/* Enable device on the PCI bus */
if (mods_enable_device(pfile->private_data, dev)) {
mods_error_printk(
"unable to enable dev %04x:%02x:%02x.%x\n",
(unsigned int)p->pci_device.domain,
(unsigned int)p->pci_device.bus,
(unsigned int)p->pci_device.device,
(unsigned int)p->pci_device.function);
return -EINVAL;
}
#endif
return OK;
}
@@ -264,6 +160,27 @@ int esc_mods_pci_get_bar_info_2(struct file *pfile,
(int) p->pci_device.bus, (int) p->pci_device.device,
(int) p->pci_device.function, (int) p->bar_index);
#if defined(CONFIG_PPC64)
if (unlikely(mutex_lock_interruptible(mods_get_irq_mutex()))) {
LOG_EXT();
return -EINTR;
}
/* Enable device on the PCI bus */
if (mods_enable_device(pfile->private_data, dev) == 0) {
mods_error_printk(
"unable to enable dev %04x:%02x:%02x.%x\n",
(unsigned int)p->pci_device.domain,
(unsigned int)p->pci_device.bus,
(unsigned int)p->pci_device.device,
(unsigned int)p->pci_device.function);
mutex_unlock(mods_get_irq_mutex());
return -EINVAL;
}
mutex_unlock(mods_get_irq_mutex());
#endif
bar_resource_offset = 0;
for (i = 0; i < p->bar_index; i++) {
#if defined(MODS_HAS_IORESOURCE_MEM_64)
@@ -665,188 +582,22 @@ int esc_mods_device_numa_info(struct file *fp,
return OK;
}
/*
 * MODS_ESC_PCI_MAP_RESOURCE: map a range of a remote PCI device's BAR
 * through the IOMMU of a local PCI device so the local device can DMA
 * to it.
 *
 * On entry p->va holds a physical address inside the remote BAR; on
 * success it is replaced by the DMA address the local device must use.
 * Without pci_map_resource() kernel support this is a no-op returning
 * OK and p->va keeps the input physical address.
 *
 * Returns OK, -EINVAL for a bad device/resource/address, -ENOMEM on
 * allocation or mapping failure, -EINTR if interrupted on the mutex.
 */
int esc_mods_pci_map_resource(struct file *fp,
			      struct MODS_PCI_MAP_RESOURCE *p)
{
#if defined(MODS_HAS_PCI_MAP_RESOURCE)
	MODS_PRIV private_data = fp->private_data;
	unsigned int devfn;
	struct pci_dev *rem_dev;
	struct pci_dev *loc_dev;
	struct MODS_PCI_RES_MAP_INFO *p_res_map;

	LOG_ENT();

	devfn = PCI_DEVFN(p->local_pci_device.device,
			  p->local_pci_device.function);
	loc_dev = MODS_PCI_GET_SLOT(p->local_pci_device.domain,
				    p->local_pci_device.bus, devfn);
	if (!loc_dev) {
		mods_error_printk("Local PCI device %04x:%x:%02x.%x not found\n",
				  p->local_pci_device.domain,
				  p->local_pci_device.bus,
				  p->local_pci_device.device,
				  p->local_pci_device.function);
		LOG_EXT();
		return -EINVAL;
	}

	devfn = PCI_DEVFN(p->remote_pci_device.device,
			  p->remote_pci_device.function);
	rem_dev = MODS_PCI_GET_SLOT(p->remote_pci_device.domain,
				    p->remote_pci_device.bus, devfn);
	if (!rem_dev) {
		mods_error_printk("Remote PCI device %04x:%x:%02x.%x not found\n",
				  p->remote_pci_device.domain,
				  p->remote_pci_device.bus,
				  p->remote_pci_device.device,
				  p->remote_pci_device.function);
		LOG_EXT();
		return -EINVAL;
	}

	if ((p->resource_index >= DEVICE_COUNT_RESOURCE) ||
	    !pci_resource_len(rem_dev, p->resource_index)) {
		mods_error_printk(
			"Resource %u on device %04x:%x:%02x.%x not found\n",
			p->resource_index,
			p->remote_pci_device.domain,
			p->remote_pci_device.bus,
			p->remote_pci_device.device,
			p->remote_pci_device.function);
		LOG_EXT();
		return -EINVAL;
	}

	/* The requested range must lie entirely inside the BAR. */
	if ((p->va < pci_resource_start(rem_dev, p->resource_index)) ||
	    (p->va > pci_resource_end(rem_dev, p->resource_index)) ||
	    (p->va + p->page_count * PAGE_SIZE >
	     pci_resource_end(rem_dev, p->resource_index))) {
		/* Fixed: the format string was missing the separating comma
		 * and its conversion specifiers did not match the args. */
		mods_error_printk(
			"bad resource address 0x%llx on device %04x:%x:%02x.%x\n",
			(unsigned long long)p->va,
			p->remote_pci_device.domain,
			p->remote_pci_device.bus,
			p->remote_pci_device.device,
			p->remote_pci_device.function);
		LOG_EXT();
		return -EINVAL;
	}

	p_res_map = kmalloc(sizeof(struct MODS_PCI_RES_MAP_INFO),
			    GFP_KERNEL | __GFP_NORETRY);
	if (unlikely(!p_res_map)) {
		mods_error_printk("failed to allocate pci res map struct\n");
		LOG_EXT();
		return -ENOMEM;
	}

	p_res_map->dev = loc_dev;
	p_res_map->page_count = p->page_count;
	/* Fixed: use p->resource_index (bare resource_index was undeclared)
	 * and do not clobber the returned DMA address with the input va. */
	p_res_map->va = pci_map_resource(loc_dev,
		&rem_dev->resource[p->resource_index],
		p->va - pci_resource_start(rem_dev, p->resource_index),
		p->page_count * PAGE_SIZE,
		PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(loc_dev, p_res_map->va)) {
		kfree(p_res_map);
		LOG_EXT();
		return -ENOMEM;
	}

	if (unlikely(mutex_lock_interruptible(&private_data->mtx))) {
		/* Fixed: undo the mapping before bailing out so neither the
		 * IOMMU entry nor the allocation leaks. */
		pci_unmap_resource(loc_dev,
				   p_res_map->va,
				   p->page_count * PAGE_SIZE,
				   PCI_DMA_BIDIRECTIONAL);
		kfree(p_res_map);
		LOG_EXT();
		return -EINTR;
	}

	list_add(&p_res_map->list, private_data->mods_pci_res_map_list);
	mutex_unlock(&private_data->mtx);

	/* Report the DMA address the local device should use. */
	p->va = p_res_map->va;

	mods_debug_printk(DEBUG_PCI,
		"mapped pci resource %u from %u:%u:%u.%u to %u:%u:%u.%u at 0x%llx\n",
		p->resource_index,
		p->remote_pci_device.domain,
		p->remote_pci_device.bus,
		p->remote_pci_device.device,
		p->remote_pci_device.function,
		p->local_pci_device.domain,
		p->local_pci_device.bus,
		p->local_pci_device.device,
		p->local_pci_device.function,
		p->va);

	LOG_EXT();
#else
	/*
	 * We still return OK, in case the system is running an older kernel
	 * with the IOMMU disabled. The va parameter will still contain the
	 * input physical address, which is what the device should use in this
	 * fallback case.
	 */
#endif
	return OK;
}
/*
 * MODS_ESC_PCI_UNMAP_RESOURCE: undo a mapping created by
 * esc_mods_pci_map_resource, identified by local device and DMA address.
 *
 * Returns OK on success, -EINVAL when the device or the mapping cannot
 * be found, or the status of mods_free_pci_res_map.  Without
 * pci_map_resource() kernel support this is a no-op returning OK.
 */
int esc_mods_pci_unmap_resource(struct file *fp,
				struct MODS_PCI_UNMAP_RESOURCE *p)
{
#if defined(MODS_HAS_PCI_MAP_RESOURCE)
	MODS_PRIV private_data = fp->private_data;
	unsigned int devfn = PCI_DEVFN(p->pci_device.device,
				       p->pci_device.function);
	struct pci_dev *dev = MODS_PCI_GET_SLOT(p->pci_device.domain,
						p->pci_device.bus, devfn);
	struct list_head *head = private_data->mods_pci_res_map_list;
	struct list_head *iter;
	struct list_head *tmp;

	LOG_ENT();

	if (!dev) {
		mods_error_printk("PCI device %04x:%x:%02x.%x not found\n",
				  p->pci_device.domain,
				  p->pci_device.bus,
				  p->pci_device.device,
				  p->pci_device.function);
		LOG_EXT();
		return -EINVAL;
	}

	/* Safe iteration: the free routine removes the matched entry. */
	list_for_each_safe(iter, tmp, head) {
		struct MODS_PCI_RES_MAP_INFO *p_pci_res_map_info;

		p_pci_res_map_info =
			list_entry(iter, struct MODS_PCI_RES_MAP_INFO, list);

		if ((p_pci_res_map_info->dev == dev) &&
		    (p_pci_res_map_info->va == p->va)) {
			int ret = mods_free_pci_res_map(fp, p_pci_res_map_info);

			LOG_EXT();
			return ret;
		}
	}

	mods_error_printk(
		"PCI mapping 0x%llx on device %04x:%x:%02x.%x not found\n",
		p->va,
		p->pci_device.domain,
		p->pci_device.bus,
		p->pci_device.device,
		p->pci_device.function);
	/* Fixed: balance LOG_ENT() on this error path as well. */
	LOG_EXT();
	return -EINVAL;
#else
	return OK;
#endif
}
/* Legacy MODS_ESC_GET_IOMMU_STATE entry point: reduce the tri-state
 * result of the newer query to the old boolean convention — 1 when the
 * SW I/O TLB is disabled, 0 otherwise. */
int esc_mods_get_iommu_state(struct file *pfile,
			     struct MODS_GET_IOMMU_STATE *state)
{
	int err = esc_mods_get_iommu_state_2(pfile, state);

	if (!err) {
		if (state->state == MODS_SWIOTLB_DISABLED)
			state->state = 1;
		else
			state->state = 0;
	}

	return err;
}
int esc_mods_get_iommu_state_2(struct file *pfile,
struct MODS_GET_IOMMU_STATE *state)
{
#if !defined(CONFIG_SWIOTLB)
/* SWIOTLB turned off in the kernel, HW IOMMU active */
state->state = 1;
state->state = MODS_SWIOTLB_DISABLED;
#elif defined(MODS_HAS_DMA_OPS) && \
(defined(MODS_HAS_NONCOH_DMA_OPS) || defined(MODS_HAS_MAP_SG_ATTRS))
@@ -859,18 +610,53 @@ int esc_mods_get_iommu_state(struct file *pfile,
const struct dma_map_ops *ops = get_dma_ops(&dev->dev);
#if defined(MODS_HAS_NONCOH_DMA_OPS)
state->state = ops != &noncoherent_swiotlb_dma_ops &&
ops != &coherent_swiotlb_dma_ops;
state->state = (ops != &noncoherent_swiotlb_dma_ops &&
ops != &coherent_swiotlb_dma_ops)
? MODS_SWIOTLB_DISABLED : MODS_SWIOTLB_ACTIVE;
#else
state->state = ops->map_sg != swiotlb_map_sg_attrs;
state->state = ops->map_sg != swiotlb_map_sg_attrs
? MODS_SWIOTLB_DISABLED : MODS_SWIOTLB_ACTIVE;
#endif
#elif defined(CONFIG_PPC64) || defined(CONFIG_ARM64)
/* Old/new kernels, no way to detect, default to HW IOMMU active */
state->state = 1;
/* No way to detect, assume SW I/O TLB is disabled on ppc64/arm64 */
state->state = MODS_SWIOTLB_DISABLED;
#else
/* Old/new kernels, no way to detect, on x86 default to no IOMMU */
state->state = 0;
/* No way to detect on old kernel */
state->state = MODS_SWIOTLB_INDETERMINATE;
#endif
return OK;
}
/*
 * MODS_ESC_PCI_SET_DMA_MASK: set the DMA addressing capability of a PCI
 * device to the requested number of address bits (up to 64).
 *
 * Sets the streaming mask first and, on success, the coherent mask to
 * the same value.  Returns OK on success, -EINVAL for an unknown device
 * or num_bits > 64, or the error from pci_set_dma_mask /
 * pci_set_consistent_dma_mask.
 */
int esc_mods_pci_set_dma_mask(struct file *file,
			      struct MODS_PCI_DMA_MASK *dma_mask)
{
	int err;
	unsigned int devfn = PCI_DEVFN(dma_mask->pci_device.device,
				       dma_mask->pci_device.function);
	struct pci_dev *dev = MODS_PCI_GET_SLOT(dma_mask->pci_device.domain,
						dma_mask->pci_device.bus,
						devfn);
	u64 mask;

	/* Fixed: MODS_PCI_GET_SLOT can return NULL for an unknown device;
	 * passing that to pci_set_dma_mask would dereference NULL. */
	if (!dev) {
		mods_error_printk("PCI device %04x:%x:%02x.%x not found\n",
				  (unsigned int)dma_mask->pci_device.domain,
				  (unsigned int)dma_mask->pci_device.bus,
				  (unsigned int)dma_mask->pci_device.device,
				  (unsigned int)dma_mask->pci_device.function);
		return -EINVAL;
	}

	if (dma_mask->num_bits > 64)
		return -EINVAL;

	/* 1ULL << 64 is undefined behavior, so treat 64 bits specially. */
	mask = dma_mask->num_bits == 64 ? ~0ULL : (1ULL<<dma_mask->num_bits)-1;

	err = pci_set_dma_mask(dev, mask);
	if (err) {
		mods_error_printk("failed to set dma mask 0x%llx for dev %04x:%x:%02x.%x\n",
				  mask,
				  (unsigned int)dma_mask->pci_device.domain,
				  (unsigned int)dma_mask->pci_device.bus,
				  (unsigned int)dma_mask->pci_device.device,
				  (unsigned int)dma_mask->pci_device.function);
#if defined(CONFIG_PPC64)
		/* Ignore error if TCE bypass is on */
		if (dev->dma_mask == ~0ULL)
			err = OK;
#endif
	} else
		err = pci_set_consistent_dma_mask(dev, mask);

	return err;
}