Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git
misc: mods: update MODS kernel driver to 3.99
Change-Id: I7ca22718af4e4f897ec0d410949fa1c14022eec1
Signed-off-by: Chris Dragan <kdragan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2422786
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: Laxman Dewangan
Parent: 7d821e9865
Commit: 125e88df0a
@@ -151,6 +151,17 @@ static int mods_extract_acpi_object(struct mods_client *client,
             }
             break;
 
+        case ACPI_TYPE_LOCAL_REFERENCE:
+            if (obj->reference.actual_type == ACPI_TYPE_POWER) {
+                memcpy(*buf, &obj->reference.handle,
+                       sizeof(obj->reference.handle));
+                *buf += sizeof(obj->reference.handle);
+            } else {
+                cl_error("Unsupported ACPI reference type\n");
+                err = -EINVAL;
+            }
+            break;
+
         default:
             cl_error("unsupported ACPI output type 0x%02x from method %s\n",
                      (unsigned int)obj->type,
@@ -269,6 +280,9 @@ static int mods_eval_acpi_method(struct mods_client *client,
         return -EINVAL;
     }
 
+    input.count = p->argument_count;
+    input.pointer = acpi_params;
+
     for (i = 0; i < p->argument_count; i++) {
         switch (p->argument[i].type) {
         case ACPI_MODS_TYPE_INTEGER: {
@@ -285,6 +299,28 @@ static int mods_eval_acpi_method(struct mods_client *client,
                 = p->in_buffer + p->argument[i].buffer.offset;
             break;
         }
+        case ACPI_MODS_TYPE_METHOD: {
+            memcpy(&acpi_method_handler,
+                   &p->argument[i].method.handle,
+                   sizeof(acpi_method_handler));
+
+            if (!acpi_method_handler) {
+                cl_error("ACPI: Invalid reference handle 0\n");
+                pci_dev_put(dev);
+                LOG_EXT();
+                return -EINVAL;
+            }
+
+            if (i != p->argument_count - 1) {
+                cl_error("ACPI: Invalid argument count\n");
+                pci_dev_put(dev);
+                LOG_EXT();
+                return -EINVAL;
+            }
+
+            --input.count;
+            break;
+        }
         default: {
             cl_error("unsupported ACPI argument type\n");
             pci_dev_put(dev);
@@ -294,9 +330,6 @@ static int mods_eval_acpi_method(struct mods_client *client,
         }
     }
 
-    input.count = p->argument_count;
-    input.pointer = acpi_params;
-
     status = acpi_evaluate_object(acpi_method_handler,
                                   pdevice ? p->method_name : NULL,
                                   &input,
@@ -543,6 +543,8 @@ int esc_mods_get_iommu_state_2(struct mods_client *client,
                                struct MODS_GET_IOMMU_STATE *state);
 int esc_mods_pci_set_dma_mask(struct mods_client *client,
                               struct MODS_PCI_DMA_MASK *dma_mask);
+int esc_mods_pci_reset_function(struct mods_client *client,
+                                struct mods_pci_dev_2 *pcidev);
 #endif
 /* irq */
 #if defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
@@ -446,8 +446,8 @@ static int __init mods_init_module(void)
         return -EBUSY;
 #endif
 
-#if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF_RESOLVE) && \
-    defined(CONFIG_OF_DYNAMIC)
+#if defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_COMMON_CLK) && \
+    defined(CONFIG_OF_RESOLVE) && defined(CONFIG_OF_DYNAMIC)
     mods_init_clock_api();
 #endif
 
@@ -500,8 +500,8 @@ static void __exit mods_exit_module(void)
 
     misc_deregister(&mods_dev);
 
-#if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF_RESOLVE) && \
-    defined(CONFIG_OF_DYNAMIC)
+#if defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_COMMON_CLK) && \
+    defined(CONFIG_OF_RESOLVE) && defined(CONFIG_OF_DYNAMIC)
     mods_shutdown_clock_api();
 #endif
 
@@ -1525,17 +1525,19 @@ static int esc_mods_verify_access_token(struct mods_client *client,
     return err;
 }
 
-struct mods_sysfs_work {
+struct mods_file_work {
     struct work_struct work;
-    struct MODS_SYSFS_NODE *pdata;
-    int err;
+    const char *path;
+    const char *data;
+    __u32 data_size;
+    int err;
 };
 
 static void sysfs_write_task(struct work_struct *w)
 {
-    struct mods_sysfs_work *task = container_of(w,
-                                                struct mods_sysfs_work,
-                                                work);
+    struct mods_file_work *task = container_of(w,
                                                struct mods_file_work,
+                                               work);
     struct file *f;
     mm_segment_t old_fs;
 
@@ -1546,16 +1548,15 @@ static void sysfs_write_task(struct work_struct *w)
     old_fs = get_fs();
     set_fs(KERNEL_DS);
 
-    f = filp_open(task->pdata->path, O_WRONLY, 0);
+    f = filp_open(task->path, O_WRONLY, 0);
     if (IS_ERR(f))
         task->err = PTR_ERR(f);
     else {
         f->f_pos = 0;
-        if (task->pdata->size <= MODS_MAX_SYSFS_FILE_SIZE)
-            task->err = f->f_op->write(f,
-                                       task->pdata->contents,
-                                       task->pdata->size,
-                                       &f->f_pos);
+        task->err = f->f_op->write(f,
+                                   task->data,
+                                   task->data_size,
+                                   &f->f_pos);
         filp_close(f, NULL);
     }
 
@@ -1564,35 +1565,82 @@ static void sysfs_write_task(struct work_struct *w)
     LOG_EXT();
 }
 
-static int esc_mods_write_sysfs_node(struct mods_client *client,
-                                     struct MODS_SYSFS_NODE *pdata)
+static int run_write_task(struct mods_client *client,
+                          struct mods_file_work *task)
 {
-    int err = -EINVAL;
-    struct mods_sysfs_work task;
     struct workqueue_struct *wq;
 
-    LOG_ENT();
-
-    memmove(&pdata->path[5], pdata->path, MODS_MAX_SYSFS_PATH_LEN);
-    memcpy(pdata->path, "/sys/", 5);
-    pdata->path[MODS_MAX_SYSFS_PATH_BUF_SIZE - 1] = 0;
-
-    task.pdata = pdata;
-
-    wq = create_singlethread_workqueue("mods_sysfs_write");
+    wq = create_singlethread_workqueue("mods_file_write");
     if (!wq) {
-        LOG_EXT();
-        return err;
+        cl_error("failed to create work queue\n");
+        return -ENOMEM;
     }
 
-    INIT_WORK(&task.work, sysfs_write_task);
-    queue_work(wq, &task.work);
+    cl_info("write %.*s to %s\n", task->data_size, task->data, task->path);
+
+    INIT_WORK(&task->work, sysfs_write_task);
+    queue_work(wq, &task->work);
     flush_workqueue(wq);
     destroy_workqueue(wq);
 
-    err = task.err;
-    if (err > 0)
-        err = OK;
+    if (task->err < 0)
+        cl_error("failed to write %.*s to %s\n",
+                 task->data_size, task->data, task->path);
+
+    return (task->err > 0) ? 0 : task->err;
+}
+
+static int esc_mods_write_sysfs_node(struct mods_client *client,
+                                     struct MODS_SYSFS_NODE *pdata)
+{
+    int err;
+    struct mods_file_work task;
+
+    LOG_ENT();
+
+    if (pdata->size > MODS_MAX_SYSFS_FILE_SIZE) {
+        cl_error("invalid data size %u, max allowed is %u\n",
+                 pdata->size, MODS_MAX_SYSFS_FILE_SIZE);
+        LOG_EXT();
+        return -EINVAL;
+    }
+
+    memmove(&pdata->path[5], pdata->path, sizeof(pdata->path) - 5);
+    memcpy(pdata->path, "/sys/", 5);
+    pdata->path[sizeof(pdata->path) - 1] = 0;
+
+    task.path = pdata->path;
+    task.data = pdata->contents;
+    task.data_size = pdata->size;
+
+    err = run_write_task(client, &task);
+
+    LOG_EXT();
+    return err;
+}
+
+static int esc_mods_sysctl_write_int(struct mods_client *client,
+                                     struct MODS_SYSCTL_INT *pdata)
+{
+    int err;
+    struct mods_file_work task;
+    char data[21];
+    int data_size;
+
+    LOG_ENT();
+
+    memmove(&pdata->path[10], pdata->path, sizeof(pdata->path) - 10);
+    memcpy(pdata->path, "/proc/sys/", 10);
+    pdata->path[sizeof(pdata->path) - 1] = 0;
+
+    data_size = snprintf(data, sizeof(data),
+                         "%lld", (long long)pdata->value);
+
+    task.path = pdata->path;
+    task.data = data;
+    task.data_size = data_size;
+
+    err = run_write_task(client, &task);
 
     LOG_EXT();
     return err;
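
A side note on the new esc_mods_sysctl_write_int(): the char data[21] scratch buffer is sized for the worst-case decimal rendering of a signed 64-bit value, "-9223372036854775808" (20 characters) plus the NUL terminator. A minimal sketch in ordinary userspace C, separate from the driver code, confirming the sizing:

    /* Sketch: worst-case "%lld" length of a 64-bit signed integer. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        char buf[21]; /* 20 digits/sign + NUL, same as the driver's data[21] */
        int n = snprintf(buf, sizeof(buf), "%" PRId64, INT64_MIN);
        printf("%s (%d chars)\n", buf, n); /* prints 20 */
        return 0;
    }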
@@ -1862,6 +1910,12 @@ static long mods_krnl_ioctl(struct file *fp,
                    esc_mods_pci_set_dma_mask,
                    MODS_PCI_DMA_MASK);
         break;
+
+    case MODS_ESC_PCI_RESET_FUNCTION:
+        MODS_IOCTL(MODS_ESC_PCI_RESET_FUNCTION,
+                   esc_mods_pci_reset_function,
+                   mods_pci_dev_2);
+        break;
 #endif
 
     case MODS_ESC_ALLOC_PAGES:
@@ -2114,8 +2168,8 @@ static long mods_krnl_ioctl(struct file *fp,
                    esc_mods_get_kernel_version, MODS_GET_VERSION);
         break;
 
-#if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF_RESOLVE) && \
-    defined(CONFIG_OF_DYNAMIC)
+#if defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_COMMON_CLK) && \
+    defined(CONFIG_OF_RESOLVE) && defined(CONFIG_OF_DYNAMIC)
     case MODS_ESC_GET_CLOCK_HANDLE:
         MODS_IOCTL(MODS_ESC_GET_CLOCK_HANDLE,
                    esc_mods_get_clock_handle, MODS_GET_CLOCK_HANDLE);
@@ -2398,6 +2452,12 @@ static long mods_krnl_ioctl(struct file *fp,
                             MODS_SYSFS_NODE);
         break;
 
+    case MODS_ESC_SYSCTL_WRITE_INT:
+        MODS_IOCTL_NORETVAL(MODS_ESC_SYSCTL_WRITE_INT,
+                            esc_mods_sysctl_write_int,
+                            MODS_SYSCTL_INT);
+        break;
+
     case MODS_ESC_REGISTER_IRQ_4:
         MODS_IOCTL_NORETVAL(MODS_ESC_REGISTER_IRQ_4,
                             esc_mods_register_irq_4, MODS_REGISTER_IRQ_4);
@@ -290,7 +290,7 @@ static int mods_dma_map_default_page(struct mods_client *client,
     u64 dev_addr;
     int err = pci_map_chunk(client, dev, chunk, &dev_addr);
 
-    if (err)
+    if (unlikely(err))
         return err;
 
     chunk->dev_addr = dev_addr;
@@ -333,7 +333,7 @@ static int mods_create_default_dma_map(struct mods_client *client,
         }
 
         err = mods_dma_map_default_page(client, chunk, dev);
-        if (err)
+        if (unlikely(err))
             break;
     }
 
@@ -361,17 +361,6 @@ static struct MODS_DMA_MAP *find_dma_map(struct MODS_MEM_INFO *p_mem_info,
     return NULL;
 }
 
-#if !defined(CONFIG_ARCH_TEGRA) || defined(CONFIG_CPA)
-static int mods_set_mem_type(u64 virt_addr, u64 pages, u8 type)
-{
-    if (type == MODS_ALLOC_UNCACHED)
-        return MODS_SET_MEMORY_UC(virt_addr, pages);
-    else if (type == MODS_ALLOC_WRITECOMBINE)
-        return MODS_SET_MEMORY_WC(virt_addr, pages);
-    return 0;
-}
-#endif
-
 /* In order to map pages as UC or WC to the CPU, we need to change their
  * attributes by calling set_memory_uc()/set_memory_wc(), respectively.
  * On some CPUs this operation is extremely slow. In order to incur
@@ -387,8 +376,9 @@ static int save_non_wb_chunks(struct mods_client *client,
     if (p_mem_info->cache_type == MODS_ALLOC_CACHED)
         return 0;
 
-    if (unlikely(mutex_lock_interruptible(&client->mtx)))
-        return -EINTR;
+    err = mutex_lock_interruptible(&client->mtx);
+    if (unlikely(err))
+        return err;
 
     /* Steal the chunks from MODS_MEM_INFO and put them on free list. */
 
@@ -403,7 +393,7 @@ static int save_non_wb_chunks(struct mods_client *client,
         free_chunk = kzalloc(sizeof(struct MODS_FREE_PHYS_CHUNK),
                              GFP_KERNEL | __GFP_NORETRY);
 
-        if (!free_chunk) {
+        if (unlikely(!free_chunk)) {
             err = -ENOMEM;
             break;
         }
@@ -449,16 +439,16 @@ static int mods_restore_cache_one_chunk(struct page *p_page, u8 order)
     u32 i;
 
     for (i = 0; i < num_pages; i++) {
-        u64 ptr = (u64)(size_t)kmap(p_page + i);
+        void *ptr = kmap(p_page + i);
+        int err = -ENOMEM;
 
-        if (ptr) {
-            int err = MODS_SET_MEMORY_WB(ptr, 1);
+        if (likely(ptr))
+            err = MODS_SET_MEMORY_WB((unsigned long)ptr, 1);
 
-            if (!final_err)
-                final_err = err;
-        }
+        kunmap(ptr);
 
-        kunmap((void *)(size_t)ptr);
+        if (likely(!final_err))
+            final_err = err;
     }
 
     return final_err;
@@ -471,12 +461,11 @@ static int release_free_chunks(struct mods_client *client)
     struct list_head *next;
     int final_err = 0;
 
-    if (unlikely(mutex_lock_interruptible(&client->mtx)))
-        return -EINTR;
+    mutex_lock(&client->mtx);
 
     head = &client->free_mem_list;
 
-    list_for_each_safe(iter, next, head) {
+    list_for_each_prev_safe(iter, next, head) {
 
         struct MODS_FREE_PHYS_CHUNK *free_chunk;
         int err;
@@ -489,8 +478,7 @@ static int release_free_chunks(struct mods_client *client)
 
         err = mods_restore_cache_one_chunk(free_chunk->p_page,
                                            free_chunk->order);
-
-        if (!final_err)
+        if (likely(!final_err))
             final_err = err;
 
         __free_pages(free_chunk->p_page, free_chunk->order);
@@ -502,10 +490,14 @@ static int release_free_chunks(struct mods_client *client)
 
     mutex_unlock(&client->mtx);
 
+    if (unlikely(final_err))
+        cl_error("failed to restore cache attributes\n");
+
     return final_err;
 }
 
-static int mods_restore_cache(struct MODS_MEM_INFO *p_mem_info)
+static int mods_restore_cache(struct mods_client *client,
+                              struct MODS_MEM_INFO *p_mem_info)
 {
     unsigned int i;
     int final_err = 0;
@@ -522,10 +514,13 @@ static int mods_restore_cache(struct mods_client *client,
             continue;
 
         err = mods_restore_cache_one_chunk(chunk->p_page, chunk->order);
-        if (!final_err)
+        if (likely(!final_err))
             final_err = err;
     }
 
+    if (unlikely(final_err))
+        cl_error("failed to restore cache attributes\n");
+
     return final_err;
 }
 
@@ -534,7 +529,7 @@ static void mods_free_pages(struct mods_client *client,
 {
     unsigned int i;
 
-    mods_restore_cache(p_mem_info);
+    mods_restore_cache(client, p_mem_info);
 
 #if defined(CONFIG_ARCH_TEGRA)
     if (p_mem_info->iommu_mapped)
@@ -599,7 +594,8 @@ static struct page *mods_alloc_pages(struct mods_client *client,
     u8 dma32 = p_mem_info->dma32;
     int numa_node = p_mem_info->numa_node;
 
-    if (likely(!mutex_lock_interruptible(&client->mtx))) {
+    if ((cache_type != MODS_MEMORY_CACHED) &&
+        likely(!mutex_lock_interruptible(&client->mtx))) {
 
         struct list_head *iter;
         struct list_head *head = &client->free_mem_list;
@@ -647,7 +643,7 @@ static struct page *mods_alloc_pages(struct mods_client *client,
 
     *need_cup = 1;
 
-    if (p_page)
+    if (likely(p_page))
         atomic_add(1u << order, &client->num_pages);
 
     return p_page;
@@ -656,6 +652,7 @@ static struct page *mods_alloc_pages(struct mods_client *client,
 static int mods_alloc_contig_sys_pages(struct mods_client *client,
                                        struct MODS_MEM_INFO *p_mem_info)
 {
+    int err = -ENOMEM;
     u64 phys_addr;
     u64 dma_addr;
     u64 end_addr = 0;
@@ -671,10 +668,8 @@ static int mods_alloc_contig_sys_pages(struct mods_client *client,
 
     p_page = mods_alloc_pages(client, p_mem_info, order, &is_wb);
 
-    if (!p_page) {
-        LOG_EXT();
-        return -ENOMEM;
-    }
+    if (unlikely(!p_page))
+        goto failed;
 
     p_mem_info->pages[0].p_page = p_page;
 
@@ -682,20 +677,16 @@ static int mods_alloc_contig_sys_pages(struct mods_client *client,
     p_mem_info->pages[0].wc = 1;
 
     phys_addr = page_to_phys(p_page);
-    if (phys_addr == 0) {
+    if (unlikely(phys_addr == 0)) {
         cl_error("failed to determine physical address\n");
-        mods_free_pages(client, p_mem_info);
-        LOG_EXT();
-        return -ENOMEM;
+        goto failed;
     }
     dma_addr = MODS_PHYS_TO_DMA(phys_addr);
 
-    if (dma_addr >= (1ULL << DMA_BITS)) {
+    if (unlikely(dma_addr >= (1ULL << DMA_BITS))) {
         cl_error("dma_addr 0x%llx exceeds supported range\n",
                  dma_addr);
-        mods_free_pages(client, p_mem_info);
-        LOG_EXT();
-        return -ENOMEM;
+        goto failed;
     }
 
     p_mem_info->pages[0].dma_addr = dma_addr;
@@ -711,21 +702,16 @@ static int mods_alloc_contig_sys_pages(struct mods_client *client,
 
     end_addr = dma_addr +
                ((unsigned long)p_mem_info->num_pages << PAGE_SHIFT);
-    if ((p_mem_info->dma32) &&
-        (end_addr > 0x100000000ULL)) {
+    if (unlikely(p_mem_info->dma32 && (end_addr > 0x100000000ULL))) {
         cl_error("allocation exceeds 32-bit addressing\n");
-        mods_free_pages(client, p_mem_info);
-        LOG_EXT();
-        return -ENOMEM;
+        goto failed;
     }
 
-    if (mods_post_alloc(client, p_mem_info->pages, phys_addr, p_mem_info)) {
-        mods_free_pages(client, p_mem_info);
-        LOG_EXT();
-        return -EINVAL;
-    }
+    err = mods_post_alloc(client, p_mem_info->pages, phys_addr, p_mem_info);
+
+failed:
     LOG_EXT();
-    return 0;
+    return err;
 }
 
 static u32 mods_get_max_order_needed(u32 num_pages)
@@ -740,6 +726,7 @@ static u32 mods_get_max_order_needed(u32 num_pages)
 static int mods_alloc_noncontig_sys_pages(struct mods_client *client,
                                           struct MODS_MEM_INFO *p_mem_info)
 {
+    int err;
     u32 pages_left = p_mem_info->num_pages;
     u32 num_chunks = 0;
 
@@ -748,7 +735,6 @@ static int mods_alloc_noncontig_sys_pages(struct mods_client *client,
     memset(p_mem_info->pages, 0,
            p_mem_info->num_chunks * sizeof(p_mem_info->pages[0]));
 
-    /* alloc pages */
     while (pages_left > 0) {
         u64 phys_addr = 0;
         u64 dma_addr = 0;
@@ -756,6 +742,13 @@ static int mods_alloc_noncontig_sys_pages(struct mods_client *client,
         int is_wb = 1;
         struct MODS_PHYS_CHUNK *chunk = &p_mem_info->pages[num_chunks];
 
+        /* Fail if memory fragmentation is very high */
+        if (unlikely(num_chunks >= p_mem_info->num_chunks)) {
+            cl_error("detected high memory fragmentation\n");
+            err = -ENOMEM;
+            goto failed;
+        }
+
         for (;;) {
             chunk->p_page = mods_alloc_pages(client,
                                              p_mem_info,
@@ -768,8 +761,9 @@ static int mods_alloc_noncontig_sys_pages(struct mods_client *client,
             --order;
         }
 
-        if (!chunk->p_page) {
+        if (unlikely(!chunk->p_page)) {
             cl_error("out of memory\n");
+            err = -ENOMEM;
             goto failed;
         }
 
@@ -780,15 +774,17 @@ static int mods_alloc_noncontig_sys_pages(struct mods_client *client,
         chunk->order = order;
 
         phys_addr = page_to_phys(chunk->p_page);
-        if (phys_addr == 0) {
+        if (unlikely(phys_addr == 0)) {
             cl_error("phys addr lookup failed\n");
+            err = -ENOMEM;
             goto failed;
         }
         dma_addr = MODS_PHYS_TO_DMA(phys_addr);
 
-        if (dma_addr >= (1ULL << DMA_BITS)) {
+        if (unlikely(dma_addr >= (1ULL << DMA_BITS))) {
             cl_error("dma_addr 0x%llx exceeds supported range\n",
                      dma_addr);
+            err = -ENOMEM;
             goto failed;
         }
 
@@ -805,23 +801,25 @@ static int mods_alloc_noncontig_sys_pages(struct mods_client *client,
 
         ++num_chunks;
 
-        if (mods_post_alloc(client, chunk, phys_addr, p_mem_info))
+        err = mods_post_alloc(client, chunk, phys_addr, p_mem_info);
+        if (unlikely(err))
             goto failed;
     }
 
-    return 0;
+    err = 0;
 
 failed:
-    mods_free_pages(client, p_mem_info);
-    return -ENOMEM;
+    LOG_EXT();
+    return err;
 }
 
 static int mods_register_alloc(struct mods_client *client,
                                struct MODS_MEM_INFO *p_mem_info)
 {
-    if (unlikely(mutex_lock_interruptible(&client->mtx)))
-        return -EINTR;
+    int err = mutex_lock_interruptible(&client->mtx);
 
+    if (unlikely(err))
+        return err;
     list_add(&p_mem_info->list, &client->mem_alloc_list);
     mutex_unlock(&client->mtx);
     return OK;
@@ -853,8 +851,7 @@ static int mods_unregister_and_free(struct mods_client *client,
 
     cl_debug(DEBUG_MEM_DETAILED, "free %p\n", p_del_mem);
 
-    if (unlikely(mutex_lock_interruptible(&client->mtx)))
-        return -EINTR;
+    mutex_lock(&client->mtx);
 
     head = &client->mem_alloc_list;
 
@@ -898,12 +895,12 @@ int mods_unregister_all_alloc(struct mods_client *client)
 
         p_mem_info = list_entry(iter, struct MODS_MEM_INFO, list);
         err = mods_unregister_and_free(client, p_mem_info);
-        if (!final_err)
+        if (likely(!final_err))
             final_err = err;
     }
 
     err = release_free_chunks(client);
-    if (!final_err)
+    if (likely(!final_err))
         final_err = err;
 
     return final_err;
@@ -1060,7 +1057,10 @@ struct MODS_MEM_INFO *mods_find_alloc(struct mods_client *client, u64 phys_addr)
     return NULL;
 }
 
-static u32 mods_estimate_num_chunks(u32 num_pages)
+/* Estimate the initial number of chunks supported, assuming medium memory
+ * fragmentation.
+ */
+static u32 estimate_num_chunks(u32 num_pages)
 {
     u32 num_chunks = 0;
     u32 bit_scan;
@@ -1079,8 +1079,6 @@ static u32 estimate_num_chunks(u32 num_pages)
     if (num_chunks > num_pages)
         num_chunks = num_pages;
 
-    /* Now, if memory is heavily fragmented, we are screwed */
-
     return num_chunks;
 }
 
@@ -1111,12 +1109,10 @@ static struct MODS_MEM_INFO *optimize_chunks(struct mods_client *client,
     }
 
     if (p_new_mem_info) {
-        atomic_inc(&client->num_allocs);
         memcpy(p_new_mem_info, p_mem_info, alloc_size);
         p_new_mem_info->num_chunks = num_chunks;
         INIT_LIST_HEAD(&p_new_mem_info->dma_map_list);
         kfree(p_mem_info);
-        atomic_dec(&client->num_allocs);
         p_mem_info = p_new_mem_info;
     }
 
@@ -1130,14 +1126,16 @@ static struct MODS_MEM_INFO *optimize_chunks(struct mods_client *client,
 int esc_mods_alloc_pages_2(struct mods_client *client,
                            struct MODS_ALLOC_PAGES_2 *p)
 {
+    int err = -EINVAL;
     struct MODS_MEM_INFO *p_mem_info = NULL;
     u32 num_pages;
     u32 alloc_size;
     u32 num_chunks;
-    int err = OK;
 
     LOG_ENT();
 
+    p->memory_handle = 0;
+
     cl_debug(DEBUG_MEM_DETAILED,
              "alloc 0x%llx bytes flags=0x%x (%s %s%s%s%s%s) node=%d on dev %04x:%02x:%02x.%x\n",
              (unsigned long long)p->num_bytes,
@@ -1155,9 +1153,8 @@ int esc_mods_alloc_pages_2(struct mods_client *client,
              p->pci_device.device,
              p->pci_device.function);
 
-    if (!p->num_bytes) {
+    if (unlikely(!p->num_bytes)) {
         cl_error("zero bytes requested\n");
-        err = -EINVAL;
         goto failed;
     }
 
@@ -1165,29 +1162,27 @@ int esc_mods_alloc_pages_2(struct mods_client *client,
     if (p->flags & MODS_ALLOC_CONTIGUOUS)
         num_chunks = 1;
     else
-        num_chunks = mods_estimate_num_chunks(num_pages);
+        num_chunks = estimate_num_chunks(num_pages);
     alloc_size = sizeof(*p_mem_info) +
                  (num_chunks - 1) * sizeof(struct MODS_PHYS_CHUNK);
 
-    if (((u64)num_pages << PAGE_SHIFT) < p->num_bytes) {
+    if (unlikely(((u64)num_pages << PAGE_SHIFT) < p->num_bytes)) {
         cl_error("invalid allocation size requested: 0x%llx\n",
                  (unsigned long long)p->num_bytes);
-        err = -EINVAL;
         goto failed;
     }
 
-    if ((p->flags & MODS_ALLOC_USE_NUMA) &&
+    if (unlikely((p->flags & MODS_ALLOC_USE_NUMA) &&
         (p->numa_node != MODS_ANY_NUMA_NODE) &&
         ((unsigned int)p->numa_node >=
-         (unsigned int)num_possible_nodes())) {
+         (unsigned int)num_possible_nodes()))) {
 
         cl_error("invalid NUMA node: %d\n", p->numa_node);
-        err = -EINVAL;
         goto failed;
     }
 
 #ifdef CONFIG_PPC64
-    if ((p->flags & MODS_ALLOC_CACHE_MASK) != MODS_ALLOC_CACHED) {
+    if (unlikely((p->flags & MODS_ALLOC_CACHE_MASK) != MODS_ALLOC_CACHED)) {
         cl_error("unsupported cache attr %u (%s)\n",
                  p->flags & MODS_ALLOC_CACHE_MASK,
                  mods_get_prot_str(p->flags & MODS_ALLOC_CACHE_MASK));
@@ -1205,20 +1200,20 @@ int esc_mods_alloc_pages_2(struct mods_client *client,
     }
     atomic_inc(&client->num_allocs);
 
     p_mem_info->num_chunks = num_chunks;
     p_mem_info->num_pages = num_pages;
     p_mem_info->cache_type = p->flags & MODS_ALLOC_CACHE_MASK;
     p_mem_info->dma32 = (p->flags & MODS_ALLOC_DMA32) ? true : false;
     p_mem_info->contig = (p->flags & MODS_ALLOC_CONTIGUOUS)
                          ? true : false;
     p_mem_info->force_numa = (p->flags & MODS_ALLOC_FORCE_NUMA)
                              ? true : false;
 #ifdef MODS_HASNT_NUMA_NO_NODE
     p_mem_info->numa_node = numa_node_id();
 #else
     p_mem_info->numa_node = NUMA_NO_NODE;
 #endif
     p_mem_info->dev = NULL;
 
     if ((p->flags & MODS_ALLOC_USE_NUMA) &&
         p->numa_node != MODS_ANY_NUMA_NODE)
@@ -1270,18 +1265,16 @@ int esc_mods_alloc_pages_2(struct mods_client *client,
     }
 #endif
 
-    p->memory_handle = 0;
-
     if (p->flags & MODS_ALLOC_CONTIGUOUS)
         err = mods_alloc_contig_sys_pages(client, p_mem_info);
     else {
         err = mods_alloc_noncontig_sys_pages(client, p_mem_info);
 
-        if (!err)
+        if (likely(!err))
             p_mem_info = optimize_chunks(client, p_mem_info);
     }
 
-    if (err) {
+    if (unlikely(err)) {
         cl_error("failed to alloc 0x%lx %s bytes, %s, node %d%s\n",
                  (unsigned long)p_mem_info->num_pages << PAGE_SHIFT,
                  (p->flags & MODS_ALLOC_CONTIGUOUS) ? "contiguous" :
@@ -1292,12 +1285,14 @@ int esc_mods_alloc_pages_2(struct mods_client *client,
         goto failed;
     }
 
+    err = mods_register_alloc(client, p_mem_info);
+    if (unlikely(err))
+        goto failed;
+
     p->memory_handle = (u64)(size_t)p_mem_info;
 
     cl_debug(DEBUG_MEM_DETAILED, "alloc %p\n", p_mem_info);
 
-    err = mods_register_alloc(client, p_mem_info);
-
 failed:
     if (unlikely(err && p_mem_info)) {
         mods_free_pages(client, p_mem_info);
@@ -1329,7 +1324,7 @@ int esc_mods_device_alloc_pages_2(struct mods_client *client,
         flags |= MODS_ALLOC_UNCACHED;
     else if (p->attrib == MODS_MEMORY_WRITECOMBINE)
         flags |= MODS_ALLOC_WRITECOMBINE;
-    else if (p->attrib != MODS_MEMORY_CACHED) {
+    else if (unlikely(p->attrib != MODS_MEMORY_CACHED)) {
         cl_error("invalid cache attrib: %u\n", p->attrib);
         LOG_EXT();
         return -ENOMEM;
@@ -1346,7 +1341,7 @@ int esc_mods_device_alloc_pages_2(struct mods_client *client,
     dev_alloc_pages.pci_device = p->pci_device;
 
     err = esc_mods_alloc_pages_2(client, &dev_alloc_pages);
-    if (!err)
+    if (likely(!err))
         p->memory_handle = dev_alloc_pages.memory_handle;
 
     LOG_EXT();
@@ -1372,7 +1367,7 @@ int esc_mods_device_alloc_pages(struct mods_client *client,
         flags |= MODS_ALLOC_UNCACHED;
     else if (p->attrib == MODS_MEMORY_WRITECOMBINE)
         flags |= MODS_ALLOC_WRITECOMBINE;
-    else if (p->attrib != MODS_MEMORY_CACHED) {
+    else if (unlikely(p->attrib != MODS_MEMORY_CACHED)) {
         cl_error("invalid cache attrib: %u\n", p->attrib);
         LOG_EXT();
         return -ENOMEM;
@@ -1392,7 +1387,7 @@ int esc_mods_device_alloc_pages(struct mods_client *client,
     dev_alloc_pages.pci_device.function = p->pci_device.function;
 
     err = esc_mods_alloc_pages_2(client, &dev_alloc_pages);
-    if (!err)
+    if (likely(!err))
         p->memory_handle = dev_alloc_pages.memory_handle;
 
     LOG_EXT();
@@ -1417,7 +1412,7 @@ int esc_mods_alloc_pages(struct mods_client *client, struct MODS_ALLOC_PAGES *p)
         flags |= MODS_ALLOC_UNCACHED;
     else if (p->attrib == MODS_MEMORY_WRITECOMBINE)
         flags |= MODS_ALLOC_WRITECOMBINE;
-    else if (p->attrib != MODS_MEMORY_CACHED) {
+    else if (unlikely(p->attrib != MODS_MEMORY_CACHED)) {
         cl_error("invalid cache attrib: %u\n", p->attrib);
         LOG_EXT();
         return -ENOMEM;
@@ -1432,7 +1427,7 @@ int esc_mods_alloc_pages(struct mods_client *client, struct MODS_ALLOC_PAGES *p)
     dev_alloc_pages.pci_device.function = 0xFFFFU;
 
     err = esc_mods_alloc_pages_2(client, &dev_alloc_pages);
-    if (!err)
+    if (likely(!err))
         p->memory_handle = dev_alloc_pages.memory_handle;
 
     LOG_EXT();
@@ -1472,9 +1467,10 @@ int esc_mods_merge_pages(struct mods_client *client,
         return -EINVAL;
     }
 
-    if (unlikely(mutex_lock_interruptible(&client->mtx))) {
+    err = mutex_lock_interruptible(&client->mtx);
+    if (unlikely(err)) {
         LOG_EXT();
-        return -EINTR;
+        return err;
     }
 
     {
@@ -1619,7 +1615,8 @@ int esc_mods_set_mem_type(struct mods_client *client,
                           struct MODS_MEMORY_TYPE *p)
 {
     struct MODS_MEM_INFO *p_mem_info;
     u8 type = MODS_ALLOC_CACHED;
+    int err;
 
     LOG_ENT();
 
@@ -1641,9 +1638,10 @@ int esc_mods_set_mem_type(struct mods_client *client,
         return -EINVAL;
     }
 
-    if (unlikely(mutex_lock_interruptible(&client->mtx))) {
+    err = mutex_lock_interruptible(&client->mtx);
+    if (unlikely(err)) {
         LOG_EXT();
-        return -EINTR;
+        return err;
     }
 
     p_mem_info = mods_find_alloc(client, p->physical_address);
@@ -1791,12 +1789,14 @@ int esc_mods_virtual_to_phys(struct mods_client *client,
     struct MODS_GET_PHYSICAL_ADDRESS get_phys_addr;
     struct list_head *head;
     struct list_head *iter;
+    int err;
 
     LOG_ENT();
 
-    if (unlikely(mutex_lock_interruptible(&client->mtx))) {
+    err = mutex_lock_interruptible(&client->mtx);
+    if (unlikely(err)) {
         LOG_EXT();
-        return -EINTR;
+        return err;
     }
 
     head = &client->mem_map_list;
@@ -1814,7 +1814,6 @@ int esc_mods_virtual_to_phys(struct mods_client *client,
         if (p->virtual_address >= begin && p->virtual_address < end) {
 
             u64 virt_offs = p->virtual_address - begin;
-            int err;
 
             /* device memory mapping */
             if (!p_map_mem->p_mem_info) {
@@ -1844,8 +1843,10 @@ int esc_mods_virtual_to_phys(struct mods_client *client,
             mutex_unlock(&client->mtx);
 
             err = esc_mods_get_phys_addr(client, &get_phys_addr);
-            if (err)
+            if (err) {
+                LOG_EXT();
                 return err;
+            }
 
             p->physical_address = get_phys_addr.physical_address;
 
@@ -1863,6 +1864,7 @@ int esc_mods_virtual_to_phys(struct mods_client *client,
     mutex_unlock(&client->mtx);
 
     cl_error("invalid virtual address 0x%llx\n", p->virtual_address);
+    LOG_EXT();
     return -EINVAL;
 }
 
@@ -1872,14 +1874,16 @@ int esc_mods_phys_to_virtual(struct mods_client *client,
     struct SYS_MAP_MEMORY *p_map_mem;
     struct list_head *head;
     struct list_head *iter;
     u64 offset;
     u64 map_offset;
+    int err;
 
     LOG_ENT();
 
-    if (unlikely(mutex_lock_interruptible(&client->mtx))) {
+    err = mutex_lock_interruptible(&client->mtx);
+    if (unlikely(err)) {
         LOG_EXT();
-        return -EINTR;
+        return err;
     }
 
     head = &client->mem_map_list;
@@ -1939,8 +1943,11 @@ int esc_mods_phys_to_virtual(struct mods_client *client,
             return OK;
         }
     }
+
     mutex_unlock(&client->mtx);
+
     cl_error("phys addr 0x%llx is not mapped\n", p->physical_address);
+    LOG_EXT();
     return -EINVAL;
 }
 
@@ -2298,21 +2305,24 @@ static void clear_entry_cache_mappings(struct mods_client *client,
         u32 clear_size = PAGE_SIZE - page_offs;
         u64 remaining = chunk_offs_end - chunk_offs;
 
-        if ((u64)clear_size > remaining)
-            clear_size = (u32)remaining;
+        if (likely(page_va)) {
+            if ((u64)clear_size > remaining)
+                clear_size = (u32)remaining;
 
-        cl_debug(DEBUG_MEM_DETAILED,
-                 "clear page %u, chunk offs 0x%x, page va 0x%llx\n",
-                 i_page,
-                 chunk_offs,
-                 page_va);
+            cl_debug(DEBUG_MEM_DETAILED,
+                     "clear page %u, chunk offs 0x%x, page va 0x%llx\n",
+                     i_page,
+                     chunk_offs,
+                     page_va);
 
-        clear_contiguous_cache(client,
-                               clear_va,
-                               clear_pa,
-                               clear_size);
+            clear_contiguous_cache(client,
+                                   clear_va,
+                                   clear_pa,
+                                   clear_size);
 
-        kunmap((void *)(size_t)page_va);
+            kunmap((void *)(size_t)page_va);
+        } else
+            cl_error("kmap failed\n");
 
         chunk_offs += clear_size;
     }
@@ -2326,18 +2336,23 @@ int esc_mods_flush_cpu_cache_range(struct mods_client *client,
 {
     struct list_head *head;
     struct list_head *iter;
+    int err;
+
+    LOG_ENT();
 
     if (irqs_disabled() || in_interrupt() ||
         p->virt_addr_start > p->virt_addr_end ||
         p->flags == MODS_INVALIDATE_CPU_CACHE) {
 
         cl_debug(DEBUG_MEM_DETAILED, "cannot clear cache\n");
+        LOG_EXT();
         return -EINVAL;
     }
 
-    if (unlikely(mutex_lock_interruptible(&client->mtx))) {
+    err = mutex_lock_interruptible(&client->mtx);
+    if (unlikely(err)) {
         LOG_EXT();
-        return -EINTR;
+        return err;
     }
 
     head = &client->mem_map_list;
@@ -2378,6 +2393,8 @@ int esc_mods_flush_cpu_cache_range(struct mods_client *client,
         }
     }
     mutex_unlock(&client->mtx);
+
+    LOG_EXT();
     return OK;
 }
 
@@ -2385,44 +2402,52 @@ int esc_mods_flush_cpu_cache_range(struct mods_client *client,
 
 static int mods_post_alloc(struct mods_client *client,
                            struct MODS_PHYS_CHUNK *chunk,
                            u64 phys_addr,
                            struct MODS_MEM_INFO *p_mem_info)
 {
-    u32 num_pages = 1U << chunk->order;
-    u32 i;
+    int err = 0;
+
+    if ((p_mem_info->cache_type != MODS_ALLOC_CACHED) && !chunk->wc) {
+        u32 num_pages = 1U << chunk->order;
+        u32 i;
 
-    if (p_mem_info->cache_type != MODS_ALLOC_CACHED && !chunk->wc) {
         for (i = 0; i < num_pages; i++) {
-            u64 ptr = 0;
-            int err = 0;
+            void *ptr;
 
-            ptr = (u64)(size_t)kmap(chunk->p_page + i);
-            if (!ptr) {
+            ptr = kmap(chunk->p_page + i);
+            if (unlikely(!ptr)) {
                 cl_error("kmap failed\n");
-                return -EINVAL;
+                return -ENOMEM;
             }
 #if defined(CONFIG_ARCH_TEGRA) && !defined(CONFIG_CPA)
             clear_contiguous_cache(client,
-                                   ptr,
+                                   (u64)(size_t)ptr,
                                    phys_addr + (i << PAGE_SHIFT),
                                    PAGE_SIZE);
 #else
-            err = mods_set_mem_type(ptr, 1, p_mem_info->cache_type);
+            if (p_mem_info->cache_type == MODS_ALLOC_WRITECOMBINE)
+                err = MODS_SET_MEMORY_WC((unsigned long)ptr, 1);
+            else
+                err = MODS_SET_MEMORY_UC((unsigned long)ptr, 1);
 #endif
-            kunmap((void *)(size_t)ptr);
-            if (err) {
+            kunmap(ptr);
+            if (unlikely(err)) {
                 cl_error("set cache type failed\n");
-                return -EINVAL;
+                return err;
             }
-        }
 
-        chunk->wc = 1;
+            /* Set this flag early, so that when an error occurs,
+             * mods_free_pages() will restore cache attributes
+             * for all pages. It's OK to restore cache attributes
+             * even for chunks where we haven't change them.
+             */
+            chunk->wc = 1;
+        }
     }
 
 #ifdef CONFIG_PCI
     if (p_mem_info->dev) {
         struct pci_dev *dev = p_mem_info->dev;
-        int err;
 
         /* On systems with SWIOTLB active, disable default DMA mapping
          * because we don't support scatter-gather lists.
@@ -2432,13 +2457,11 @@ static int mods_post_alloc(struct mods_client *client,
         const struct dma_map_ops *ops = get_dma_ops(&dev->dev);
 
         if (ops->map_sg == swiotlb_map_sg_attrs)
-            return OK;
+            return 0;
 #endif
         err = mods_dma_map_default_page(client, chunk, dev);
-        if (err)
-            return err;
     }
 #endif
 
-    return 0;
+    return err;
 }
@@ -917,3 +917,35 @@ int esc_mods_pci_set_dma_mask(struct mods_client *client,
     LOG_EXT();
     return err;
 }
+
+int esc_mods_pci_reset_function(struct mods_client *client,
+                                struct mods_pci_dev_2 *pcidev)
+{
+    int err;
+    struct pci_dev *dev;
+
+    LOG_ENT();
+
+    err = mods_find_pci_dev(client, pcidev, &dev);
+    if (unlikely(err)) {
+        if (err == -ENODEV)
+            cl_error("dev %04x:%02x:%02x.%x not found\n",
+                     pcidev->domain,
+                     pcidev->bus,
+                     pcidev->device,
+                     pcidev->function);
+        LOG_EXT();
+        return err;
+    }
+
+    err = pci_reset_function(dev);
+    if (unlikely(err))
+        cl_error("pci_reset_function failed on dev %04x:%02x:%02x.%x\n",
+                 pcidev->domain,
+                 pcidev->bus,
+                 pcidev->device,
+                 pcidev->function);
+    pci_dev_put(dev);
+    LOG_EXT();
+    return err;
+}
@@ -25,7 +25,7 @@
 
 /* Driver version */
 #define MODS_DRIVER_VERSION_MAJOR 3
-#define MODS_DRIVER_VERSION_MINOR 97
+#define MODS_DRIVER_VERSION_MINOR 99
 #define MODS_DRIVER_VERSION ((MODS_DRIVER_VERSION_MAJOR << 8) | \
                              ((MODS_DRIVER_VERSION_MINOR / 10) << 4) | \
                              (MODS_DRIVER_VERSION_MINOR % 10))
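
For reference, with the minor version bumped to 99 the packed value these macros produce is 0x399; a quick check, using only the arithmetic shown above:

    /* Sketch: MODS_DRIVER_VERSION for major 3, minor 99. */
    #include <assert.h>

    int main(void)
    {
        unsigned int v = (3 << 8) | ((99 / 10) << 4) | (99 % 10);
        assert(v == 0x399); /* one hex digit per decimal digit of "3.99" */
        return 0;
    }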
@@ -1024,11 +1024,17 @@ union ACPI_ARGUMENT {
         __u32 length; /* Number of bytes */
         __u32 offset; /* Offset in in_buffer or out_buffer */
     } buffer;
+
+    struct {
+        __u32 type;
+        __u64 handle;
+    } method;
 };
 
 /* Argument type (for the type field above) */
 #define ACPI_MODS_TYPE_INTEGER 1
 #define ACPI_MODS_TYPE_BUFFER  2
+#define ACPI_MODS_TYPE_METHOD  3
 
 #define ACPI_MAX_BUFFER_LENGTH 4096
 #define ACPI_MAX_METHOD_LENGTH 12
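
To illustrate the new method argument type, a small sketch of how a userspace caller might fill it; the field names come from this header, while the helper itself and the origin of the handle are illustrative assumptions:

    #include <linux/types.h>
    #include "mods.h" /* union ACPI_ARGUMENT, ACPI_MODS_TYPE_METHOD */

    /* 'handle' is an ACPI power-resource handle previously copied into
     * out_buffer by the driver (see the ACPI_TYPE_LOCAL_REFERENCE
     * handling earlier in this commit).
     */
    static void set_method_arg(union ACPI_ARGUMENT *arg, __u64 handle)
    {
        arg->method.type = ACPI_MODS_TYPE_METHOD;
        arg->method.handle = handle;
    }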
@@ -1353,12 +1359,13 @@ struct MODS_GET_NVLINK_LINE_RATE {
 };
 
 #define MODS_MAX_SYSFS_PATH_BUF_SIZE 512
-#define MODS_MAX_SYSFS_PATH_LEN (512 - 6)
 #define MODS_MAX_SYSFS_FILE_SIZE 4096
 
 /* Used by MODS_ESC_WRITE_SYSFS_NODE ioctl.
  *
  * Writes specified contents to the given sysfs node.
+ *
+ * 'path' parameter is relative to /sys/.
  */
 struct MODS_SYSFS_NODE {
     /* IN */
@@ -1367,6 +1374,18 @@ struct MODS_SYSFS_NODE {
     __u32 size; /* Size of the contents buffer, in bytes */
 };
 
+/* Used by MODS_ESC_SYSCTL_WRITE_INT ioctl.
+ *
+ * Writes specified integer value into a node under /proc/sys/.
+ *
+ * 'path' parameter is relative to /proc/sys/.
+ */
+struct MODS_SYSCTL_INT {
+    /* IN */
+    char path[MODS_MAX_SYSFS_PATH_BUF_SIZE];
+    __s64 value;
+};
+
 #define MAX_CLOCK_HANDLE_NAME 64
 
 /* Used by MODS_ESC_GET_CLOCK_HANDLE ioctl.
@@ -1923,5 +1942,7 @@ struct MODS_IOMMU_DMA_MAP_MEMORY {
                                        MODS_IOMMU_DMA_MAP_MEMORY)
 #define MODS_ESC_RESET_ASSERT          MODSIO(W,  131, MODS_RESET_HANDLE)
 #define MODS_ESC_GET_RESET_HANDLE      MODSIO(WR, 132, MODS_GET_RESET_HANDLE)
+#define MODS_ESC_SYSCTL_WRITE_INT      MODSIO(W,  133, MODS_SYSCTL_INT)
+#define MODS_ESC_PCI_RESET_FUNCTION    MODSIO(W,  134, mods_pci_dev_2)
 
 #endif /* _UAPI_MODS_H_ */
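
A hedged sketch of how userspace might exercise the new MODS_ESC_SYSCTL_WRITE_INT escape; the /dev/mods node name and the availability of this header as "mods.h" are assumptions, not something this diff confirms:

    /* Sketch only: write an integer to a /proc/sys node via MODS. */
    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "mods.h" /* struct MODS_SYSCTL_INT, MODS_ESC_SYSCTL_WRITE_INT */

    static int sysctl_write_int(const char *rel_path, long long value)
    {
        struct MODS_SYSCTL_INT arg;
        int fd = open("/dev/mods", O_RDWR); /* assumed device node */
        int err;

        if (fd < 0)
            return -1;

        memset(&arg, 0, sizeof(arg));
        /* Path is relative to /proc/sys/, e.g. "kernel/sysrq" */
        strncpy(arg.path, rel_path, sizeof(arg.path) - 1);
        arg.value = value;

        err = ioctl(fd, MODS_ESC_SYSCTL_WRITE_INT, &arg);
        close(fd);
        return err;
    }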