gpu: nvgpu: provide usermode region via mmap

Add a mmap callback on the control device node for mapping the usermode
register region to userspace. Each such mapping is removed when the GPU
railgates, and brought back again on unrailgate.

The mapping offset must be 0 and its size must be 4 KB.
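
For reference, userspace would obtain the mapping roughly as in the
following sketch (the control node path is illustrative and the error
handling is minimal; the sketch is not part of this change):

  #include <fcntl.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
          void *usermode;
          /* Illustrative path; the actual ctrl node name is platform
           * dependent. */
          int fd = open("/dev/nvgpu/igpu0/ctrl", O_RDWR);

          if (fd < 0)
                  return 1;

          /* Offset must be 0 and the size must be exactly 4 KB. */
          usermode = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                          MAP_SHARED, fd, 0);
          if (usermode == MAP_FAILED) {
                  close(fd);
                  return 1;
          }

          /* ... access the usermode registers through the mapping ... */

          munmap(usermode, 4096);
          close(fd);
          return 0;
  }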

Bug 200145225

Change-Id: Ie8d3758da745b958376292691d7d1d02a24e7815
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1795819
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Konsta Holtta
Date: 2018-09-11 14:46:09 +03:00
Committed by: mobile promotions
Parent: d53495400e
Commit: f33935f426

4 changed files with 140 additions and 1 deletion

@@ -52,6 +52,7 @@ static const struct file_operations gk20a_ctrl_ops = {
#ifdef CONFIG_COMPAT
	.compat_ioctl = gk20a_ctrl_dev_ioctl,
#endif
	.mmap = gk20a_ctrl_dev_mmap,
};

static const struct file_operations gk20a_dbg_ops = {

@@ -57,6 +57,10 @@ struct gk20a_ctrl_priv {
	struct nvgpu_clk_session *clk_session;
	struct nvgpu_list_node list;
	struct {
		struct vm_area_struct *vma;
		unsigned long flags;
	} usermode_vma;
};

static inline struct gk20a_ctrl_priv *
@@ -1910,3 +1914,130 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
	return err;
}

static void usermode_vma_close(struct vm_area_struct *vma)
{
	struct gk20a_ctrl_priv *priv = vma->vm_private_data;
	struct gk20a *g = priv->g;
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	nvgpu_mutex_acquire(&l->ctrl.privs_lock);
	priv->usermode_vma.vma = NULL;
	nvgpu_mutex_release(&l->ctrl.privs_lock);
}

struct vm_operations_struct usermode_vma_ops = {
	/* no .open - we use VM_DONTCOPY and don't support fork */
	.close = usermode_vma_close,
};

int gk20a_ctrl_dev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct gk20a_ctrl_priv *priv = filp->private_data;
	struct gk20a *g = priv->g;
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	u64 addr;
	int err;

	if (g->ops.fifo.usermode_base == NULL)
		return -ENOSYS;

	if (priv->usermode_vma.vma != NULL)
		return -EBUSY;

	if (vma->vm_end - vma->vm_start != SZ_4K)
		return -EINVAL;

	if (vma->vm_pgoff != 0UL)
		return -EINVAL;

	addr = l->regs_bus_addr + g->ops.fifo.usermode_base(g);

	/* Sync with poweron/poweroff, and require valid regs */
	err = gk20a_busy(g);
	if (err) {
		return err;
	}

	nvgpu_mutex_acquire(&l->ctrl.privs_lock);

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
		VM_DONTDUMP | VM_PFNMAP;
	vma->vm_ops = &usermode_vma_ops;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	err = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (!err) {
		priv->usermode_vma.vma = vma;
		priv->usermode_vma.flags = vma->vm_flags;
		vma->vm_private_data = priv;
	}
	nvgpu_mutex_release(&l->ctrl.privs_lock);

	gk20a_idle(g);

	return err;
}

static void alter_usermode_mapping(struct gk20a *g,
		struct gk20a_ctrl_priv *priv,
		bool poweroff)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct vm_area_struct *vma = priv->usermode_vma.vma;
	u64 addr;
	int err;

	if (!vma) {
		/* Nothing to do - no mmap called */
		return;
	}

	addr = l->regs_bus_addr + g->ops.fifo.usermode_base(g);

	down_write(&vma->vm_mm->mmap_sem);

	if (poweroff) {
		err = zap_vma_ptes(vma, vma->vm_start, SZ_4K);
		if (err == 0) {
			vma->vm_flags = VM_NONE;
		} else {
			nvgpu_err(g, "can't remove usermode mapping");
		}
	} else {
		vma->vm_flags = priv->usermode_vma.flags;
		err = io_remap_pfn_range(vma, vma->vm_start,
				addr >> PAGE_SHIFT,
				SZ_4K, vma->vm_page_prot);
		if (err != 0) {
			nvgpu_err(g, "can't restore usermode mapping");
			vma->vm_flags = VM_NONE;
		}
	}

	up_write(&vma->vm_mm->mmap_sem);
}

static void alter_usermode_mappings(struct gk20a *g, bool poweroff)
{
	struct gk20a_ctrl_priv *priv;
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	nvgpu_mutex_acquire(&l->ctrl.privs_lock);
	nvgpu_list_for_each_entry(priv, &l->ctrl.privs,
			gk20a_ctrl_priv, list) {
		alter_usermode_mapping(g, priv, poweroff);
	}
	nvgpu_mutex_release(&l->ctrl.privs_lock);
}

void nvgpu_hide_usermode_for_poweroff(struct gk20a *g)
{
	alter_usermode_mappings(g, true);
}

void nvgpu_restore_usermode_for_poweron(struct gk20a *g)
{
	alter_usermode_mappings(g, false);
}

@@ -19,5 +19,9 @@
int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp);
int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp);
long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
int gk20a_ctrl_dev_mmap(struct file *filp, struct vm_area_struct *vma);

void nvgpu_hide_usermode_for_poweroff(struct gk20a *g);
void nvgpu_restore_usermode_for_poweron(struct gk20a *g);

#endif

@@ -56,6 +56,7 @@
#include "module_usermode.h"
#include "intr.h"
#include "ioctl.h"
#include "ioctl_ctrl.h"
#include "os_linux.h"
#include "os_ops.h"
@@ -76,7 +77,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/gk20a.h>

struct device_node *nvgpu_get_node(struct gk20a *g)
{
	struct device *dev = dev_from_gk20a(g);
@@ -260,6 +260,8 @@ int gk20a_pm_finalize_poweron(struct device *dev)
	if (err)
		goto done;

	nvgpu_restore_usermode_for_poweron(g);

	/* Enable interrupt workqueue */
	if (!l->nonstall_work_queue) {
		l->nonstall_work_queue = alloc_workqueue("%s",
@@ -392,6 +394,7 @@ static int gk20a_pm_prepare_poweroff(struct device *dev)
	/* Stop CPU from accessing the GPU registers. */
	gk20a_lockout_registers(g);

	nvgpu_hide_usermode_for_poweroff(g);
	nvgpu_mutex_release(&g->power_lock);

	return 0;