Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-22 17:25:35 +03:00)

crypto: tegra: Add support for SE on host1x to OOT

Add driver to support the Security Engine on host1x as an OOT module.

Bug 3583641

Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
Change-Id: If2eaff4e783cc6426db6a711e1300568735dab76
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2726869
Reviewed-by: Bitan Biswas <bbiswas@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>

Committed by: mobile promotions
Parent: db30560ec0
Commit: 198a0ba027
@@ -3,6 +3,7 @@
 LINUXINCLUDE += -I$(srctree.nvidia-oot)/include

+obj-m += crypto/
 obj-m += devfreq/
 obj-m += dma/
 obj-m += gpu/

drivers/crypto/Makefile (new file, 7 lines)
@@ -0,0 +1,7 @@
ccflags-y += -Wno-implicit-fallthrough
ccflags-y += -I$(srctree.nvidia)/drivers/gpu/host1x/hw/
ccflags-y += -I$(srctree.nvidia)/include
ccflags-y += -I$(srctree)/include

obj-m += tegra-se-nvhost.o
obj-m += tegra-se-nvrng.o
drivers/crypto/hardware_t194.h (new file, 177 lines)
@@ -0,0 +1,177 @@
/*
 * drivers/video/tegra/host/t194/hardware_t194.h
 *
 * Tegra T194 HOST1X Register Definitions
 *
 * Copyright (c) 2016-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef __NVHOST_HARDWARE_T194_H
#define __NVHOST_HARDWARE_T194_H

#include <hw_host1x05_sync.h>
#include <hw_host1x06_uclass.h>
#include <hw_host1x06_channel.h>

/* sync registers */
#define NV_HOST1X_SYNCPT_NB_PTS 704
#define NV_HOST1X_NB_MLOCKS 32

#define NV_HOST1X_MLOCK_ID_NVCSI 7
#define NV_HOST1X_MLOCK_ID_ISP 8
#define NV_HOST1X_MLOCK_ID_VI 16
#define NV_HOST1X_MLOCK_ID_VIC 17
#define NV_HOST1X_MLOCK_ID_NVENC 18
#define NV_HOST1X_MLOCK_ID_NVDEC 19
#define NV_HOST1X_MLOCK_ID_NVJPG 20
#define NV_HOST1X_MLOCK_ID_TSEC 21
#define NV_HOST1X_MLOCK_ID_TSECB 22
#define NV_HOST1X_MLOCK_ID_NVENC1 29
#define NV_HOST1X_MLOCK_ID_NVDEC1 31

#define HOST1X_THOST_ACTMON_NVENC 0x00000
#define HOST1X_THOST_ACTMON_VIC 0x10000
#define HOST1X_THOST_ACTMON_NVDEC 0x20000
#define HOST1X_THOST_ACTMON_NVJPG 0x30000
#define HOST1X_THOST_ACTMON_NVENC1 0x40000
#define HOST1X_THOST_ACTMON_NVDEC1 0x50000

/* Generic support */
static inline u32 nvhost_class_host_wait_syncpt(
        unsigned indx, unsigned threshold)
{
        return (indx << 24) | (threshold & 0xffffff);
}

static inline u32 nvhost_class_host_load_syncpt_base(
        unsigned indx, unsigned threshold)
{
        return host1x_uclass_wait_syncpt_indx_f(indx)
                | host1x_uclass_wait_syncpt_thresh_f(threshold);
}

static inline u32 nvhost_class_host_incr_syncpt(
        unsigned cond, unsigned indx)
{
        return host1x_uclass_incr_syncpt_cond_f(cond)
                | host1x_uclass_incr_syncpt_indx_f(indx);
}

enum {
        NV_HOST_MODULE_HOST1X = 0,
        NV_HOST_MODULE_MPE = 1,
        NV_HOST_MODULE_GR3D = 6
};

/* cdma opcodes */
static inline u32 nvhost_opcode_setclass(
        unsigned class_id, unsigned offset, unsigned mask)
{
        return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
}

static inline u32 nvhost_opcode_incr(unsigned offset, unsigned count)
{
        return (1 << 28) | (offset << 16) | count;
}

static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count)
{
        return (2 << 28) | (offset << 16) | count;
}

static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask)
{
        return (3 << 28) | (offset << 16) | mask;
}

static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value)
{
        return (4 << 28) | (offset << 16) | value;
}

static inline u32 nvhost_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
{
        return nvhost_opcode_imm(host1x_uclass_incr_syncpt_r(),
                nvhost_class_host_incr_syncpt(cond, indx));
}

static inline u32 nvhost_opcode_restart(unsigned address)
{
        return (5 << 28) | (address >> 4);
}

static inline u32 nvhost_opcode_gather(unsigned count)
{
        return (6 << 28) | count;
}

static inline u32 nvhost_opcode_gather_nonincr(unsigned offset, unsigned count)
{
        return (6 << 28) | (offset << 16) | BIT(15) | count;
}

static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count)
{
        return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
}

static inline u32 nvhost_opcode_gather_insert(unsigned offset, unsigned incr,
                unsigned count)
{
        return (6 << 28) | (offset << 16) | BIT(15) | (incr << 14) | count;
}

static inline u32 nvhost_opcode_setstreamid(unsigned streamid)
{
        return (7 << 28) | streamid;
}

static inline u32 nvhost_opcode_setpayload(unsigned payload)
{
        return (9 << 28) | payload;
}

static inline u32 nvhost_opcode_acquire_mlock(unsigned id)
{
        return (14 << 28) | id;
}

static inline u32 nvhost_opcode_release_mlock(unsigned id)
{
        return (14 << 28) | (1 << 24) | id;
}

static inline u32 nvhost_opcode_incr_w(unsigned int offset)
{
        /* 22-bit offset supported */
        return (10 << 28) | offset;
}

static inline u32 nvhost_opcode_nonincr_w(unsigned offset)
{
        /* 22-bit offset supported */
        return (11 << 28) | offset;
}

#define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0)

static inline u32 nvhost_mask2(unsigned x, unsigned y)
{
        return 1 | (1 << (y - x));
}

#endif /* __NVHOST_HARDWARE_T194_H */
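For orientation, a minimal sketch of how the opcode helpers above compose into 32-bit host1x command words. The class ID, register offset, and syncpoint condition used here are placeholder assumptions for illustration only; they are not values defined by this commit.

/* Illustrative only: build a tiny command stream with the helpers above.
 * EXAMPLE_SE_CLASS_ID and EXAMPLE_SE_OP_REG are hypothetical placeholders.
 */
#define EXAMPLE_SE_CLASS_ID 0x0 /* placeholder engine class, not a real ID */
#define EXAMPLE_SE_OP_REG   0x0 /* placeholder register offset */

static inline void example_fill_cmdbuf(u32 *cmdbuf, u32 syncpt_id)
{
        unsigned int i = 0;

        /* Select the target class (opcode 0 in bits 31:28). */
        cmdbuf[i++] = nvhost_opcode_setclass(EXAMPLE_SE_CLASS_ID, 0, 0);
        /* One non-incrementing write to a single engine register. */
        cmdbuf[i++] = nvhost_opcode_nonincr(EXAMPLE_SE_OP_REG, 1);
        cmdbuf[i++] = 0x1; /* payload written to the register above */
        /* Increment the job syncpoint; the condition value (1) is illustrative. */
        cmdbuf[i++] = nvhost_opcode_imm_incr_syncpt(1, syncpt_id);
}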
drivers/crypto/nvhost.h (new file, 952 lines)
@@ -0,0 +1,952 @@
/*
 * include/linux/nvhost.h
 *
 * Tegra graphics host driver
 *
 * Copyright (c) 2009-2022, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __LINUX_NVHOST_H
#define __LINUX_NVHOST_H

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/devfreq.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/version.h>

//#include <uapi/linux/nvdev_fence.h>

#ifdef CONFIG_TEGRA_HOST1X
#include <linux/host1x.h>
#endif

#if IS_ENABLED(CONFIG_TEGRA_GRHOST) && IS_ENABLED(CONFIG_TEGRA_HOST1X)
#error "Unable to enable TEGRA_GRHOST or TEGRA_HOST1X at the same time!"
#endif

struct tegra_bwmgr_client;

struct nvhost_channel;
struct nvhost_master;
struct nvhost_cdma;
struct nvhost_hwctx;
struct nvhost_device_power_attr;
struct nvhost_device_profile;
struct mem_mgr;
struct nvhost_as_moduleops;
struct nvhost_ctrl_sync_fence_info;
struct nvhost_sync_timeline;
struct nvhost_sync_pt;
enum nvdev_fence_kind;
struct nvdev_fence;
struct sync_pt;
struct dma_fence;
struct nvhost_fence;

#define NVHOST_MODULE_MAX_CLOCKS 8
#define NVHOST_MODULE_MAX_SYNCPTS 16
#define NVHOST_MODULE_MAX_WAITBASES 3
#define NVHOST_MODULE_MAX_MODMUTEXES 5
#define NVHOST_MODULE_MAX_IORESOURCE_MEM 5
#define NVHOST_NAME_SIZE 24
#define NVSYNCPT_INVALID (-1)

#define NVSYNCPT_AVP_0 (10) /* t20, t30, t114, t148 */
#define NVSYNCPT_3D (22) /* t20, t30, t114, t148 */
#define NVSYNCPT_VBLANK0 (26) /* t20, t30, t114, t148 */
#define NVSYNCPT_VBLANK1 (27) /* t20, t30, t114, t148 */

#define NVMODMUTEX_ISP_0 (1) /* t124, t132, t210 */
#define NVMODMUTEX_ISP_1 (2) /* t124, t132, t210 */
#define NVMODMUTEX_NVJPG (3) /* t210 */
#define NVMODMUTEX_NVDEC (4) /* t210 */
#define NVMODMUTEX_MSENC (5) /* t124, t132, t210 */
#define NVMODMUTEX_TSECA (6) /* t124, t132, t210 */
#define NVMODMUTEX_TSECB (7) /* t124, t132, t210 */
#define NVMODMUTEX_VI (8) /* t124, t132, t210 */
#define NVMODMUTEX_VI_0 (8) /* t148 */
#define NVMODMUTEX_VIC (10) /* t124, t132, t210 */
#define NVMODMUTEX_VI_1 (11) /* t124, t132, t210 */

enum nvhost_power_sysfs_attributes {
        NVHOST_POWER_SYSFS_ATTRIB_AUTOSUSPEND_DELAY,
        NVHOST_POWER_SYSFS_ATTRIB_FORCE_ON,
        NVHOST_POWER_SYSFS_ATTRIB_MAX
};
|
||||
|
||||
struct nvhost_notification {
|
||||
struct { /* 0000- */
|
||||
__u32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 */
|
||||
} time_stamp; /* -0007 */
|
||||
__u32 info32; /* info returned depends on method 0008-000b */
|
||||
#define NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT 8
|
||||
#define NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY 13
|
||||
#define NVHOST_CHANNEL_GR_SEMAPHORE_TIMEOUT 24
|
||||
#define NVHOST_CHANNEL_GR_ILLEGAL_NOTIFY 25
|
||||
#define NVHOST_CHANNEL_FIFO_ERROR_MMU_ERR_FLT 31
|
||||
#define NVHOST_CHANNEL_PBDMA_ERROR 32
|
||||
#define NVHOST_CHANNEL_RESETCHANNEL_VERIF_ERROR 43
|
||||
__u16 info16; /* info returned depends on method 000c-000d */
|
||||
__u16 status; /* user sets bit 15, NV sets status 000e-000f */
|
||||
#define NVHOST_CHANNEL_SUBMIT_TIMEOUT 1
|
||||
};
|
||||
|
||||
struct nvhost_gating_register {
|
||||
u64 addr;
|
||||
u32 prod;
|
||||
u32 disable;
|
||||
};
|
||||
|
||||
struct nvhost_actmon_register {
|
||||
u32 addr;
|
||||
u32 val;
|
||||
};
|
||||
|
||||
enum tegra_emc_request_type {
|
||||
TEGRA_SET_EMC_FLOOR, /* lower bound */
|
||||
TEGRA_SET_EMC_CAP, /* upper bound */
|
||||
TEGRA_SET_EMC_ISO_CAP, /* upper bound that affects ISO Bw */
|
||||
TEGRA_SET_EMC_SHARED_BW, /* shared bw request */
|
||||
TEGRA_SET_EMC_SHARED_BW_ISO, /* for use by ISO Mgr only */
|
||||
TEGRA_SET_EMC_REQ_COUNT /* Should always be last */
|
||||
};
|
||||
|
||||
struct nvhost_clock {
|
||||
char *name;
|
||||
unsigned long default_rate;
|
||||
u32 moduleid;
|
||||
enum tegra_emc_request_type request_type;
|
||||
bool disable_scaling;
|
||||
unsigned long devfreq_rate;
|
||||
};
|
||||
|
||||
struct nvhost_vm_hwid {
|
||||
u64 addr;
|
||||
bool dynamic;
|
||||
u32 shift;
|
||||
};
|
||||
|
||||
/*
|
||||
* Defines HW and SW class identifiers.
|
||||
*
|
||||
* This is module ID mapping between userspace and kernelspace.
|
||||
* The values of enum entries' are referred from NvRmModuleID enum defined
|
||||
* in below userspace file:
|
||||
* $TOP/vendor/nvidia/tegra/core/include/nvrm_module.h
|
||||
* Please make sure each entry below has same value as set in above file.
|
||||
*/
|
||||
enum nvhost_module_identifier {
|
||||
|
||||
/* Specifies external memory (DDR RAM, etc) */
|
||||
NVHOST_MODULE_ID_EXTERNAL_MEMORY_CONTROLLER = 75,
|
||||
|
||||
/* Specifies CBUS floor client module */
|
||||
NVHOST_MODULE_ID_CBUS_FLOOR = 119,
|
||||
|
||||
/* Specifies shared EMC client module */
|
||||
NVHOST_MODULE_ID_EMC_SHARED,
|
||||
NVHOST_MODULE_ID_MAX
|
||||
};
|
||||
|
||||
enum nvhost_resource_policy {
|
||||
RESOURCE_PER_DEVICE = 0,
|
||||
RESOURCE_PER_CHANNEL_INSTANCE,
|
||||
};
|
||||
|
||||
struct nvhost_device_data {
|
||||
int version; /* ip version number of device */
|
||||
int id; /* Separates clients of same hw */
|
||||
void __iomem *aperture[NVHOST_MODULE_MAX_IORESOURCE_MEM];
|
||||
struct device_dma_parameters dma_parms;
|
||||
|
||||
u32 modulemutexes[NVHOST_MODULE_MAX_MODMUTEXES];
|
||||
u32 moduleid; /* Module id for user space API */
|
||||
|
||||
/* interrupt ISR routine for falcon based engines */
|
||||
int (*flcn_isr)(struct platform_device *dev);
|
||||
int irq;
|
||||
int module_irq; /* IRQ bit from general intr reg for module intr */
|
||||
spinlock_t mirq_lock; /* spin lock for module irq */
|
||||
bool self_config_flcn_isr; /* skip setting up falcon interrupts */
|
||||
|
||||
/* Should we toggle the engine SLCG when we turn on the domain? */
|
||||
bool poweron_toggle_slcg;
|
||||
|
||||
/* Flag to set SLCG notifier (for the modules other than VIC) */
|
||||
bool slcg_notifier_enable;
|
||||
|
||||
/* Used to serialize channel when map-at-submit is used w/o mlocks */
|
||||
u32 last_submit_syncpt_id;
|
||||
u32 last_submit_syncpt_value;
|
||||
|
||||
bool power_on; /* If module is powered on */
|
||||
|
||||
u32 class; /* Device class */
|
||||
bool exclusive; /* True if only one user at a time */
|
||||
bool keepalive; /* Do not power gate when opened */
|
||||
bool serialize; /* Serialize submits in the channel */
|
||||
bool push_work_done; /* Push_op done into push buffer */
|
||||
bool poweron_reset; /* Reset the engine before powerup */
|
||||
bool virtual_dev; /* True if virtualized device */
|
||||
char *devfs_name; /* Name in devfs */
|
||||
char *devfs_name_family; /* Core of devfs name */
|
||||
|
||||
/* Support aborting the channel with close(channel_fd) */
|
||||
bool support_abort_on_close;
|
||||
|
||||
char *firmware_name; /* Name of firmware */
|
||||
bool firmware_not_in_subdir; /* Firmware is not located in
|
||||
chip subdirectory */
|
||||
|
||||
bool engine_can_cg; /* True if CG is enabled */
|
||||
bool can_powergate; /* True if module can be power gated */
|
||||
int autosuspend_delay;/* Delay before power gated */
|
||||
struct nvhost_clock clocks[NVHOST_MODULE_MAX_CLOCKS];/* Clock names */
|
||||
|
||||
/* Clock gating registers */
|
||||
struct nvhost_gating_register *engine_cg_regs;
|
||||
|
||||
int num_clks; /* Number of clocks opened for dev */
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST)
|
||||
struct clk *clk[NVHOST_MODULE_MAX_CLOCKS];
|
||||
#else
|
||||
struct clk_bulk_data *clks;
|
||||
#endif
|
||||
struct mutex lock; /* Power management lock */
|
||||
struct list_head client_list; /* List of clients and rate requests */
|
||||
|
||||
int num_channels; /* Max num of channel supported */
|
||||
int num_mapped_chs; /* Num of channel mapped to device */
|
||||
int num_ppc; /* Number of pixels per clock cycle */
|
||||
|
||||
/* device node for channel operations */
|
||||
dev_t cdev_region;
|
||||
struct device *node;
|
||||
struct cdev cdev;
|
||||
|
||||
/* Address space device node */
|
||||
struct device *as_node;
|
||||
struct cdev as_cdev;
|
||||
|
||||
/* device node for ctrl block */
|
||||
struct class *nvhost_class;
|
||||
struct device *ctrl_node;
|
||||
struct cdev ctrl_cdev;
|
||||
const struct file_operations *ctrl_ops; /* ctrl ops for the module */
|
||||
|
||||
/* address space operations */
|
||||
const struct nvhost_as_moduleops *as_ops;
|
||||
|
||||
struct kobject *power_kobj; /* kobject to hold power sysfs entries */
|
||||
struct nvhost_device_power_attr *power_attrib; /* sysfs attributes */
|
||||
/* kobject to hold clk_cap sysfs entries */
|
||||
struct kobject clk_cap_kobj;
|
||||
struct kobj_attribute *clk_cap_attrs;
|
||||
struct dentry *debugfs; /* debugfs directory */
|
||||
|
||||
u32 nvhost_timeout_default;
|
||||
|
||||
/* Data for devfreq usage */
|
||||
struct devfreq *power_manager;
|
||||
/* Private device profile data */
|
||||
struct nvhost_device_profile *power_profile;
|
||||
/* Should we read load estimate from hardware? */
|
||||
bool actmon_enabled;
|
||||
/* Should we do linear emc scaling? */
|
||||
bool linear_emc;
|
||||
/* Offset to actmon registers */
|
||||
u32 actmon_regs;
|
||||
/* WEIGHT_COUNT of actmon */
|
||||
u32 actmon_weight_count;
|
||||
struct nvhost_actmon_register *actmon_setting_regs;
|
||||
/* Devfreq governor name */
|
||||
const char *devfreq_governor;
|
||||
unsigned long *freq_table;
|
||||
|
||||
/* Marks if the device is booted when pm runtime is disabled */
|
||||
bool booted;
|
||||
|
||||
/* Should be marked as true if nvhost shouldn't create device nodes */
|
||||
bool kernel_only;
|
||||
|
||||
void *private_data; /* private platform data */
|
||||
void *falcon_data; /* store the falcon info */
|
||||
struct platform_device *pdev; /* owner platform_device */
|
||||
void *virt_priv; /* private data for virtualized dev */
|
||||
#if IS_ENABLED(CONFIG_TEGRA_HOST1X)
|
||||
struct host1x *host1x; /* host1x device */
|
||||
#endif
|
||||
|
||||
struct mutex no_poweroff_req_mutex;
|
||||
struct dev_pm_qos_request no_poweroff_req;
|
||||
int no_poweroff_req_count;
|
||||
|
||||
struct notifier_block toggle_slcg_notifier;
|
||||
|
||||
struct rw_semaphore busy_lock;
|
||||
bool forced_idle;
|
||||
|
||||
/* Finalize power on. Can be used for context restore. */
|
||||
int (*finalize_poweron)(struct platform_device *dev);
|
||||
|
||||
/* Called each time we enter the class */
|
||||
int (*init_class_context)(struct platform_device *dev,
|
||||
struct nvhost_cdma *cdma);
|
||||
|
||||
/*
|
||||
* Reset the unit. Used for timeout recovery, resetting the unit on
|
||||
* probe and when un-powergating.
|
||||
*/
|
||||
void (*reset)(struct platform_device *dev);
|
||||
|
||||
/* Device is busy. */
|
||||
void (*busy)(struct platform_device *);
|
||||
|
||||
/* Device is idle. */
|
||||
void (*idle)(struct platform_device *);
|
||||
|
||||
/* Scaling init is run on device registration */
|
||||
void (*scaling_init)(struct platform_device *dev);
|
||||
|
||||
/* Scaling deinit is called on device unregistration */
|
||||
void (*scaling_deinit)(struct platform_device *dev);
|
||||
|
||||
/* Postscale callback is called after frequency change */
|
||||
void (*scaling_post_cb)(struct nvhost_device_profile *profile,
|
||||
unsigned long freq);
|
||||
|
||||
/* Preparing for power off. Used for context save. */
|
||||
int (*prepare_poweroff)(struct platform_device *dev);
|
||||
|
||||
/* paring for power off. Used for context save. */
|
||||
int (*aggregate_constraints)(struct platform_device *dev,
|
||||
int clk_index,
|
||||
unsigned long floor_rate,
|
||||
unsigned long pixel_rate,
|
||||
unsigned long bw_rate);
|
||||
|
||||
/* Called after successful client device init. This can
|
||||
* be used in cases where the hardware specifics differ
|
||||
* between hardware revisions */
|
||||
int (*hw_init)(struct platform_device *dev);
|
||||
|
||||
/* Used to add platform specific masks on reloc address */
|
||||
dma_addr_t (*get_reloc_phys_addr)(dma_addr_t phys_addr, u32 reloc_type);
|
||||
|
||||
/* Allocates a context handler for the device */
|
||||
struct nvhost_hwctx_handler *(*alloc_hwctx_handler)(u32 syncpt,
|
||||
struct nvhost_channel *ch);
|
||||
|
||||
/* engine specific init functions */
|
||||
int (*pre_virt_init)(struct platform_device *pdev);
|
||||
int (*post_virt_init)(struct platform_device *pdev);
|
||||
|
||||
/* engine specific functions */
|
||||
int (*memory_init)(struct platform_device *pdev);
|
||||
|
||||
phys_addr_t carveout_addr;
|
||||
phys_addr_t carveout_size;
|
||||
|
||||
/* Information related to engine-side synchronization */
|
||||
void *syncpt_unit_interface;
|
||||
|
||||
u64 transcfg_addr;
|
||||
u32 transcfg_val;
|
||||
u64 mamask_addr;
|
||||
u32 mamask_val;
|
||||
u64 borps_addr;
|
||||
u32 borps_val;
|
||||
struct nvhost_vm_hwid vm_regs[13];
|
||||
|
||||
/* Actmon IRQ from hintstatus_r */
|
||||
unsigned int actmon_irq;
|
||||
|
||||
/* Is the device already forced on? */
|
||||
bool forced_on;
|
||||
|
||||
/* Should we map channel at submit time? */
|
||||
bool resource_policy;
|
||||
|
||||
/* Should we enable context isolation for this device? */
|
||||
bool isolate_contexts;
|
||||
|
||||
/* channel user context list */
|
||||
struct mutex userctx_list_lock;
|
||||
struct list_head userctx_list;
|
||||
|
||||
/* reset control for this device */
|
||||
struct reset_control *reset_control;
|
||||
|
||||
/* For loadable nvgpu module, we dynamically assign function
|
||||
* pointer of gk20a_debug_dump_device once the module loads */
|
||||
void *debug_dump_data;
|
||||
void (*debug_dump_device)(void *dev);
|
||||
|
||||
/* icc client id for emc requests */
|
||||
int icc_id;
|
||||
|
||||
/* icc_path handle handle */
|
||||
struct icc_path *icc_path_handle;
|
||||
|
||||
/* bandwidth manager client id for emc requests */
|
||||
int bwmgr_client_id;
|
||||
|
||||
/* bandwidth manager handle */
|
||||
struct tegra_bwmgr_client *bwmgr_handle;
|
||||
|
||||
/* number of frames mlock can be locked for */
|
||||
u32 mlock_timeout_factor;
|
||||
|
||||
/* eventlib id for the device */
|
||||
int eventlib_id;
|
||||
|
||||
/* deliver task timestamps for falcon */
|
||||
void (*enable_timestamps)(struct platform_device *pdev,
|
||||
struct nvhost_cdma *cdma, dma_addr_t timestamp_addr);
|
||||
|
||||
/* enable risc-v boot */
|
||||
bool enable_riscv_boot;
|
||||
|
||||
/* store the risc-v info */
|
||||
void *riscv_data;
|
||||
|
||||
/* name of riscv descriptor binary */
|
||||
char *riscv_desc_bin;
|
||||
|
||||
/* name of riscv image binary */
|
||||
char *riscv_image_bin;
|
||||
|
||||
};
|
||||
|
||||
|
||||
static inline
|
||||
struct nvhost_device_data *nvhost_get_devdata(struct platform_device *pdev)
|
||||
{
|
||||
return (struct nvhost_device_data *)platform_get_drvdata(pdev);
|
||||
}
|
||||
|
||||
static inline bool nvhost_dev_is_virtual(struct platform_device *pdev)
|
||||
{
|
||||
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
|
||||
|
||||
return pdata->virtual_dev;
|
||||
}
|
||||
|
||||
struct nvhost_device_power_attr {
|
||||
struct platform_device *ndev;
|
||||
struct kobj_attribute power_attr[NVHOST_POWER_SYSFS_ATTRIB_MAX];
|
||||
};
|
||||
|
||||
int flcn_intr_init(struct platform_device *pdev);
|
||||
int flcn_reload_fw(struct platform_device *pdev);
|
||||
int nvhost_flcn_prepare_poweroff(struct platform_device *pdev);
|
||||
int nvhost_flcn_finalize_poweron(struct platform_device *dev);
|
||||
|
||||
/* common runtime pm and power domain APIs */
|
||||
int nvhost_module_init(struct platform_device *ndev);
|
||||
void nvhost_module_deinit(struct platform_device *dev);
|
||||
void nvhost_module_reset(struct platform_device *dev, bool reboot);
|
||||
void nvhost_module_idle(struct platform_device *dev);
|
||||
void nvhost_module_idle_mult(struct platform_device *pdev, int refs);
|
||||
int nvhost_module_busy(struct platform_device *dev);
|
||||
extern const struct dev_pm_ops nvhost_module_pm_ops;
|
||||
|
||||
void host1x_writel(struct platform_device *dev, u32 r, u32 v);
|
||||
u32 host1x_readl(struct platform_device *dev, u32 r);
|
||||
|
||||
/* common device management APIs */
|
||||
int nvhost_client_device_get_resources(struct platform_device *dev);
|
||||
int nvhost_client_device_release(struct platform_device *dev);
|
||||
int nvhost_client_device_init(struct platform_device *dev);
|
||||
|
||||
/* public host1x sync-point management APIs */
|
||||
u32 nvhost_get_syncpt_host_managed(struct platform_device *pdev,
|
||||
u32 param, const char *syncpt_name);
|
||||
void nvhost_syncpt_put_ref_ext(struct platform_device *pdev, u32 id);
|
||||
bool nvhost_syncpt_is_valid_pt_ext(struct platform_device *dev, u32 id);
|
||||
void nvhost_syncpt_set_minval(struct platform_device *dev, u32 id, u32 val);
|
||||
void nvhost_syncpt_set_min_update(struct platform_device *pdev, u32 id, u32 val);
|
||||
u32 nvhost_syncpt_read_maxval(struct platform_device *dev, u32 id);
|
||||
u32 nvhost_syncpt_incr_max_ext(struct platform_device *dev, u32 id, u32 incrs);
|
||||
int nvhost_syncpt_is_expired_ext(struct platform_device *dev, u32 id,
|
||||
u32 thresh);
|
||||
dma_addr_t nvhost_syncpt_address(struct platform_device *engine_pdev, u32 id);
|
||||
int nvhost_syncpt_unit_interface_init(struct platform_device *pdev);
|
||||
|
||||
/* public host1x interrupt management APIs */
|
||||
int nvhost_intr_register_notifier(struct platform_device *pdev,
|
||||
u32 id, u32 thresh,
|
||||
void (*callback)(void *, int),
|
||||
void *private_data);
|
||||
|
||||
/* public host1x sync-point management APIs */
|
||||
#ifdef CONFIG_TEGRA_HOST1X
|
||||
|
||||
static inline struct flcn *get_flcn(struct platform_device *pdev)
|
||||
{
|
||||
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
|
||||
|
||||
return pdata ? pdata->falcon_data : NULL;
|
||||
}
|
||||
|
||||
static inline int nvhost_module_set_rate(struct platform_device *dev, void *priv,
|
||||
unsigned long constraint, int index,
|
||||
unsigned long attr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int nvhost_module_add_client(struct platform_device *dev, void *priv)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void nvhost_module_remove_client(struct platform_device *dev, void *priv) { }
|
||||
|
||||
static inline int nvhost_syncpt_get_cv_dev_address_table(struct platform_device *engine_pdev,
|
||||
int *count, dma_addr_t **table)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static inline const struct firmware *
|
||||
nvhost_client_request_firmware(struct platform_device *dev,
|
||||
const char *fw_name, bool warn)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void nvhost_debug_dump_device(struct platform_device *pdev)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int nvhost_fence_create_fd(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name,
|
||||
s32 *fence_fd)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline int nvhost_fence_foreach_pt(
|
||||
struct nvhost_fence *fence,
|
||||
int (*iter)(struct nvhost_ctrl_sync_fence_info, void *),
|
||||
void *data)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
|
||||
int num_cmdbufs, int num_relocs, int num_waitchks,
|
||||
int num_syncpts)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void nvhost_job_put(struct nvhost_job *job) {}
|
||||
|
||||
static inline int nvhost_job_add_client_gather_address(struct nvhost_job *job,
|
||||
u32 num_words, u32 class_id, dma_addr_t gather_address)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline int nvhost_channel_map(struct nvhost_device_data *pdata,
|
||||
struct nvhost_channel **ch,
|
||||
void *identifier)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline int nvhost_channel_submit(struct nvhost_job *job)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline void nvhost_putchannel(struct nvhost_channel *ch, int cnt) {}
|
||||
|
||||
static inline struct nvhost_fence *nvhost_fence_get(int fd)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void nvhost_fence_put(struct nvhost_fence *fence) {}
|
||||
|
||||
static inline int nvhost_fence_num_pts(struct nvhost_fence *fence)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline dma_addr_t nvhost_t194_get_reloc_phys_addr(dma_addr_t phys_addr,
|
||||
u32 reloc_type)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline dma_addr_t nvhost_t23x_get_reloc_phys_addr(dma_addr_t phys_addr,
|
||||
u32 reloc_type)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void nvhost_eventlib_log_task(struct platform_device *pdev,
|
||||
u32 syncpt_id,
|
||||
u32 syncpt_thres,
|
||||
u64 timestamp_start,
|
||||
u64 timestamp_end)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void nvhost_eventlib_log_submit(struct platform_device *pdev,
|
||||
u32 syncpt_id,
|
||||
u32 syncpt_thresh,
|
||||
u64 timestamp)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void nvhost_eventlib_log_fences(struct platform_device *pdev,
|
||||
u32 task_syncpt_id,
|
||||
u32 task_syncpt_thresh,
|
||||
struct nvdev_fence *fences,
|
||||
u8 num_fences,
|
||||
u32 kind,
|
||||
u64 timestamp)
|
||||
{
|
||||
}
|
||||
#else
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
void nvhost_register_dump_device(
|
||||
struct platform_device *dev,
|
||||
void (*nvgpu_debug_dump_device)(void *),
|
||||
void *data);
|
||||
void nvhost_unregister_dump_device(struct platform_device *dev);
|
||||
#else
|
||||
static inline void nvhost_register_dump_device(
|
||||
struct platform_device *dev,
|
||||
void (*nvgpu_debug_dump_device)(void *),
|
||||
void *data)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void nvhost_unregister_dump_device(struct platform_device *dev)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
void host1x_channel_writel(struct nvhost_channel *ch, u32 r, u32 v);
|
||||
u32 host1x_channel_readl(struct nvhost_channel *ch, u32 r);
|
||||
|
||||
void host1x_sync_writel(struct nvhost_master *dev, u32 r, u32 v);
|
||||
u32 host1x_sync_readl(struct nvhost_master *dev, u32 r);
|
||||
|
||||
/* public host1x power management APIs */
|
||||
bool nvhost_module_powered_ext(struct platform_device *dev);
|
||||
/* This power ON only host1x and doesn't power ON module */
|
||||
int nvhost_module_busy_ext(struct platform_device *dev);
|
||||
/* This power OFF only host1x and doesn't power OFF module */
|
||||
void nvhost_module_idle_ext(struct platform_device *dev);
|
||||
|
||||
/* public api to return platform_device ptr to the default host1x instance */
|
||||
struct platform_device *nvhost_get_default_device(void);
|
||||
|
||||
/* Public PM nvhost APIs. */
|
||||
/* This power ON both host1x and module */
|
||||
int nvhost_module_busy(struct platform_device *dev);
|
||||
/* This power OFF both host1x and module */
|
||||
void nvhost_module_idle(struct platform_device *dev);
|
||||
|
||||
/* public api to register/unregister a subdomain */
|
||||
void nvhost_register_client_domain(struct generic_pm_domain *domain);
|
||||
void nvhost_unregister_client_domain(struct generic_pm_domain *domain);
|
||||
|
||||
int nvhost_module_add_client(struct platform_device *dev,
|
||||
void *priv);
|
||||
void nvhost_module_remove_client(struct platform_device *dev,
|
||||
void *priv);
|
||||
|
||||
int nvhost_module_set_rate(struct platform_device *dev, void *priv,
|
||||
unsigned long constraint, int index, unsigned long attr);
|
||||
|
||||
/* public APIs required to submit in-kernel work */
|
||||
int nvhost_channel_map(struct nvhost_device_data *pdata,
|
||||
struct nvhost_channel **ch,
|
||||
void *identifier);
|
||||
void nvhost_putchannel(struct nvhost_channel *ch, int cnt);
|
||||
/* Allocate memory for a job. Just enough memory will be allocated to
|
||||
* accomodate the submit announced in submit header.
|
||||
*/
|
||||
struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
|
||||
int num_cmdbufs, int num_relocs, int num_waitchks,
|
||||
int num_syncpts);
|
||||
/* Decrement reference job, free if goes to zero. */
|
||||
void nvhost_job_put(struct nvhost_job *job);
|
||||
|
||||
/* Add a gather with IOVA address to job */
|
||||
int nvhost_job_add_client_gather_address(struct nvhost_job *job,
|
||||
u32 num_words, u32 class_id, dma_addr_t gather_address);
|
||||
int nvhost_channel_submit(struct nvhost_job *job);
|
||||
|
||||
/* public host1x sync-point management APIs */
|
||||
u32 nvhost_get_syncpt_client_managed(struct platform_device *pdev,
|
||||
const char *syncpt_name);
|
||||
void nvhost_syncpt_get_ref_ext(struct platform_device *pdev, u32 id);
|
||||
const char *nvhost_syncpt_get_name(struct platform_device *dev, int id);
|
||||
void nvhost_syncpt_cpu_incr_ext(struct platform_device *dev, u32 id);
|
||||
int nvhost_syncpt_read_ext_check(struct platform_device *dev, u32 id, u32 *val);
|
||||
int nvhost_syncpt_wait_timeout_ext(struct platform_device *dev, u32 id, u32 thresh,
|
||||
u32 timeout, u32 *value, struct timespec64 *ts);
|
||||
int nvhost_syncpt_create_fence_single_ext(struct platform_device *dev,
|
||||
u32 id, u32 thresh, const char *name, int *fence_fd);
|
||||
void nvhost_syncpt_set_min_eq_max_ext(struct platform_device *dev, u32 id);
|
||||
int nvhost_syncpt_nb_pts_ext(struct platform_device *dev);
|
||||
bool nvhost_syncpt_is_valid_pt_ext(struct platform_device *dev, u32 id);
|
||||
u32 nvhost_syncpt_read_minval(struct platform_device *dev, u32 id);
|
||||
void nvhost_syncpt_set_maxval(struct platform_device *dev, u32 id, u32 val);
|
||||
int nvhost_syncpt_fd_get_ext(int fd, struct platform_device *pdev, u32 *id);
|
||||
|
||||
void nvhost_eventlib_log_task(struct platform_device *pdev,
|
||||
u32 syncpt_id,
|
||||
u32 syncpt_thres,
|
||||
u64 timestamp_start,
|
||||
u64 timestamp_end);
|
||||
|
||||
void nvhost_eventlib_log_submit(struct platform_device *pdev,
|
||||
u32 syncpt_id,
|
||||
u32 syncpt_thresh,
|
||||
u64 timestamp);
|
||||
|
||||
void nvhost_eventlib_log_fences(struct platform_device *pdev,
|
||||
u32 task_syncpt_id,
|
||||
u32 task_syncpt_thresh,
|
||||
struct nvdev_fence *fences,
|
||||
u8 num_fences,
|
||||
u32 kind,
|
||||
u64 timestamp);
|
||||
|
||||
dma_addr_t nvhost_t194_get_reloc_phys_addr(dma_addr_t phys_addr,
|
||||
u32 reloc_type);
|
||||
dma_addr_t nvhost_t23x_get_reloc_phys_addr(dma_addr_t phys_addr,
|
||||
u32 reloc_type);
|
||||
|
||||
/* public host1x interrupt management APIs */
|
||||
int nvhost_intr_register_fast_notifier(struct platform_device *pdev,
|
||||
u32 id, u32 thresh,
|
||||
void (*callback)(void *, int),
|
||||
void *private_data);
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST) && defined(CONFIG_DEBUG_FS)
|
||||
void nvhost_debug_dump_device(struct platform_device *pdev);
|
||||
#else
|
||||
static inline void nvhost_debug_dump_device(struct platform_device *pdev)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST)
|
||||
const struct firmware *
|
||||
nvhost_client_request_firmware(struct platform_device *dev,
|
||||
const char *fw_name, bool warn);
|
||||
#else
|
||||
static inline const struct firmware *
|
||||
nvhost_client_request_firmware(struct platform_device *dev,
|
||||
const char *fw_name, bool warn)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST_SYNC)
|
||||
|
||||
int nvhost_fence_foreach_pt(
|
||||
struct nvhost_fence *fence,
|
||||
int (*iter)(struct nvhost_ctrl_sync_fence_info, void *),
|
||||
void *data);
|
||||
|
||||
int nvhost_fence_get_pt(
|
||||
struct nvhost_fence *fence, size_t i,
|
||||
u32 *id, u32 *threshold);
|
||||
|
||||
struct nvhost_fence *nvhost_fence_create(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name);
|
||||
int nvhost_fence_create_fd(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name,
|
||||
s32 *fence_fd);
|
||||
|
||||
struct nvhost_fence *nvhost_fence_get(int fd);
|
||||
struct nvhost_fence *nvhost_fence_dup(struct nvhost_fence *fence);
|
||||
int nvhost_fence_num_pts(struct nvhost_fence *fence);
|
||||
int nvhost_fence_install(struct nvhost_fence *fence, int fence_fd);
|
||||
void nvhost_fence_put(struct nvhost_fence *fence);
|
||||
void nvhost_fence_wait(struct nvhost_fence *fence, u32 timeout_in_ms);
|
||||
|
||||
#else
|
||||
|
||||
static inline int nvhost_fence_foreach_pt(
|
||||
struct nvhost_fence *fence,
|
||||
int (*iter)(struct nvhost_ctrl_sync_fence_info, void *d),
|
||||
void *d)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline struct nvhost_fence *nvhost_create_fence(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name)
|
||||
{
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static inline int nvhost_fence_create_fd(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name,
|
||||
s32 *fence_fd)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline struct nvhost_fence *nvhost_fence_get(int fd)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline int nvhost_fence_num_pts(struct nvhost_fence *fence)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void nvhost_fence_put(struct nvhost_fence *fence)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void nvhost_fence_wait(struct nvhost_fence *fence, u32 timeout_in_ms)
|
||||
{
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST_SYNC) && !defined(CONFIG_SYNC)
|
||||
int nvhost_dma_fence_unpack(struct dma_fence *fence, u32 *id, u32 *threshold);
|
||||
bool nvhost_dma_fence_is_waitable(struct dma_fence *fence);
|
||||
#else
|
||||
static inline int nvhost_dma_fence_unpack(struct dma_fence *fence, u32 *id,
|
||||
u32 *threshold)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
static inline bool nvhost_dma_fence_is_waitable(struct dma_fence *fence)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST_SYNC) && defined(CONFIG_SYNC)
|
||||
struct sync_fence *nvhost_sync_fdget(int fd);
|
||||
int nvhost_sync_num_pts(struct sync_fence *fence);
|
||||
struct sync_fence *nvhost_sync_create_fence(struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts, const char *name);
|
||||
int nvhost_sync_create_fence_fd(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name,
|
||||
s32 *fence_fd);
|
||||
int nvhost_sync_fence_set_name(int fence_fd, const char *name);
|
||||
u32 nvhost_sync_pt_id(struct sync_pt *__pt);
|
||||
u32 nvhost_sync_pt_thresh(struct sync_pt *__pt);
|
||||
struct sync_pt *nvhost_sync_pt_from_fence_index(struct sync_fence *fence,
|
||||
u32 sync_pt_index);
|
||||
#else
|
||||
static inline struct sync_fence *nvhost_sync_fdget(int fd)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline int nvhost_sync_num_pts(struct sync_fence *fence)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct sync_fence *nvhost_sync_create_fence(struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts, const char *name)
|
||||
{
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static inline int nvhost_sync_create_fence_fd(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name,
|
||||
s32 *fence_fd)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline int nvhost_sync_fence_set_name(int fence_fd, const char *name)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline u32 nvhost_sync_pt_id(struct sync_pt *__pt)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline u32 nvhost_sync_pt_thresh(struct sync_pt *__pt)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct sync_pt *nvhost_sync_pt_from_fence_index(
|
||||
struct sync_fence *fence, u32 sync_pt_index)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Hacky way to get access to struct nvhost_device_data for VI device. */
|
||||
extern struct nvhost_device_data t20_vi_info;
|
||||
extern struct nvhost_device_data t30_vi_info;
|
||||
extern struct nvhost_device_data t11_vi_info;
|
||||
extern struct nvhost_device_data t14_vi_info;
|
||||
|
||||
int nvdec_do_idle(void);
|
||||
int nvdec_do_unidle(void);
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
drivers/crypto/tegra-se-nvhost.c (new file, 7805 lines)
File diff suppressed because it is too large.

drivers/crypto/tegra-se-nvhost.h (new file, 518 lines)
@@ -0,0 +1,518 @@
/*
 * Header file for Tegra Security Engine
 *
 * Copyright (c) 2015-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#ifndef _CRYPTO_TEGRA_SE_H
#define _CRYPTO_TEGRA_SE_H

#include <crypto/hash.h>
#include <crypto/sha1.h>
|
||||
|
||||
#define PFX "tegra-se-nvhost: "
|
||||
|
||||
#define ENCRYPT 1
|
||||
#define DECRYPT 0
|
||||
|
||||
#define TEGRA_SE_CRA_PRIORITY 300
|
||||
#define TEGRA_SE_COMPOSITE_PRIORITY 400
|
||||
#define TEGRA_SE_CRYPTO_QUEUE_LENGTH 100
|
||||
#define SE_MAX_SRC_SG_COUNT 50
|
||||
#define SE_MAX_DST_SG_COUNT 50
|
||||
|
||||
#define TEGRA_SE_KEYSLOT_COUNT 16
|
||||
#define SE_MAX_LAST_BLOCK_SIZE 0xFFFFF
|
||||
|
||||
/* SE register definitions */
|
||||
#define SE1_AES0_CONFIG_REG_OFFSET 0x204
|
||||
#define SE2_AES1_CONFIG_REG_OFFSET 0x404
|
||||
|
||||
#define SE_AES_CRYPTO_CONFIG_OFFSET 0x4
|
||||
#define SE_AES_IN_ADDR_OFFSET 0x8
|
||||
#define SE_AES_IN_ADDR_HI_OFFSET 0xC
|
||||
#define SE_AES_OUT_ADDR_OFFSET 0x10
|
||||
#define SE_AES_OUT_ADDR_HI_OFFSET 0x14
|
||||
#define SE_AES_CRYPTO_LINEAR_CTR 0x18
|
||||
#define SE_AES_CRYPTO_LAST_BLOCK_OFFSET 0x28
|
||||
#define SE_AES_OPERATION_OFFSET 0x34
|
||||
#define SE_AES_CRYPTO_KEYTABLE_ADDR_OFFSET 0xB8
|
||||
#define SE_AES_CRYPTO_KEYTABLE_DATA_OFFSET 0xBC
|
||||
#define SE_AES_CRYPTO_CTR_SPARE 0xE0
|
||||
#define SE_AES_CTR_LITTLE_ENDIAN 1
|
||||
|
||||
#define SE_CONFIG_ENC_ALG_SHIFT 12
#define SE_CONFIG_DEC_ALG_SHIFT 8
#define ALG_AES_ENC 1
#define ALG_RNG 2
#define ALG_SHA 3
#define ALG_RSA 4
#define ALG_NOP 0
#define ALG_AES_DEC 1
#define ALG_KEYFETCH 5
#define ALG_HMAC 7
#define ALG_KDF 8
#define ALG_INS 13
#define SE_CONFIG_ENC_ALG(x) (x << SE_CONFIG_ENC_ALG_SHIFT)
#define SE_CONFIG_DEC_ALG(x) (x << SE_CONFIG_DEC_ALG_SHIFT)
#define SE_CONFIG_DST_SHIFT 2
#define DST_MEMORY 0
#define DST_HASHREG 1
#define DST_KEYTAB 2
#define DST_SRK 3
#define DST_RSAREG 4
#define SE_CONFIG_DST(x) (x << SE_CONFIG_DST_SHIFT)
#define SE_CONFIG_ENC_MODE_SHIFT 24
#define SE_CONFIG_DEC_MODE_SHIFT 16
#define MODE_KEY128 0
#define MODE_KEY192 1
#define MODE_KEY256 2
#define MODE_GMAC 3
#define MODE_GCM 4
#define MODE_GCM_FINAL 5
#define MODE_CMAC 7

#define MODE_SHA1 0
#define MODE_SHA224 4
#define MODE_SHA256 5
#define MODE_SHA384 6
#define MODE_SHA512 7
#define MODE_SHA3_224 9
#define MODE_SHA3_256 10
#define MODE_SHA3_384 11
#define MODE_SHA3_512 12
#define MODE_SHAKE128 13
#define MODE_SHAKE256 14
#define MODE_HMAC_SHA256_1KEY 0
#define MODE_HMAC_SHA256_2KEY 1
#define SE_CONFIG_ENC_MODE(x) (x << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE(x) (x << SE_CONFIG_DEC_MODE_SHIFT)
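The ENC_ALG, ENC_MODE, DEC_ALG, and DST fields above combine into a single SE config word. A minimal sketch of that composition follows; the function name and the AES-256-encrypt-to-memory choice are illustrative assumptions, not code from this driver.

/* Illustrative only: compose an SE config value for AES-256 encryption
 * with the result written to memory, using the field macros above.
 */
static inline u32 example_se_config_aes256_encrypt(void)
{
        return SE_CONFIG_ENC_ALG(ALG_AES_ENC) |
               SE_CONFIG_ENC_MODE(MODE_KEY256) |
               SE_CONFIG_DEC_ALG(ALG_NOP) |
               SE_CONFIG_DST(DST_MEMORY);
}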
#define SE_RNG_CONFIG_REG_OFFSET 0x234
|
||||
#define DRBG_MODE_SHIFT 0
|
||||
#define DRBG_MODE_NORMAL 0
|
||||
#define DRBG_MODE_FORCE_INSTANTION 1
|
||||
#define DRBG_MODE_FORCE_RESEED 2
|
||||
#define SE_RNG_CONFIG_MODE(x) (x << DRBG_MODE_SHIFT)
|
||||
|
||||
#define SE_RNG_SRC_CONFIG_REG_OFFSET 0x2d8
|
||||
#define DRBG_RO_ENT_SRC_SHIFT 1
|
||||
#define DRBG_RO_ENT_SRC_ENABLE 1
|
||||
#define DRBG_RO_ENT_SRC_DISABLE 0
|
||||
#define SE_RNG_SRC_CONFIG_RO_ENT_SRC(x) (x << DRBG_RO_ENT_SRC_SHIFT)
|
||||
#define DRBG_RO_ENT_SRC_LOCK_SHIFT 0
|
||||
#define DRBG_RO_ENT_SRC_LOCK_ENABLE 1
|
||||
#define DRBG_RO_ENT_SRC_LOCK_DISABLE 0
|
||||
#define SE_RNG_SRC_CONFIG_RO_ENT_SRC_LOCK(x) (x << DRBG_RO_ENT_SRC_LOCK_SHIFT)
|
||||
|
||||
#define DRBG_SRC_SHIFT 2
|
||||
#define DRBG_SRC_NONE 0
|
||||
#define DRBG_SRC_ENTROPY 1
|
||||
#define DRBG_SRC_LFSR 2
|
||||
#define SE_RNG_CONFIG_SRC(x) (x << DRBG_SRC_SHIFT)
|
||||
|
||||
#define SE_RNG_RESEED_INTERVAL_REG_OFFSET 0x2dc
|
||||
|
||||
#define SE_KEYTABLE_REG_OFFSET 0x31c
|
||||
#define SE_CRYPTO_KEYIV_PKT_SUBKEY_SEL_SHIFT 3
|
||||
#define SE_CRYPTO_KEYIV_PKT_SUBKEY_SEL(x) \
|
||||
(x << SE_CRYPTO_KEYIV_PKT_SUBKEY_SEL_SHIFT)
|
||||
#define SUBKEY_SEL_KEY1 0
|
||||
#define SUBKEY_SEL_KEY2 1
|
||||
#define SE_KEYTABLE_SLOT_SHIFT 4
|
||||
#define SE_KEYTABLE_SLOT(x) (x << SE_KEYTABLE_SLOT_SHIFT)
|
||||
#define SE_KEYTABLE_QUAD_SHIFT 2
|
||||
#define QUAD_KEYS_128 0
|
||||
#define QUAD_KEYS_192 1
|
||||
#define QUAD_KEYS_256 1
|
||||
#define QUAD_ORG_IV 2
|
||||
#define QUAD_UPDTD_IV 3
|
||||
#define SE_KEYTABLE_QUAD(x) (x << SE_KEYTABLE_QUAD_SHIFT)
|
||||
#define SE_KEYTABLE_OP_TYPE_SHIFT 9
|
||||
#define OP_READ 0
|
||||
#define OP_WRITE 1
|
||||
#define SE_KEYTABLE_OP_TYPE(x) (x << SE_KEYTABLE_OP_TYPE_SHIFT)
|
||||
#define SE_KEYTABLE_TABLE_SEL_SHIFT 8
|
||||
#define TABLE_KEYIV 0
|
||||
#define TABLE_SCHEDULE 1
|
||||
#define SE_KEYTABLE_TABLE_SEL(x) (x << SE_KEYTABLE_TABLE_SEL_SHIFT)
|
||||
#define SE_KEYTABLE_PKT_SHIFT 0
|
||||
#define SE_KEYTABLE_PKT(x) (x << SE_KEYTABLE_PKT_SHIFT)
|
||||
|
||||
#define SE_OP_DONE_SHIFT 4
|
||||
#define OP_DONE 1
|
||||
#define SE_OP_DONE(x, y) ((x) && (y << SE_OP_DONE_SHIFT))
|
||||
|
||||
#define SE_CRYPTO_HASH_SHIFT 0
|
||||
#define HASH_DISABLE 0
|
||||
#define HASH_ENABLE 1
|
||||
#define SE_CRYPTO_HASH(x) (x << SE_CRYPTO_HASH_SHIFT)
|
||||
|
||||
#define SE4_SHA_IN_ADDR_OFFSET 0x8
|
||||
#define SE4_SHA_TASK_CONFIG 0x108
|
||||
#define HW_INIT_HASH_DISABLE 0
|
||||
#define HW_INIT_HASH_ENABLE 1
|
||||
#define SE4_HW_INIT_HASH_SHIFT 0
|
||||
#define SE4_HW_INIT_HASH(x) (x << SE4_HW_INIT_HASH_SHIFT)
|
||||
|
||||
#define SE_CRYPTO_XOR_POS_SHIFT 1
|
||||
#define XOR_BYPASS 0
|
||||
#define XOR_BOTH 1
|
||||
#define XOR_TOP 2
|
||||
#define XOR_BOTTOM 3
|
||||
#define SE_CRYPTO_XOR_POS(x) (x << SE_CRYPTO_XOR_POS_SHIFT)
|
||||
#define SE_CRYPTO_INPUT_SEL_SHIFT 3
|
||||
#define INPUT_MEMORY 0
|
||||
#define INPUT_RANDOM 1
|
||||
#define INPUT_AESOUT 2
|
||||
#define INPUT_LNR_CTR 3
|
||||
#define SE_CRYPTO_INPUT_SEL(x) (x << SE_CRYPTO_INPUT_SEL_SHIFT)
|
||||
#define SE_CRYPTO_VCTRAM_SEL_SHIFT 5
|
||||
#define VCTRAM_MEMORY 0
|
||||
#define VCTRAM_TWEAK 1
|
||||
#define VCTRAM_AESOUT 2
|
||||
#define VCTRAM_PREVAHB 3
|
||||
#define SE_CRYPTO_VCTRAM_SEL(x) (x << SE_CRYPTO_VCTRAM_SEL_SHIFT)
|
||||
#define SE_CRYPTO_IV_SEL_SHIFT 7
|
||||
#define IV_ORIGINAL 0
|
||||
#define IV_UPDATED 1
|
||||
#define IV_REG 2
|
||||
#define SE_CRYPTO_IV_SEL(x) (x << SE_CRYPTO_IV_SEL_SHIFT)
|
||||
#define SE_CRYPTO_CORE_SEL_SHIFT 9
|
||||
#define CORE_DECRYPT 0
|
||||
#define CORE_ENCRYPT 1
|
||||
#define SE_CRYPTO_CORE_SEL(x) (x << SE_CRYPTO_CORE_SEL_SHIFT)
|
||||
#define SE_CRYPTO_KEY2_INDEX_SHIFT 28
|
||||
#define SE_CRYPTO_KEY2_INDEX(x) (x << SE_CRYPTO_KEY2_INDEX_SHIFT)
|
||||
#define SE_CRYPTO_KEY_INDEX_SHIFT 24
|
||||
#define SE_CRYPTO_KEY_INDEX(x) (x << SE_CRYPTO_KEY_INDEX_SHIFT)
|
||||
#define SE_CRYPTO_CTR_CNTN_SHIFT 11
|
||||
#define SE_CRYPTO_CTR_CNTN(x) (x << SE_CRYPTO_CTR_CNTN_SHIFT)
|
||||
|
||||
#define SE_CRYPTO_CTR_REG_COUNT 4
|
||||
|
||||
#define OP_START 1
|
||||
#define OP_RESTART_OUT 2
|
||||
#define OP_CTX_SAVE 3
|
||||
#define OP_RESTART_IN 4
|
||||
#define OP_RESTART_INOUT 5
|
||||
#define OP_DUMMY 6
|
||||
#define SE_OPERATION_OP_SHIFT 0
|
||||
#define SE_OPERATION_OP(x) (x << SE_OPERATION_OP_SHIFT)
|
||||
|
||||
#define SE_OPERATION_LASTBUF_SHIFT 16
|
||||
#define SE_OPERATION_LASTBUF(x) (x << SE_OPERATION_LASTBUF_SHIFT)
|
||||
#define LASTBUF_TRUE 1
|
||||
#define LASTBUF_FALSE 0
|
||||
|
||||
#define SE_OPERATION_WRSTALL_SHIFT 15
|
||||
#define SE_OPERATION_WRSTALL(x) (x << SE_OPERATION_WRSTALL_SHIFT)
|
||||
#define WRSTALL_TRUE 1
|
||||
#define WRSTALL_FALSE 0
|
||||
|
||||
#define SE_OPERATION_FINAL_SHIFT 5
|
||||
#define SE_OPERATION_FINAL(x) (x << SE_OPERATION_FINAL_SHIFT)
|
||||
#define FINAL_TRUE 1
|
||||
#define FINAL_FALSE 0
|
||||
|
||||
#define SE_OPERATION_INIT_SHIFT 4
|
||||
#define SE_OPERATION_INIT(x) (x << SE_OPERATION_INIT_SHIFT)
|
||||
#define INIT_TRUE 1
|
||||
#define INIT_FALSE 0
|
||||
|
||||
#define SE_ADDR_HI_MSB_SHIFT 24
|
||||
#define SE_ADDR_HI_SZ_SHIFT 0
|
||||
#define SE_ADDR_HI_MSB(x) (x << SE_ADDR_HI_MSB_SHIFT)
|
||||
#define MSB(x) ((x & 0xFF00000000) >> 32)
|
||||
#define SE_ADDR_HI_SZ(x) (x << SE_ADDR_HI_SZ_SHIFT)
|
||||
|
||||
#define SE_LAST_BLOCK_RESIDUAL_BITS_SHIFT 20
|
||||
#define SE_LAST_BLOCK_RESIDUAL_BITS(x) (x << SE_LAST_BLOCK_RESIDUAL_BITS_SHIFT)
|
||||
|
||||
#define SE_BUFF_SIZE_MASK 0xFF000000
|
||||
|
||||
#define SE_MAX_TASKS_PER_SUBMIT 64
|
||||
#define SE_MAX_SUBMIT_CHAIN_SZ 10
|
||||
#define SE_WORD_SIZE_BYTES 4
|
||||
|
||||
#define SE_MAX_MEM_ALLOC 4194304
|
||||
#define SE_MAX_GATHER_BUF_SZ 32768
|
||||
#define SE_MAX_AESBUF_ALLOC (SE_MAX_MEM_ALLOC / SE_MAX_GATHER_BUF_SZ)
|
||||
#define SE_MAX_AESBUF_TIMEOUT (20 * SE_MAX_AESBUF_ALLOC)
|
||||
|
||||
/* FIXME: The below 2 macros should fine tuned
|
||||
* based on discussions with CPU team
|
||||
*/
|
||||
#define SE_MAX_CMDBUF_TIMEOUT (200 * SE_MAX_SUBMIT_CHAIN_SZ)
|
||||
#define SE_WAIT_UDELAY 500 /* micro seconds */
|
||||
|
||||
#define SE_KEYSLOT_TIMEOUT 100
|
||||
#define SE_KEYSLOT_MDELAY 1000
|
||||
|
||||
#define SE_INT_ENABLE_REG_OFFSET 0x88
|
||||
#define SE1_INT_ENABLE_SHIFT 1
|
||||
#define SE1_INT_ENABLE(x) (x << SE1_INT_ENABLE_SHIFT)
|
||||
#define SE2_INT_ENABLE_SHIFT 0
|
||||
#define SE2_INT_ENABLE(x) (x << SE2_INT_ENABLE_SHIFT)
|
||||
#define SE3_INT_ENABLE_SHIFT 2
|
||||
#define SE3_INT_ENABLE(x) (x << SE3_INT_ENABLE_SHIFT)
|
||||
#define SE4_INT_ENABLE_SHIFT 3
|
||||
#define SE4_INT_ENABLE(x) (x << SE4_INT_ENABLE_SHIFT)
|
||||
|
||||
#define INT_DISABLE 0
|
||||
#define INT_ENABLE 1
|
||||
|
||||
#define SE1_AES0_INT_ENABLE_OFFSET 0x2EC
|
||||
#define SE2_AES1_INT_ENABLE_OFFSET 0x4EC
|
||||
#define SE3_RSA_INT_ENABLE_OFFSET 0x754
|
||||
#define SE4_SHA_INT_ENABLE_OFFSET 0x180
|
||||
|
||||
#define SE1_AES0_INT_STATUS_REG_OFFSET 0x2F0
|
||||
#define SE2_AES1_INT_STATUS_REG_OFFSET 0x4F0
|
||||
#define SE3_RSA_INT_STATUS_REG_OFFSET 0x758
|
||||
#define SE4_SHA_INT_STATUS_REG_OFFSET 0x184
|
||||
|
||||
#define SE_CRYPTO_KEYTABLE_DST_REG_OFFSET 0X330
|
||||
#define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT 0
|
||||
#define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD(x) \
|
||||
(x << SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT)
|
||||
|
||||
#define SE_KEYTABLE_QUAD_SIZE_BYTES 16
|
||||
|
||||
#define SE_SPARE_0_REG_OFFSET 0x80c
|
||||
|
||||
#define TEGRA_SE_SHA_MAX_BLOCK_SIZE 128
|
||||
|
||||
#define SE4_SHA_CONFIG_REG_OFFSET 0x104
|
||||
#define SE_SHA_MSG_LENGTH_OFFSET 0x18
|
||||
#define SE_SHA_OPERATION_OFFSET 0x78
|
||||
#define SE_SHA_HASH_LENGTH 0xa8
|
||||
|
||||
#define SHA_DISABLE 0
|
||||
#define SHA_ENABLE 1
|
||||
|
||||
#define SE_HASH_RESULT_REG_OFFSET 0x13c
|
||||
#define SE_CMAC_RESULT_REG_OFFSET 0x4c4
|
||||
#define T234_SE_CMAC_RESULT_REG_OFFSET 0x0c0
|
||||
|
||||
#define SE_STATIC_MEM_ALLOC_BUFSZ 512
|
||||
|
||||
#define TEGRA_SE_KEY_256_SIZE 32
|
||||
#define TEGRA_SE_KEY_512_SIZE 64
|
||||
#define TEGRA_SE_KEY_192_SIZE 24
|
||||
#define TEGRA_SE_KEY_128_SIZE 16
|
||||
#define TEGRA_SE_AES_BLOCK_SIZE 16
|
||||
#define TEGRA_SE_AES_MIN_KEY_SIZE 16
|
||||
#define TEGRA_SE_AES_MAX_KEY_SIZE 64
|
||||
#define TEGRA_SE_AES_IV_SIZE 16
|
||||
#define TEGRA_SE_RNG_IV_SIZE 16
|
||||
#define TEGRA_SE_RNG_DT_SIZE 16
|
||||
#define TEGRA_SE_RNG_KEY_SIZE 16
|
||||
#define TEGRA_SE_RNG_SEED_SIZE (TEGRA_SE_RNG_IV_SIZE + \
|
||||
TEGRA_SE_RNG_KEY_SIZE + \
|
||||
TEGRA_SE_RNG_DT_SIZE)
|
||||
#define TEGRA_SE_AES_CMAC_DIGEST_SIZE 16
|
||||
#define TEGRA_SE_AES_CBC_MAC_DIGEST_SIZE 16
|
||||
#define TEGRA_SE_RSA512_INPUT_SIZE 64
|
||||
#define TEGRA_SE_RSA1024_INPUT_SIZE 128
|
||||
#define TEGRA_SE_RSA1536_INPUT_SIZE 192
|
||||
#define TEGRA_SE_RSA2048_INPUT_SIZE 256
|
||||
|
||||
#define TEGRA_SE_AES_CMAC_STATE_SIZE 16
|
||||
#define SHA1_STATE_SIZE 20
|
||||
#define SHA224_STATE_SIZE 32
|
||||
#define SHA256_STATE_SIZE 32
|
||||
#define SHA384_STATE_SIZE 64
|
||||
#define SHA512_STATE_SIZE 64
|
||||
#define SHA3_224_STATE_SIZE 200
|
||||
#define SHA3_256_STATE_SIZE 200
|
||||
#define SHA3_384_STATE_SIZE 200
|
||||
#define SHA3_512_STATE_SIZE 200
|
||||
|
||||
#define TEGRA_SE_RSA_KEYSLOT_COUNT 4
|
||||
#define SE_RSA_OUTPUT 0x628
|
||||
|
||||
#define RSA_KEY_SLOT_ONE 0
|
||||
#define RSA_KEY_SLOT_TW0 1
|
||||
#define RSA_KEY_SLOT_THREE 2
|
||||
#define RSA_KEY_SLOT_FOUR 3
|
||||
#define RSA_KEY_NUM_SHIFT 7
|
||||
#define RSA_KEY_NUM(x) (x << RSA_KEY_NUM_SHIFT)
|
||||
|
||||
#define RSA_KEY_TYPE_EXP 0
|
||||
#define RSA_KEY_TYPE_MOD 1
|
||||
#define RSA_KEY_TYPE_SHIFT 6
|
||||
#define RSA_KEY_TYPE(x) (x << RSA_KEY_TYPE_SHIFT)
|
||||
|
||||
#define RSA_KEY_SLOT_SHIFT 23
|
||||
#define RSA_KEY_SLOT(x) (x << RSA_KEY_SLOT_SHIFT)
|
||||
|
||||
#define SE3_RSA_CONFIG_REG_OFFSET 0x604
|
||||
#define SE_RSA_OPERATION_OFFSET 0x20
|
||||
#define SE_RSA_KEYTABLE_ADDR_OFFSET 0x148
|
||||
#define SE_RSA_KEYTABLE_DATA_OFFSET 0x14C
|
||||
|
||||
#define RSA_KEY_PKT_WORD_ADDR_SHIFT 0
|
||||
#define RSA_KEY_PKT_WORD_ADDR(x) (x << RSA_KEY_PKT_WORD_ADDR_SHIFT)
|
||||
|
||||
#define SE_RSA_KEYTABLE_PKT_SHIFT 0
|
||||
#define SE_RSA_KEYTABLE_PKT(x) (x << SE_RSA_KEYTABLE_PKT_SHIFT)
|
||||
|
||||
#define SE_MAGIC_PATTERN 0x4E56
|
||||
#define SE_STORE_KEY_IN_MEM 0x0001
|
||||
#define SE_SLOT_NUM_MASK 0xF000
|
||||
#define SE_SLOT_POSITION 12
|
||||
#define SE_KEY_LEN_MASK 0x3FF
|
||||
#define SE_MAGIC_PATTERN_OFFSET 16
|
||||
#define SE_STREAMID_REG_OFFSET 0x90
|
||||
|
||||
#define SE_AES_CRYPTO_AAD_LENGTH_0_OFFSET 0x128
|
||||
#define SE_AES_CRYPTO_MSG_LENGTH_0_OFFSET 0x130
|
||||
|
||||
#define SE_AES_GCM_GMAC_SIZE 16
|
||||
|
||||
/* Key manifest */
|
||||
#define SE_KEYMANIFEST_ORIGIN(x) (x << 0)
|
||||
|
||||
#define SE_KEYMANIFEST_USER(x) (x << 4)
|
||||
#define NS 3
|
||||
|
||||
#define SE_KEYMANIFEST_PURPOSE(x) (x << 8)
|
||||
#define ENC 0
|
||||
#define CMAC 1
|
||||
#define HMAC 2
|
||||
#define KW 3
|
||||
#define KUW 4
|
||||
#define KWUW 5
|
||||
#define KDK 6
|
||||
#define KDD 7
|
||||
#define KDD_KUW 8
|
||||
#define XTS 9
|
||||
#define GCM 10
|
||||
|
||||
#define SE_KEYMANIFEST_SIZE(x) (x << 14)
|
||||
#define KEY128 0
|
||||
#define KEY192 1
|
||||
#define KEY256 2
|
||||
|
||||
#define SE_KEYMANIFEST_EX(x) (x << 12)
|
||||
|
||||
#define SE_AES_CRYPTO_KEYTABLE_KEYMANIFEST_OFFSET 0x110
|
||||
|
||||
#define SE_AES_CRYPTO_KEYTABLE_DST_OFFSET 0x2c
|
||||
|
||||
#define SE_AES_KEY_INDEX(x) (x << 8)
|
||||
|
||||
#define SE_SHA_CRYPTO_KEYTABLE_KEYMANIFEST_OFFSET 0x98
|
||||
#define SE_SHA_CRYPTO_KEYTABLE_DST_OFFSET 0xa4
|
||||
#define SE_SHA_CRYPTO_KEYTABLE_ADDR_OFFSET 0x90
|
||||
#define SE_SHA_CRYPTO_KEYTABLE_DATA_OFFSET 0x94
|
||||
|
||||
/* cdma opcodes */
|
||||
#if 0
static inline u32 nvhost_opcode_setclass(
                unsigned class_id, unsigned offset, unsigned mask)
{
        return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
}

static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count)
{
        return (2 << 28) | (offset << 16) | count;
}

static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask)
{
        return (3 << 28) | (offset << 16) | mask;
}

static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value)
{
        return (4 << 28) | (offset << 16) | value;
}

static inline u32 nvhost_opcode_restart(unsigned address)
{
        return (5 << 28) | (address >> 4);
}

static inline u32 nvhost_opcode_gather_nonincr(unsigned offset, unsigned count)
{
        return (6 << 28) | (offset << 16) | BIT(15) | count;
}

static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count)
{
        return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
}

static inline u32 nvhost_opcode_gather_insert(unsigned offset, unsigned incr,
                unsigned count)
{
        return (6 << 28) | (offset << 16) | BIT(15) | (incr << 14) | count;
}

static inline u32 nvhost_opcode_setstreamid(unsigned streamid)
{
        return (7 << 28) | streamid;
}

static inline u32 nvhost_opcode_setpayload(unsigned payload)
{
        return (9 << 28) | payload;
}

static inline u32 nvhost_opcode_incr_w(unsigned int offset)
{
        /* 20-bit offset supported */
        return (10 << 28) | offset;
}

static inline u32 nvhost_opcode_nonincr_w(unsigned int offset)
{
        /* 20-bit offset supported */
        return (11 << 28) | offset;
}

static inline u32 nvhost_opcode_acquire_mlock(unsigned id)
{
        return (14 << 28) | id;
}

static inline u32 nvhost_opcode_release_mlock(unsigned id)
{
        return (14 << 28) | (1 << 24) | id;
}

static inline u32 nvhost_class_host_incr_syncpt_base(
                unsigned base_indx, unsigned offset)
{
        return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
                | host1x_uclass_incr_syncpt_base_offset_f(offset);
}

static inline u32 nvhost_class_host_incr_syncpt(
                unsigned cond, unsigned indx)
{
        return host1x_uclass_incr_syncpt_cond_f(cond)
                | host1x_uclass_incr_syncpt_indx_f(indx);
}

#define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0)

static inline u32 nvhost_mask2(unsigned x, unsigned y)
{
        return 1 | (1 << (y - x));
}
#endif

#endif /* _CRYPTO_TEGRA_SE_H */
387
drivers/crypto/tegra-se-nvrng.c
Normal file
@@ -0,0 +1,387 @@
/*
 * drivers/crypto/tegra-se-nvrng.c
 *
 * Support for Tegra NVRNG Engine Error Handling.
 *
 * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <asm/io.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>

/* RNG1 offsets */
#define NV_NVRNG_R_IE_0 0x80
#define NV_NVRNG_R_ISTAT_0 0x84
#define NV_NVRNG_R_CTRL0_0 0x90
#define SW_ENGINE_ENABLED (1 << 2)
#define NV_NVRNG_R_CTRL1_0 0x90

/* SAP offsets */
#define SE0_SOFTRESET_0 0x60
#define SE0_INT_ENABLE_0 0x88
#define SC7_CTX_INTEGRITY_ERROR (1 << 7)
#define SC7_CTX_START_ERROR (1 << 6)
#define SE0_INT_STATUS_0 0x8c
#define SE0_SC7_CTRL_0 0xbc
#define SC7_CTX_SAVE 0
#define SC7_CTX_RESTORE 1
#define SE0_SC7_STATUS_0 0xc0
#define IDLE 0
#define BUSY 1
#define SE0_FEATURES_0 0x114
#define CAP_RNG1 (1 << 1)
#define CAP_HOST1X (1 << 0)

#define SC7_IDLE_TIMEOUT_2000MS 2000000 /* 2 s */
#define SC7_IDLE_TIMEOUT_200MS 200000 /* 200 ms */
#define RESET_TIMEOUT_100MS 100000

#define HALTED 0x4
#define STARTUP_DONE 0x2
#define ERROR 0x1

#define HALT 0x10
#define SOFT_RST 0x1

#define CLK_RATE 38400

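/*
 * struct tegra_se_nvrng_dev - driver state for the NVRNG error handler
 * @rng1_base: mapped "rng1" register window (NV_NVRNG_R_* offsets)
 * @sap_base:  mapped "sap" register window (SE0_* offsets)
 * @irq:       NVRNG error interrupt
 * @clk:       "se" clock
 */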
struct tegra_se_nvrng_dev {
        void __iomem *rng1_base;
        void __iomem *sap_base;
        int irq;
        struct clk *clk;
};

static unsigned int tegra_se_nvrng_readl(struct tegra_se_nvrng_dev *nvrng_dev,
                                         unsigned int offset)
{
        return readl(nvrng_dev->rng1_base + offset);
}

static void tegra_se_nvrng_writel(struct tegra_se_nvrng_dev *nvrng_dev,
                                  unsigned int offset, unsigned int value)
{
        writel(value, nvrng_dev->rng1_base + offset);
}

#ifdef CONFIG_PM_SLEEP
static unsigned int tegra_se_sap_readl(struct tegra_se_nvrng_dev *nvrng_dev,
                                       unsigned int offset)
{
        return readl(nvrng_dev->sap_base + offset);
}

static void tegra_se_sap_writel(struct tegra_se_nvrng_dev *nvrng_dev,
                                unsigned int offset, unsigned int value)
{
        writel(value, nvrng_dev->sap_base + offset);
}
#endif

static irqreturn_t tegra_se_nvrng_isr(int irq, void *dev_id)
{
        int handled = 0;
        unsigned int mask, status;
        struct tegra_se_nvrng_dev *nvrng_dev = dev_id;

        /* Handle the interrupt if issued for an error condition.
         * Ignore the interrupt otherwise.
         */
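        /*
         * Recovery sequence implemented below:
         *   ERROR        -> mask STARTUP_DONE/ERROR, halt the engine and
         *                   wait for the HALTED interrupt;
         *   HALTED       -> mask HALTED, soft-reset the engine and wait for
         *                   STARTUP_DONE;
         *   STARTUP_DONE -> re-arm the ERROR interrupt.
         */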
        status = tegra_se_nvrng_readl(nvrng_dev, NV_NVRNG_R_ISTAT_0);
        if (status & ERROR) {
                mask = tegra_se_nvrng_readl(nvrng_dev, NV_NVRNG_R_IE_0);

                /* Disable STARTUP_DONE & ERROR interrupts. */
                mask &= ~(STARTUP_DONE | ERROR);
                tegra_se_nvrng_writel(nvrng_dev, NV_NVRNG_R_IE_0, mask);

                /* Halt NVRNG and enable HALT interrupt. */
                tegra_se_nvrng_writel(nvrng_dev, NV_NVRNG_R_CTRL1_0, HALT);
                tegra_se_nvrng_writel(nvrng_dev, NV_NVRNG_R_IE_0, HALTED);

                handled = 1;
        } else if (status & HALTED) {
                mask = tegra_se_nvrng_readl(nvrng_dev, NV_NVRNG_R_IE_0);

                /* Disable HALT interrupt. */
                mask &= ~HALTED;
                tegra_se_nvrng_writel(nvrng_dev, NV_NVRNG_R_IE_0, mask);

                /* Soft reset NVRNG and enable STARTUP_DONE interrupt. */
                mask |= STARTUP_DONE;
                tegra_se_nvrng_writel(nvrng_dev, NV_NVRNG_R_CTRL1_0, SOFT_RST);
                tegra_se_nvrng_writel(nvrng_dev, NV_NVRNG_R_IE_0, STARTUP_DONE);
                handled = 1;
        } else {
                /* Soft reset complete, enable ERROR interrupt. */
                tegra_se_nvrng_writel(nvrng_dev, NV_NVRNG_R_IE_0, ERROR);
                handled = 1;
        }

        return IRQ_RETVAL(handled);
}

static int tegra_se_nvrng_request_irq(struct tegra_se_nvrng_dev *nvrng_dev)
{
        int ret;
        unsigned int mask;

        ret = request_irq(nvrng_dev->irq, tegra_se_nvrng_isr, 0,
                          "tegra-se-nvrng", nvrng_dev);
        if (ret)
                return ret;

        /* Set NV_NVRNG_R_IE_0.ERROR = Enabled.
         * This will enable interrupts for errors.
         */
        mask = tegra_se_nvrng_readl(nvrng_dev, NV_NVRNG_R_IE_0);
        tegra_se_nvrng_writel(nvrng_dev, NV_NVRNG_R_IE_0,
                              mask | ERROR);

        return ret;
}

static int tegra_se_nvrng_probe(struct platform_device *pdev)
{
        struct tegra_se_nvrng_dev *nvrng_dev;

        nvrng_dev = devm_kzalloc(&pdev->dev, sizeof(struct tegra_se_nvrng_dev),
                                 GFP_KERNEL);
        if (!nvrng_dev)
                return -ENOMEM;

        nvrng_dev->rng1_base = devm_platform_ioremap_resource_byname(pdev,
                                                                      "rng1");
        if (IS_ERR(nvrng_dev->rng1_base))
                return PTR_ERR(nvrng_dev->rng1_base);

        nvrng_dev->sap_base = devm_platform_ioremap_resource_byname(pdev,
                                                                     "sap");
        if (IS_ERR(nvrng_dev->sap_base))
                return PTR_ERR(nvrng_dev->sap_base);

        nvrng_dev->irq = platform_get_irq(pdev, 0);
        if (nvrng_dev->irq < 0) {
                if (nvrng_dev->irq != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "cannot obtain irq\n");
                return nvrng_dev->irq;
        }

        nvrng_dev->clk = devm_clk_get(&pdev->dev, "se");
        if (IS_ERR(nvrng_dev->clk))
                return PTR_ERR(nvrng_dev->clk);

        clk_prepare_enable(nvrng_dev->clk);
        clk_set_rate(nvrng_dev->clk, CLK_RATE);

        platform_set_drvdata(pdev, nvrng_dev);

        return tegra_se_nvrng_request_irq(nvrng_dev);
}

static int tegra_se_nvrng_remove(struct platform_device *pdev)
{
        struct tegra_se_nvrng_dev *nvrng_dev =
                (struct tegra_se_nvrng_dev *)platform_get_drvdata(pdev);

        free_irq(nvrng_dev->irq, nvrng_dev);
        clk_disable_unprepare(nvrng_dev->clk);
        devm_clk_put(&pdev->dev, nvrng_dev->clk);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int tegra_se_sc7_check_idle(struct tegra_se_nvrng_dev *nvrng_dev,
                                   u32 timeout_us)
{
        u32 val;

        return readl_poll_timeout(nvrng_dev->sap_base + SE0_SC7_STATUS_0, val,
                                  (val & 0x5f) == 0x5f, 10, timeout_us);
}

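/*
 * Note (editorial): tegra_se_softreset() below sets SE0_SOFTRESET_0 and then
 * polls the same register until the hardware clears it again, failing with
 * -ETIMEDOUT after RESET_TIMEOUT_100MS.
 */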
static int tegra_se_softreset(struct tegra_se_nvrng_dev *nvrng_dev)
{
        u32 val;

        tegra_se_sap_writel(nvrng_dev, SE0_SOFTRESET_0, TRUE);

        return readl_poll_timeout(nvrng_dev->sap_base + SE0_SOFTRESET_0, val,
                                  val == FALSE, 10, RESET_TIMEOUT_100MS);
}

static int tegra_se_sc7_check_error(struct tegra_se_nvrng_dev *nvrng_dev,
                                    bool resume)
{
        u32 val;
        int ret;

        ret = tegra_se_sc7_check_idle(nvrng_dev, SC7_IDLE_TIMEOUT_200MS);
        if (ret == -ETIMEDOUT) {
                pr_err("%s:%d SE HW is not idle, timeout\n",
                       __func__, __LINE__);
                return ret;
        }

        val = tegra_se_sap_readl(nvrng_dev, SE0_INT_STATUS_0);
        if (val & SC7_CTX_START_ERROR) {
                /* Write 1 to clear */
                tegra_se_sap_writel(nvrng_dev, SE0_INT_STATUS_0,
                                    SC7_CTX_START_ERROR);
                pr_err("%s:%d SC7 start error\n", __func__, __LINE__);
                ret = -EIO;
        }

        if (resume && !ret) {
                val = tegra_se_sap_readl(nvrng_dev, SE0_FEATURES_0);
                if (val != (CAP_RNG1 | CAP_HOST1X)) {
                        pr_err("%s:%d SC7 SE features check failed, disable engine\n",
                               __func__, __LINE__);
                        ret = -EIO;
                }
        }

        return ret;
}

static int tegra_se_nvrng_suspend(struct device *dev)
{
        struct tegra_se_nvrng_dev *nvrng_dev = dev_get_drvdata(dev);
        int ret = 0;

        /* 1. Enable clock */
        clk_prepare_enable(nvrng_dev->clk);

        /* 2. Program NV_NVRNG_R_CTRL0_0.SW_ENGINE_ENABLED to true */
        tegra_se_nvrng_writel(nvrng_dev, NV_NVRNG_R_CTRL0_0, SW_ENGINE_ENABLED);

        /* WAR for bug 200735620 */
        ret = tegra_se_softreset(nvrng_dev);
        if (ret) {
                pr_err("%s:%d SE softreset failed\n", __func__, __LINE__);
                clk_disable_unprepare(nvrng_dev->clk);
                return ret;
        }

        /* 3. Check SE0_SC7_STATUS_0 is 0x5f for HW to be IDLE */
        ret = tegra_se_sc7_check_idle(nvrng_dev, SC7_IDLE_TIMEOUT_2000MS);
        if (ret == -ETIMEDOUT) {
                pr_err("%s:%d SE HW is not idle, couldn't suspend\n",
                       __func__, __LINE__);
                clk_disable_unprepare(nvrng_dev->clk);
                return ret;
        }

        /* 4. Trigger SC7 context save */
        tegra_se_sap_writel(nvrng_dev, SE0_SC7_CTRL_0, SC7_CTX_SAVE);

        /* 5. Check for SC7 start errors */
        ret = tegra_se_sc7_check_error(nvrng_dev, false);

        /* 6. Disable clock */
        clk_disable_unprepare(nvrng_dev->clk);

        pr_debug("%s:%d suspend complete\n", __func__, __LINE__);

        return ret;
}

static int tegra_se_nvrng_resume(struct device *dev)
{
        struct tegra_se_nvrng_dev *nvrng_dev = dev_get_drvdata(dev);
        int ret = 0;

        /* 1. Enable clock */
        clk_prepare_enable(nvrng_dev->clk);

        /* 2. Program NV_NVRNG_R_CTRL0_0.SW_ENGINE_ENABLED to true */
        tegra_se_nvrng_writel(nvrng_dev, NV_NVRNG_R_CTRL0_0, SW_ENGINE_ENABLED);

        /* 3. Check SE0_SC7_STATUS_0 is 0x5f for HW to be IDLE */
        ret = tegra_se_sc7_check_idle(nvrng_dev, SC7_IDLE_TIMEOUT_2000MS);
        if (ret == -ETIMEDOUT) {
                pr_err("%s:%d SE HW is not idle, couldn't resume\n",
                       __func__, __LINE__);
                clk_disable_unprepare(nvrng_dev->clk);
                return ret;
        }

        /* 4. Trigger SC7 context restore */
        tegra_se_sap_writel(nvrng_dev, SE0_SC7_CTRL_0, SC7_CTX_RESTORE);

        /* 5. Check for SC7 start errors */
        ret = tegra_se_sc7_check_error(nvrng_dev, true);

        /* 6. Disable clock */
        clk_disable_unprepare(nvrng_dev->clk);

        pr_debug("%s:%d resume complete\n", __func__, __LINE__);

        return ret;
}
#endif

static const struct dev_pm_ops tegra_se_nvrng_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(tegra_se_nvrng_suspend, tegra_se_nvrng_resume)
};

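/*
 * Note (editorial): SET_SYSTEM_SLEEP_PM_OPS() only populates the suspend and
 * resume hooks when CONFIG_PM_SLEEP is enabled, matching the #ifdef guards
 * around the handlers above.
 */
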
#ifdef CONFIG_ACPI
static const struct acpi_device_id tegra_se_nvrng_acpi_match[] = {
        {}
};
MODULE_DEVICE_TABLE(acpi, tegra_se_nvrng_acpi_match);
#endif /* CONFIG_ACPI */

static const struct of_device_id tegra_se_nvrng_of_match[] = {
        { .compatible = "nvidia,tegra234-se-nvrng" },
        {}
};
MODULE_DEVICE_TABLE(of, tegra_se_nvrng_of_match);

static struct platform_driver tegra_se_nvrng_driver = {
        .probe = tegra_se_nvrng_probe,
        .remove = tegra_se_nvrng_remove,
        .driver = {
                .name = "tegra-se-nvrng",
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(tegra_se_nvrng_of_match),
                .pm = &tegra_se_nvrng_pm_ops,
#ifdef CONFIG_ACPI
                .acpi_match_table = ACPI_PTR(tegra_se_nvrng_acpi_match),
#endif /* CONFIG_ACPI */
        },
};

static int __init tegra_se_nvrng_module_init(void)
{
        return platform_driver_register(&tegra_se_nvrng_driver);
}

static void __exit tegra_se_nvrng_module_exit(void)
{
        platform_driver_unregister(&tegra_se_nvrng_driver);
}

module_init(tegra_se_nvrng_module_init);
module_exit(tegra_se_nvrng_module_exit);

MODULE_AUTHOR("Kartik <kkartik@nvidia.com>");
MODULE_DESCRIPTION("Tegra Crypto NVRNG error handling support");
MODULE_LICENSE("GPL");
MODULE_ALIAS("tegra-se-nvrng");