diff --git a/dkms.conf b/dkms.conf index 8d17880e..98e32d10 100644 --- a/dkms.conf +++ b/dkms.conf @@ -83,8 +83,8 @@ BUILT_MODULE_NAME[19]="tegra-pcie-dma-test" BUILT_MODULE_LOCATION[19]="drivers/misc" DEST_MODULE_LOCATION[19]="/extra" -BUILT_MODULE_NAME[20]="tegra-se-nvhost" -BUILT_MODULE_LOCATION[20]="drivers/crypto" +BUILT_MODULE_NAME[20]="tegra-se" +BUILT_MODULE_LOCATION[20]="drivers/crypto/tegra" DEST_MODULE_LOCATION[20]="/extra" BUILT_MODULE_NAME[21]="tegra-se-nvrng" diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 4d5e7104..bf6b149c 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -2,7 +2,6 @@ # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. ifdef CONFIG_TEGRA_HOST1X -obj-m += tegra-se-nvhost.o obj-m += tegra-hv-vse-safety.o obj-m += tegra-nvvse-cryptodev.o ifdef CONFIG_CRYPTO_ENGINE diff --git a/drivers/crypto/hardware_t194.h b/drivers/crypto/hardware_t194.h deleted file mode 100644 index 32c96d5c..00000000 --- a/drivers/crypto/hardware_t194.h +++ /dev/null @@ -1,163 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2016-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * - * Tegra T194 HOST1X Register Definitions - */ - -#ifndef __NVHOST_HARDWARE_T194_H -#define __NVHOST_HARDWARE_T194_H - -#include -#include -#include - -/* sync registers */ -#define NV_HOST1X_SYNCPT_NB_PTS 704 -#define NV_HOST1X_NB_MLOCKS 32 - -#define NV_HOST1X_MLOCK_ID_NVCSI 7 -#define NV_HOST1X_MLOCK_ID_ISP 8 -#define NV_HOST1X_MLOCK_ID_VI 16 -#define NV_HOST1X_MLOCK_ID_VIC 17 -#define NV_HOST1X_MLOCK_ID_NVENC 18 -#define NV_HOST1X_MLOCK_ID_NVDEC 19 -#define NV_HOST1X_MLOCK_ID_NVJPG 20 -#define NV_HOST1X_MLOCK_ID_TSEC 21 -#define NV_HOST1X_MLOCK_ID_TSECB 22 -#define NV_HOST1X_MLOCK_ID_NVENC1 29 -#define NV_HOST1X_MLOCK_ID_NVDEC1 31 - -#define HOST1X_THOST_ACTMON_NVENC 0x00000 -#define HOST1X_THOST_ACTMON_VIC 0x10000 -#define HOST1X_THOST_ACTMON_NVDEC 0x20000 -#define HOST1X_THOST_ACTMON_NVJPG 0x30000 -#define HOST1X_THOST_ACTMON_NVENC1 0x40000 -#define HOST1X_THOST_ACTMON_NVDEC1 0x50000 - -/* Generic support */ -static inline u32 nvhost_class_host_wait_syncpt( - unsigned indx, unsigned threshold) -{ - return (indx << 24) | (threshold & 0xffffff); -} - -static inline u32 nvhost_class_host_load_syncpt_base( - unsigned indx, unsigned threshold) -{ - return host1x_uclass_wait_syncpt_indx_f(indx) - | host1x_uclass_wait_syncpt_thresh_f(threshold); -} - -static inline u32 nvhost_class_host_incr_syncpt( - unsigned cond, unsigned indx) -{ - return host1x_uclass_incr_syncpt_cond_f(cond) - | host1x_uclass_incr_syncpt_indx_f(indx); -} - -enum { - NV_HOST_MODULE_HOST1X = 0, - NV_HOST_MODULE_MPE = 1, - NV_HOST_MODULE_GR3D = 6 -}; - -/* cdma opcodes */ -static inline u32 nvhost_opcode_setclass( - unsigned class_id, unsigned offset, unsigned mask) -{ - return (0 << 28) | (offset << 16) | (class_id << 6) | mask; -} - -static inline u32 nvhost_opcode_incr(unsigned offset, unsigned count) -{ - return (1 << 28) | (offset << 16) | count; -} - -static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count) -{ - return (2 << 28) | (offset << 16) | count; -} - -static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask) -{ - return (3 << 28) | (offset << 16) | mask; -} - -static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value) -{ - return (4 << 28) | (offset << 16) | value; -} - -static inline u32 nvhost_opcode_imm_incr_syncpt(unsigned cond, unsigned indx) -{ - return 
nvhost_opcode_imm(host1x_uclass_incr_syncpt_r(), - nvhost_class_host_incr_syncpt(cond, indx)); -} - -static inline u32 nvhost_opcode_restart(unsigned address) -{ - return (5 << 28) | (address >> 4); -} - -static inline u32 nvhost_opcode_gather(unsigned count) -{ - return (6 << 28) | count; -} - -static inline u32 nvhost_opcode_gather_nonincr(unsigned offset, unsigned count) -{ - return (6 << 28) | (offset << 16) | BIT(15) | count; -} - -static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count) -{ - return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count; -} - -static inline u32 nvhost_opcode_gather_insert(unsigned offset, unsigned incr, - unsigned count) -{ - return (6 << 28) | (offset << 16) | BIT(15) | (incr << 14) | count; -} - -static inline u32 nvhost_opcode_setstreamid(unsigned streamid) -{ - return (7 << 28) | streamid; -} - -static inline u32 nvhost_opcode_setpayload(unsigned payload) -{ - return (9 << 28) | payload; -} - -static inline u32 nvhost_opcode_acquire_mlock(unsigned id) -{ - return (14 << 28) | id; -} - -static inline u32 nvhost_opcode_release_mlock(unsigned id) -{ - return (14 << 28) | (1 << 24) | id; -} - -static inline u32 nvhost_opcode_incr_w(unsigned int offset) -{ - /* 22-bit offset supported */ - return (10 << 28) | offset; -} - -static inline u32 nvhost_opcode_nonincr_w(unsigned offset) -{ - /* 22-bit offset supported */ - return (11 << 28) | offset; -} - -#define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0) - -static inline u32 nvhost_mask2(unsigned x, unsigned y) -{ - return 1 | (1 << (y - x)); -} - -#endif /* __NVHOST_HARDWARE_T194_H */ diff --git a/drivers/crypto/nvhost.h b/drivers/crypto/nvhost.h deleted file mode 100644 index 1b13a2c3..00000000 --- a/drivers/crypto/nvhost.h +++ /dev/null @@ -1,937 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2009-2023, NVIDIA Corporation. All rights reserved. - * - * Tegra graphics host driver - */ - -#ifndef __LINUX_NVHOST_H -#define __LINUX_NVHOST_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -//#include - -#ifdef CONFIG_TEGRA_HOST1X -#include -#endif - -#if IS_ENABLED(CONFIG_TEGRA_GRHOST) && IS_ENABLED(CONFIG_TEGRA_HOST1X) -#error "Unable to enable TEGRA_GRHOST or TEGRA_HOST1X at the same time!" 
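/*
 * Both the legacy nvhost stack (TEGRA_GRHOST) and the upstream host1x
 * driver (TEGRA_HOST1X) implement the interfaces declared below for the
 * same host1x hardware, so a kernel configuration may enable at most one
 * of them; the build-time check above enforces that.
 */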
-#endif - -struct tegra_bwmgr_client; - -struct nvhost_channel; -struct nvhost_master; -struct nvhost_cdma; -struct nvhost_hwctx; -struct nvhost_device_power_attr; -struct nvhost_device_profile; -struct mem_mgr; -struct nvhost_as_moduleops; -struct nvhost_ctrl_sync_fence_info; -struct nvhost_sync_timeline; -struct nvhost_sync_pt; -enum nvdev_fence_kind; -struct nvdev_fence; -struct sync_pt; -struct dma_fence; -struct nvhost_fence; - -#define NVHOST_MODULE_MAX_CLOCKS 8 -#define NVHOST_MODULE_MAX_SYNCPTS 16 -#define NVHOST_MODULE_MAX_WAITBASES 3 -#define NVHOST_MODULE_MAX_MODMUTEXES 5 -#define NVHOST_MODULE_MAX_IORESOURCE_MEM 5 -#define NVHOST_NAME_SIZE 24 -#define NVSYNCPT_INVALID (-1) - -#define NVSYNCPT_AVP_0 (10) /* t20, t30, t114, t148 */ -#define NVSYNCPT_3D (22) /* t20, t30, t114, t148 */ -#define NVSYNCPT_VBLANK0 (26) /* t20, t30, t114, t148 */ -#define NVSYNCPT_VBLANK1 (27) /* t20, t30, t114, t148 */ - -#define NVMODMUTEX_ISP_0 (1) /* t124, t132, t210 */ -#define NVMODMUTEX_ISP_1 (2) /* t124, t132, t210 */ -#define NVMODMUTEX_NVJPG (3) /* t210 */ -#define NVMODMUTEX_NVDEC (4) /* t210 */ -#define NVMODMUTEX_MSENC (5) /* t124, t132, t210 */ -#define NVMODMUTEX_TSECA (6) /* t124, t132, t210 */ -#define NVMODMUTEX_TSECB (7) /* t124, t132, t210 */ -#define NVMODMUTEX_VI (8) /* t124, t132, t210 */ -#define NVMODMUTEX_VI_0 (8) /* t148 */ -#define NVMODMUTEX_VIC (10) /* t124, t132, t210 */ -#define NVMODMUTEX_VI_1 (11) /* t124, t132, t210 */ - -enum nvhost_power_sysfs_attributes { - NVHOST_POWER_SYSFS_ATTRIB_AUTOSUSPEND_DELAY, - NVHOST_POWER_SYSFS_ATTRIB_FORCE_ON, - NVHOST_POWER_SYSFS_ATTRIB_MAX -}; - -struct nvhost_notification { - struct { /* 0000- */ - __u32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 */ - } time_stamp; /* -0007 */ - __u32 info32; /* info returned depends on method 0008-000b */ -#define NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT 8 -#define NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY 13 -#define NVHOST_CHANNEL_GR_SEMAPHORE_TIMEOUT 24 -#define NVHOST_CHANNEL_GR_ILLEGAL_NOTIFY 25 -#define NVHOST_CHANNEL_FIFO_ERROR_MMU_ERR_FLT 31 -#define NVHOST_CHANNEL_PBDMA_ERROR 32 -#define NVHOST_CHANNEL_RESETCHANNEL_VERIF_ERROR 43 - __u16 info16; /* info returned depends on method 000c-000d */ - __u16 status; /* user sets bit 15, NV sets status 000e-000f */ -#define NVHOST_CHANNEL_SUBMIT_TIMEOUT 1 -}; - -struct nvhost_gating_register { - u64 addr; - u32 prod; - u32 disable; -}; - -struct nvhost_actmon_register { - u32 addr; - u32 val; -}; - -enum tegra_emc_request_type { - TEGRA_SET_EMC_FLOOR, /* lower bound */ - TEGRA_SET_EMC_CAP, /* upper bound */ - TEGRA_SET_EMC_ISO_CAP, /* upper bound that affects ISO Bw */ - TEGRA_SET_EMC_SHARED_BW, /* shared bw request */ - TEGRA_SET_EMC_SHARED_BW_ISO, /* for use by ISO Mgr only */ - TEGRA_SET_EMC_REQ_COUNT /* Should always be last */ -}; - -struct nvhost_clock { - char *name; - unsigned long default_rate; - u32 moduleid; - enum tegra_emc_request_type request_type; - bool disable_scaling; - unsigned long devfreq_rate; -}; - -struct nvhost_vm_hwid { - u64 addr; - bool dynamic; - u32 shift; -}; - -/* - * Defines HW and SW class identifiers. - * - * This is module ID mapping between userspace and kernelspace. - * The values of enum entries' are referred from NvRmModuleID enum defined - * in below userspace file: - * $TOP/vendor/nvidia/tegra/core/include/nvrm_module.h - * Please make sure each entry below has same value as set in above file. 
- */ -enum nvhost_module_identifier { - - /* Specifies external memory (DDR RAM, etc) */ - NVHOST_MODULE_ID_EXTERNAL_MEMORY_CONTROLLER = 75, - - /* Specifies CBUS floor client module */ - NVHOST_MODULE_ID_CBUS_FLOOR = 119, - - /* Specifies shared EMC client module */ - NVHOST_MODULE_ID_EMC_SHARED, - NVHOST_MODULE_ID_MAX -}; - -enum nvhost_resource_policy { - RESOURCE_PER_DEVICE = 0, - RESOURCE_PER_CHANNEL_INSTANCE, -}; - -struct nvhost_device_data { - int version; /* ip version number of device */ - int id; /* Separates clients of same hw */ - void __iomem *aperture[NVHOST_MODULE_MAX_IORESOURCE_MEM]; - struct device_dma_parameters dma_parms; - - u32 modulemutexes[NVHOST_MODULE_MAX_MODMUTEXES]; - u32 moduleid; /* Module id for user space API */ - - /* interrupt ISR routine for falcon based engines */ - int (*flcn_isr)(struct platform_device *dev); - int irq; - int module_irq; /* IRQ bit from general intr reg for module intr */ - spinlock_t mirq_lock; /* spin lock for module irq */ - bool self_config_flcn_isr; /* skip setting up falcon interrupts */ - - /* Should we toggle the engine SLCG when we turn on the domain? */ - bool poweron_toggle_slcg; - - /* Flag to set SLCG notifier (for the modules other than VIC) */ - bool slcg_notifier_enable; - - /* Used to serialize channel when map-at-submit is used w/o mlocks */ - u32 last_submit_syncpt_id; - u32 last_submit_syncpt_value; - - bool power_on; /* If module is powered on */ - - u32 class; /* Device class */ - bool exclusive; /* True if only one user at a time */ - bool keepalive; /* Do not power gate when opened */ - bool serialize; /* Serialize submits in the channel */ - bool push_work_done; /* Push_op done into push buffer */ - bool poweron_reset; /* Reset the engine before powerup */ - bool virtual_dev; /* True if virtualized device */ - char *devfs_name; /* Name in devfs */ - char *devfs_name_family; /* Core of devfs name */ - - /* Support aborting the channel with close(channel_fd) */ - bool support_abort_on_close; - - char *firmware_name; /* Name of firmware */ - bool firmware_not_in_subdir; /* Firmware is not located in - chip subdirectory */ - - bool engine_can_cg; /* True if CG is enabled */ - bool can_powergate; /* True if module can be power gated */ - int autosuspend_delay;/* Delay before power gated */ - struct nvhost_clock clocks[NVHOST_MODULE_MAX_CLOCKS];/* Clock names */ - - /* Clock gating registers */ - struct nvhost_gating_register *engine_cg_regs; - - int num_clks; /* Number of clocks opened for dev */ -#if IS_ENABLED(CONFIG_TEGRA_GRHOST) - struct clk *clk[NVHOST_MODULE_MAX_CLOCKS]; -#else - struct clk_bulk_data *clks; -#endif - struct mutex lock; /* Power management lock */ - struct list_head client_list; /* List of clients and rate requests */ - - int num_channels; /* Max num of channel supported */ - int num_mapped_chs; /* Num of channel mapped to device */ - int num_ppc; /* Number of pixels per clock cycle */ - - /* device node for channel operations */ - dev_t cdev_region; - struct device *node; - struct cdev cdev; - - /* Address space device node */ - struct device *as_node; - struct cdev as_cdev; - - /* device node for ctrl block */ - struct class *nvhost_class; - struct device *ctrl_node; - struct cdev ctrl_cdev; - const struct file_operations *ctrl_ops; /* ctrl ops for the module */ - - /* address space operations */ - const struct nvhost_as_moduleops *as_ops; - - struct kobject *power_kobj; /* kobject to hold power sysfs entries */ - struct nvhost_device_power_attr *power_attrib; /* sysfs attributes */ - /* 
kobject to hold clk_cap sysfs entries */ - struct kobject clk_cap_kobj; - struct kobj_attribute *clk_cap_attrs; - struct dentry *debugfs; /* debugfs directory */ - - u32 nvhost_timeout_default; - - /* Data for devfreq usage */ - struct devfreq *power_manager; - /* Private device profile data */ - struct nvhost_device_profile *power_profile; - /* Should we read load estimate from hardware? */ - bool actmon_enabled; - /* Should we do linear emc scaling? */ - bool linear_emc; - /* Offset to actmon registers */ - u32 actmon_regs; - /* WEIGHT_COUNT of actmon */ - u32 actmon_weight_count; - struct nvhost_actmon_register *actmon_setting_regs; - /* Devfreq governor name */ - const char *devfreq_governor; - unsigned long *freq_table; - - /* Marks if the device is booted when pm runtime is disabled */ - bool booted; - - /* Should be marked as true if nvhost shouldn't create device nodes */ - bool kernel_only; - - void *private_data; /* private platform data */ - void *falcon_data; /* store the falcon info */ - struct platform_device *pdev; /* owner platform_device */ - void *virt_priv; /* private data for virtualized dev */ -#if IS_ENABLED(CONFIG_TEGRA_HOST1X) - struct host1x *host1x; /* host1x device */ -#endif - - struct mutex no_poweroff_req_mutex; - struct dev_pm_qos_request no_poweroff_req; - int no_poweroff_req_count; - - struct notifier_block toggle_slcg_notifier; - - struct rw_semaphore busy_lock; - bool forced_idle; - - /* Finalize power on. Can be used for context restore. */ - int (*finalize_poweron)(struct platform_device *dev); - - /* Called each time we enter the class */ - int (*init_class_context)(struct platform_device *dev, - struct nvhost_cdma *cdma); - - /* - * Reset the unit. Used for timeout recovery, resetting the unit on - * probe and when un-powergating. - */ - void (*reset)(struct platform_device *dev); - - /* Device is busy. */ - void (*busy)(struct platform_device *); - - /* Device is idle. */ - void (*idle)(struct platform_device *); - - /* Scaling init is run on device registration */ - void (*scaling_init)(struct platform_device *dev); - - /* Scaling deinit is called on device unregistration */ - void (*scaling_deinit)(struct platform_device *dev); - - /* Postscale callback is called after frequency change */ - void (*scaling_post_cb)(struct nvhost_device_profile *profile, - unsigned long freq); - - /* Preparing for power off. Used for context save. */ - int (*prepare_poweroff)(struct platform_device *dev); - - /* Aggregate clock rate constraints (floor/pixel/bandwidth) for a clock */ - int (*aggregate_constraints)(struct platform_device *dev, - int clk_index, - unsigned long floor_rate, - unsigned long pixel_rate, - unsigned long bw_rate); - - /* Called after successful client device init. 
This can - * be used in cases where the hardware specifics differ - * between hardware revisions */ - int (*hw_init)(struct platform_device *dev); - - /* Used to add platform specific masks on reloc address */ - dma_addr_t (*get_reloc_phys_addr)(dma_addr_t phys_addr, u32 reloc_type); - - /* Allocates a context handler for the device */ - struct nvhost_hwctx_handler *(*alloc_hwctx_handler)(u32 syncpt, - struct nvhost_channel *ch); - - /* engine specific init functions */ - int (*pre_virt_init)(struct platform_device *pdev); - int (*post_virt_init)(struct platform_device *pdev); - - /* engine specific functions */ - int (*memory_init)(struct platform_device *pdev); - - phys_addr_t carveout_addr; - phys_addr_t carveout_size; - - /* Information related to engine-side synchronization */ - void *syncpt_unit_interface; - - u64 transcfg_addr; - u32 transcfg_val; - u64 mamask_addr; - u32 mamask_val; - u64 borps_addr; - u32 borps_val; - struct nvhost_vm_hwid vm_regs[13]; - - /* Actmon IRQ from hintstatus_r */ - unsigned int actmon_irq; - - /* Is the device already forced on? */ - bool forced_on; - - /* Should we map channel at submit time? */ - bool resource_policy; - - /* Should we enable context isolation for this device? */ - bool isolate_contexts; - - /* channel user context list */ - struct mutex userctx_list_lock; - struct list_head userctx_list; - - /* reset control for this device */ - struct reset_control *reset_control; - - /* For loadable nvgpu module, we dynamically assign function - * pointer of gk20a_debug_dump_device once the module loads */ - void *debug_dump_data; - void (*debug_dump_device)(void *dev); - - /* icc client id for emc requests */ - int icc_id; - - /* icc_path handle handle */ - struct icc_path *icc_path_handle; - - /* bandwidth manager client id for emc requests */ - int bwmgr_client_id; - - /* bandwidth manager handle */ - struct tegra_bwmgr_client *bwmgr_handle; - - /* number of frames mlock can be locked for */ - u32 mlock_timeout_factor; - - /* eventlib id for the device */ - int eventlib_id; - - /* deliver task timestamps for falcon */ - void (*enable_timestamps)(struct platform_device *pdev, - struct nvhost_cdma *cdma, dma_addr_t timestamp_addr); - - /* enable risc-v boot */ - bool enable_riscv_boot; - - /* store the risc-v info */ - void *riscv_data; - - /* name of riscv descriptor binary */ - char *riscv_desc_bin; - - /* name of riscv image binary */ - char *riscv_image_bin; - -}; - - -static inline -struct nvhost_device_data *nvhost_get_devdata(struct platform_device *pdev) -{ - return (struct nvhost_device_data *)platform_get_drvdata(pdev); -} - -static inline bool nvhost_dev_is_virtual(struct platform_device *pdev) -{ - struct nvhost_device_data *pdata = platform_get_drvdata(pdev); - - return pdata->virtual_dev; -} - -struct nvhost_device_power_attr { - struct platform_device *ndev; - struct kobj_attribute power_attr[NVHOST_POWER_SYSFS_ATTRIB_MAX]; -}; - -int flcn_intr_init(struct platform_device *pdev); -int flcn_reload_fw(struct platform_device *pdev); -int nvhost_flcn_prepare_poweroff(struct platform_device *pdev); -int nvhost_flcn_finalize_poweron(struct platform_device *dev); - -/* common runtime pm and power domain APIs */ -int nvhost_module_init(struct platform_device *ndev); -void nvhost_module_deinit(struct platform_device *dev); -void nvhost_module_reset(struct platform_device *dev, bool reboot); -void nvhost_module_idle(struct platform_device *dev); -void nvhost_module_idle_mult(struct platform_device *pdev, int refs); -int 
nvhost_module_busy(struct platform_device *dev); -extern const struct dev_pm_ops nvhost_module_pm_ops; - -void host1x_writel(struct platform_device *dev, u32 r, u32 v); -u32 host1x_readl(struct platform_device *dev, u32 r); - -/* common device management APIs */ -int nvhost_client_device_get_resources(struct platform_device *dev); -int nvhost_client_device_release(struct platform_device *dev); -int nvhost_client_device_init(struct platform_device *dev); - -/* public host1x sync-point management APIs */ -u32 nvhost_get_syncpt_host_managed(struct platform_device *pdev, - u32 param, const char *syncpt_name); -void nvhost_syncpt_put_ref_ext(struct platform_device *pdev, u32 id); -bool nvhost_syncpt_is_valid_pt_ext(struct platform_device *dev, u32 id); -void nvhost_syncpt_set_minval(struct platform_device *dev, u32 id, u32 val); -void nvhost_syncpt_set_min_update(struct platform_device *pdev, u32 id, u32 val); -u32 nvhost_syncpt_read_maxval(struct platform_device *dev, u32 id); -u32 nvhost_syncpt_incr_max_ext(struct platform_device *dev, u32 id, u32 incrs); -int nvhost_syncpt_is_expired_ext(struct platform_device *dev, u32 id, - u32 thresh); -dma_addr_t nvhost_syncpt_address(struct platform_device *engine_pdev, u32 id); -int nvhost_syncpt_unit_interface_init(struct platform_device *pdev); - -/* public host1x interrupt management APIs */ -int nvhost_intr_register_notifier(struct platform_device *pdev, - u32 id, u32 thresh, - void (*callback)(void *, int), - void *private_data); - -/* public host1x sync-point management APIs */ -#ifdef CONFIG_TEGRA_HOST1X - -static inline struct flcn *get_flcn(struct platform_device *pdev) -{ - struct nvhost_device_data *pdata = platform_get_drvdata(pdev); - - return pdata ? pdata->falcon_data : NULL; -} - -static inline int nvhost_module_set_rate(struct platform_device *dev, void *priv, - unsigned long constraint, int index, - unsigned long attr) -{ - return 0; -} - -static inline int nvhost_module_add_client(struct platform_device *dev, void *priv) -{ - return 0; -} - -static inline void nvhost_module_remove_client(struct platform_device *dev, void *priv) { } - -static inline int nvhost_syncpt_get_cv_dev_address_table(struct platform_device *engine_pdev, - int *count, dma_addr_t **table) -{ - return -ENODEV; -} - -static inline const struct firmware * -nvhost_client_request_firmware(struct platform_device *dev, - const char *fw_name, bool warn) -{ - return NULL; -} - -static inline void nvhost_debug_dump_device(struct platform_device *pdev) -{ -} - -static inline int nvhost_fence_create_fd( - struct platform_device *pdev, - struct nvhost_ctrl_sync_fence_info *pts, - u32 num_pts, - const char *name, - s32 *fence_fd) -{ - return -EOPNOTSUPP; -} - -static inline int nvhost_fence_foreach_pt( - struct nvhost_fence *fence, - int (*iter)(struct nvhost_ctrl_sync_fence_info, void *), - void *data) -{ - return -EOPNOTSUPP; -} - -static inline struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch, - int num_cmdbufs, int num_relocs, int num_waitchks, - int num_syncpts) -{ - return NULL; -} - -static inline void nvhost_job_put(struct nvhost_job *job) {} - -static inline int nvhost_job_add_client_gather_address(struct nvhost_job *job, - u32 num_words, u32 class_id, dma_addr_t gather_address) -{ - return -EOPNOTSUPP; -} - -static inline int nvhost_channel_map(struct nvhost_device_data *pdata, - struct nvhost_channel **ch, - void *identifier) -{ - return -EOPNOTSUPP; -} - -static inline int nvhost_channel_submit(struct nvhost_job *job) -{ - return -EOPNOTSUPP; -} - 
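/*
 * A minimal sketch of the in-kernel submit flow built from the APIs
 * declared in this header (implemented in the CONFIG_TEGRA_GRHOST branch
 * below; in this CONFIG_TEGRA_HOST1X branch the stubs simply fail with
 * -EOPNOTSUPP or do nothing). "pdata", "MY_CLASS_ID", "gather_words" and
 * "gather_iova" are hypothetical placeholders; error paths are compressed:
 *
 *	struct nvhost_channel *ch;
 *	struct nvhost_job *job;
 *	int err;
 *
 *	err = nvhost_channel_map(pdata, &ch, pdata);
 *	if (err)
 *		return err;
 *
 *	job = nvhost_job_alloc(ch, 1, 0, 0, 1);
 *	if (!job) {
 *		nvhost_putchannel(ch, 1);
 *		return -ENOMEM;
 *	}
 *
 *	err = nvhost_job_add_client_gather_address(job, gather_words,
 *						   MY_CLASS_ID, gather_iova);
 *	if (!err)
 *		err = nvhost_channel_submit(job);
 *
 *	nvhost_job_put(job);
 *	nvhost_putchannel(ch, 1);
 *	return err;
 */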
-static inline void nvhost_putchannel(struct nvhost_channel *ch, int cnt) {} - -static inline struct nvhost_fence *nvhost_fence_get(int fd) -{ - return NULL; -} - -static inline void nvhost_fence_put(struct nvhost_fence *fence) {} - -static inline int nvhost_fence_num_pts(struct nvhost_fence *fence) -{ - return 0; -} - -static inline dma_addr_t nvhost_t194_get_reloc_phys_addr(dma_addr_t phys_addr, - u32 reloc_type) -{ - return 0; -} - -static inline dma_addr_t nvhost_t23x_get_reloc_phys_addr(dma_addr_t phys_addr, - u32 reloc_type) -{ - return 0; -} - -static inline void nvhost_eventlib_log_task(struct platform_device *pdev, - u32 syncpt_id, - u32 syncpt_thres, - u64 timestamp_start, - u64 timestamp_end) -{ -} - -static inline void nvhost_eventlib_log_submit(struct platform_device *pdev, - u32 syncpt_id, - u32 syncpt_thresh, - u64 timestamp) -{ -} - -static inline void nvhost_eventlib_log_fences(struct platform_device *pdev, - u32 task_syncpt_id, - u32 task_syncpt_thresh, - struct nvdev_fence *fences, - u8 num_fences, - u32 kind, - u64 timestamp) -{ -} -#else - -#ifdef CONFIG_DEBUG_FS -void nvhost_register_dump_device( - struct platform_device *dev, - void (*nvgpu_debug_dump_device)(void *), - void *data); -void nvhost_unregister_dump_device(struct platform_device *dev); -#else -static inline void nvhost_register_dump_device( - struct platform_device *dev, - void (*nvgpu_debug_dump_device)(void *), - void *data) -{ -} - -static inline void nvhost_unregister_dump_device(struct platform_device *dev) -{ -} -#endif - -void host1x_channel_writel(struct nvhost_channel *ch, u32 r, u32 v); -u32 host1x_channel_readl(struct nvhost_channel *ch, u32 r); - -void host1x_sync_writel(struct nvhost_master *dev, u32 r, u32 v); -u32 host1x_sync_readl(struct nvhost_master *dev, u32 r); - -/* public host1x power management APIs */ -bool nvhost_module_powered_ext(struct platform_device *dev); -/* This powers ON only host1x and doesn't power ON the module */ -int nvhost_module_busy_ext(struct platform_device *dev); -/* This powers OFF only host1x and doesn't power OFF the module */ -void nvhost_module_idle_ext(struct platform_device *dev); - -/* public api to return platform_device ptr to the default host1x instance */ -struct platform_device *nvhost_get_default_device(void); - - /* Public PM nvhost APIs. */ -/* This powers ON both host1x and the module */ -int nvhost_module_busy(struct platform_device *dev); -/* This powers OFF both host1x and the module */ -void nvhost_module_idle(struct platform_device *dev); - -/* public api to register/unregister a subdomain */ -void nvhost_register_client_domain(struct generic_pm_domain *domain); -void nvhost_unregister_client_domain(struct generic_pm_domain *domain); - -int nvhost_module_add_client(struct platform_device *dev, - void *priv); -void nvhost_module_remove_client(struct platform_device *dev, - void *priv); - -int nvhost_module_set_rate(struct platform_device *dev, void *priv, - unsigned long constraint, int index, unsigned long attr); - -/* public APIs required to submit in-kernel work */ -int nvhost_channel_map(struct nvhost_device_data *pdata, - struct nvhost_channel **ch, - void *identifier); -void nvhost_putchannel(struct nvhost_channel *ch, int cnt); -/* Allocate memory for a job. Just enough memory will be allocated to - * accommodate the submit announced in the submit header. - */ -struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch, - int num_cmdbufs, int num_relocs, int num_waitchks, - int num_syncpts); -/* Decrement the job reference; free it when the count reaches zero. 
*/ -void nvhost_job_put(struct nvhost_job *job); - -/* Add a gather with IOVA address to job */ -int nvhost_job_add_client_gather_address(struct nvhost_job *job, - u32 num_words, u32 class_id, dma_addr_t gather_address); -int nvhost_channel_submit(struct nvhost_job *job); - -/* public host1x sync-point management APIs */ -u32 nvhost_get_syncpt_client_managed(struct platform_device *pdev, - const char *syncpt_name); -void nvhost_syncpt_get_ref_ext(struct platform_device *pdev, u32 id); -const char *nvhost_syncpt_get_name(struct platform_device *dev, int id); -void nvhost_syncpt_cpu_incr_ext(struct platform_device *dev, u32 id); -int nvhost_syncpt_read_ext_check(struct platform_device *dev, u32 id, u32 *val); -int nvhost_syncpt_wait_timeout_ext(struct platform_device *dev, u32 id, u32 thresh, - u32 timeout, u32 *value, struct timespec64 *ts); -int nvhost_syncpt_create_fence_single_ext(struct platform_device *dev, - u32 id, u32 thresh, const char *name, int *fence_fd); -void nvhost_syncpt_set_min_eq_max_ext(struct platform_device *dev, u32 id); -int nvhost_syncpt_nb_pts_ext(struct platform_device *dev); -bool nvhost_syncpt_is_valid_pt_ext(struct platform_device *dev, u32 id); -u32 nvhost_syncpt_read_minval(struct platform_device *dev, u32 id); -void nvhost_syncpt_set_maxval(struct platform_device *dev, u32 id, u32 val); -int nvhost_syncpt_fd_get_ext(int fd, struct platform_device *pdev, u32 *id); - -void nvhost_eventlib_log_task(struct platform_device *pdev, - u32 syncpt_id, - u32 syncpt_thres, - u64 timestamp_start, - u64 timestamp_end); - -void nvhost_eventlib_log_submit(struct platform_device *pdev, - u32 syncpt_id, - u32 syncpt_thresh, - u64 timestamp); - -void nvhost_eventlib_log_fences(struct platform_device *pdev, - u32 task_syncpt_id, - u32 task_syncpt_thresh, - struct nvdev_fence *fences, - u8 num_fences, - u32 kind, - u64 timestamp); - -dma_addr_t nvhost_t194_get_reloc_phys_addr(dma_addr_t phys_addr, - u32 reloc_type); -dma_addr_t nvhost_t23x_get_reloc_phys_addr(dma_addr_t phys_addr, - u32 reloc_type); - -/* public host1x interrupt management APIs */ -int nvhost_intr_register_fast_notifier(struct platform_device *pdev, - u32 id, u32 thresh, - void (*callback)(void *, int), - void *private_data); - -#if IS_ENABLED(CONFIG_TEGRA_GRHOST) && defined(CONFIG_DEBUG_FS) -void nvhost_debug_dump_device(struct platform_device *pdev); -#else -static inline void nvhost_debug_dump_device(struct platform_device *pdev) -{ -} -#endif - -#if IS_ENABLED(CONFIG_TEGRA_GRHOST) -const struct firmware * -nvhost_client_request_firmware(struct platform_device *dev, - const char *fw_name, bool warn); -#else -static inline const struct firmware * -nvhost_client_request_firmware(struct platform_device *dev, - const char *fw_name, bool warn) -{ - return NULL; -} -#endif - -#if IS_ENABLED(CONFIG_TEGRA_GRHOST_SYNC) - -int nvhost_fence_foreach_pt( - struct nvhost_fence *fence, - int (*iter)(struct nvhost_ctrl_sync_fence_info, void *), - void *data); - -int nvhost_fence_get_pt( - struct nvhost_fence *fence, size_t i, - u32 *id, u32 *threshold); - -struct nvhost_fence *nvhost_fence_create( - struct platform_device *pdev, - struct nvhost_ctrl_sync_fence_info *pts, - u32 num_pts, - const char *name); -int nvhost_fence_create_fd( - struct platform_device *pdev, - struct nvhost_ctrl_sync_fence_info *pts, - u32 num_pts, - const char *name, - s32 *fence_fd); - -struct nvhost_fence *nvhost_fence_get(int fd); -struct nvhost_fence *nvhost_fence_dup(struct nvhost_fence *fence); -int nvhost_fence_num_pts(struct nvhost_fence 
*fence); -int nvhost_fence_install(struct nvhost_fence *fence, int fence_fd); -void nvhost_fence_put(struct nvhost_fence *fence); -void nvhost_fence_wait(struct nvhost_fence *fence, u32 timeout_in_ms); - -#else - -static inline int nvhost_fence_foreach_pt( - struct nvhost_fence *fence, - int (*iter)(struct nvhost_ctrl_sync_fence_info, void *d), - void *d) -{ - return -EOPNOTSUPP; -} - -static inline struct nvhost_fence *nvhost_create_fence( - struct platform_device *pdev, - struct nvhost_ctrl_sync_fence_info *pts, - u32 num_pts, - const char *name) -{ - return ERR_PTR(-EINVAL); -} - -static inline int nvhost_fence_create_fd( - struct platform_device *pdev, - struct nvhost_ctrl_sync_fence_info *pts, - u32 num_pts, - const char *name, - s32 *fence_fd) -{ - return -EINVAL; -} - -static inline struct nvhost_fence *nvhost_fence_get(int fd) -{ - return NULL; -} - -static inline int nvhost_fence_num_pts(struct nvhost_fence *fence) -{ - return 0; -} - -static inline void nvhost_fence_put(struct nvhost_fence *fence) -{ -} - -static inline void nvhost_fence_wait(struct nvhost_fence *fence, u32 timeout_in_ms) -{ -} - -#endif - -#if IS_ENABLED(CONFIG_TEGRA_GRHOST_SYNC) && !defined(CONFIG_SYNC) -int nvhost_dma_fence_unpack(struct dma_fence *fence, u32 *id, u32 *threshold); -bool nvhost_dma_fence_is_waitable(struct dma_fence *fence); -#else -static inline int nvhost_dma_fence_unpack(struct dma_fence *fence, u32 *id, - u32 *threshold) -{ - return -EINVAL; -} -static inline bool nvhost_dma_fence_is_waitable(struct dma_fence *fence) -{ - return false; -} -#endif - -#if IS_ENABLED(CONFIG_TEGRA_GRHOST_SYNC) && defined(CONFIG_SYNC) -struct sync_fence *nvhost_sync_fdget(int fd); -int nvhost_sync_num_pts(struct sync_fence *fence); -struct sync_fence *nvhost_sync_create_fence(struct platform_device *pdev, - struct nvhost_ctrl_sync_fence_info *pts, - u32 num_pts, const char *name); -int nvhost_sync_create_fence_fd( - struct platform_device *pdev, - struct nvhost_ctrl_sync_fence_info *pts, - u32 num_pts, - const char *name, - s32 *fence_fd); -int nvhost_sync_fence_set_name(int fence_fd, const char *name); -u32 nvhost_sync_pt_id(struct sync_pt *__pt); -u32 nvhost_sync_pt_thresh(struct sync_pt *__pt); -struct sync_pt *nvhost_sync_pt_from_fence_index(struct sync_fence *fence, - u32 sync_pt_index); -#else -static inline struct sync_fence *nvhost_sync_fdget(int fd) -{ - return NULL; -} - -static inline int nvhost_sync_num_pts(struct sync_fence *fence) -{ - return 0; -} - -static inline struct sync_fence *nvhost_sync_create_fence(struct platform_device *pdev, - struct nvhost_ctrl_sync_fence_info *pts, - u32 num_pts, const char *name) -{ - return ERR_PTR(-EINVAL); -} - -static inline int nvhost_sync_create_fence_fd( - struct platform_device *pdev, - struct nvhost_ctrl_sync_fence_info *pts, - u32 num_pts, - const char *name, - s32 *fence_fd) -{ - return -EINVAL; -} - -static inline int nvhost_sync_fence_set_name(int fence_fd, const char *name) -{ - return -EINVAL; -} - -static inline u32 nvhost_sync_pt_id(struct sync_pt *__pt) -{ - return 0; -} - -static inline u32 nvhost_sync_pt_thresh(struct sync_pt *__pt) -{ - return 0; -} - -static inline struct sync_pt *nvhost_sync_pt_from_fence_index( - struct sync_fence *fence, u32 sync_pt_index) -{ - return NULL; -} -#endif - -/* Hacky way to get access to struct nvhost_device_data for VI device. 
*/ -extern struct nvhost_device_data t20_vi_info; -extern struct nvhost_device_data t30_vi_info; -extern struct nvhost_device_data t11_vi_info; -extern struct nvhost_device_data t14_vi_info; - -int nvdec_do_idle(void); -int nvdec_do_unidle(void); - -#endif - -#endif diff --git a/drivers/crypto/tegra-se-nvhost.c b/drivers/crypto/tegra-se-nvhost.c deleted file mode 100644 index 9bebb633..00000000 --- a/drivers/crypto/tegra-se-nvhost.c +++ /dev/null @@ -1,7653 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved. - * - * Cryptographic API. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -//#include -#include - -#include -#include "nvhost.h" -#include "tegra-se-nvhost.h" -#include "hardware_t194.h" - -#define DRIVER_NAME "tegra-se-nvhost" -#define NV_SE1_CLASS_ID 0x3A -#define NV_SE2_CLASS_ID 0x3B -#define NV_SE3_CLASS_ID 0x3C -#define NV_SE4_CLASS_ID 0x3D -#define NUM_SE_ALGO 6 -#define MIN_DH_SZ_BITS 1536 -#define GCM_IV_SIZE 12 - -#define __nvhost_opcode_nonincr(x, y) nvhost_opcode_nonincr((x) / 4, (y)) -#define __nvhost_opcode_incr(x, y) nvhost_opcode_incr((x) / 4, (y)) -#define __nvhost_opcode_nonincr_w(x) nvhost_opcode_nonincr_w((x) / 4) -#define __nvhost_opcode_incr_w(x) nvhost_opcode_incr_w((x) / 4) - -/* Security Engine operation modes */ -enum tegra_se_aes_op_mode { - SE_AES_OP_MODE_CBC, /* Cipher Block Chaining (CBC) mode */ - SE_AES_OP_MODE_ECB, /* Electronic Codebook (ECB) mode */ - SE_AES_OP_MODE_CTR, /* Counter (CTR) mode */ - SE_AES_OP_MODE_OFB, /* Output feedback (CFB) mode */ - SE_AES_OP_MODE_CMAC, /* Cipher-based MAC (CMAC) mode */ - SE_AES_OP_MODE_RNG_DRBG, /* Deterministic Random Bit Generator */ - SE_AES_OP_MODE_SHA1, /* Secure Hash Algorithm-1 (SHA1) mode */ - SE_AES_OP_MODE_SHA224, /* Secure Hash Algorithm-224 (SHA224) mode */ - SE_AES_OP_MODE_SHA256, /* Secure Hash Algorithm-256 (SHA256) mode */ - SE_AES_OP_MODE_SHA384, /* Secure Hash Algorithm-384 (SHA384) mode */ - SE_AES_OP_MODE_SHA512, /* Secure Hash Algorithm-512 (SHA512) mode */ - SE_AES_OP_MODE_SHA3_224,/* Secure Hash Algorithm3-224 (SHA3-224) mode */ - SE_AES_OP_MODE_SHA3_256,/* Secure Hash Algorithm3-256 (SHA3-256) mode */ - SE_AES_OP_MODE_SHA3_384,/* Secure Hash Algorithm3-384 (SHA3-384) mode */ - SE_AES_OP_MODE_SHA3_512,/* Secure Hash Algorithm3-512 (SHA3-512) mode */ - SE_AES_OP_MODE_SHAKE128,/* Secure Hash Algorithm3 (SHAKE128) mode */ - SE_AES_OP_MODE_SHAKE256,/* Secure Hash Algorithm3 (SHAKE256) mode */ - SE_AES_OP_MODE_HMAC_SHA224, /* Hash based MAC (HMAC) - 224 */ - SE_AES_OP_MODE_HMAC_SHA256, /* Hash based MAC (HMAC) - 256 */ - SE_AES_OP_MODE_HMAC_SHA384, /* Hash based MAC (HMAC) - 384 */ - SE_AES_OP_MODE_HMAC_SHA512, /* Hash based MAC (HMAC) - 512 */ - SE_AES_OP_MODE_XTS, /* XTS mode */ - SE_AES_OP_MODE_INS, /* key insertion */ - SE_AES_OP_MODE_CBC_MAC, /* CBC MAC mode */ - SE_AES_OP_MODE_GCM /* GCM mode */ -}; - -enum tegra_se_aes_gcm_mode { - SE_AES_GMAC, - SE_AES_GCM_ENC, - SE_AES_GCM_DEC, - SE_AES_GCM_FINAL -}; - -/* Security Engine key table type */ -enum tegra_se_key_table_type { - SE_KEY_TABLE_TYPE_KEY, /* Key */ - SE_KEY_TABLE_TYPE_KEY_IN_MEM, /* Key in Memory */ - SE_KEY_TABLE_TYPE_ORGIV, /* Original IV */ - 
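	/*
	 * ORGIV above is the caller-supplied IV programmed for the first
	 * block of a request; UPDTDIV below selects the IV left in the
	 * engine by the previous operation, which is how multi-part
	 * CBC/CTR requests are chained.
	 */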
SE_KEY_TABLE_TYPE_UPDTDIV, /* Updated IV */ - SE_KEY_TABLE_TYPE_XTS_KEY1, /* XTS Key1 */ - SE_KEY_TABLE_TYPE_XTS_KEY2, /* XTS Key2 */ - SE_KEY_TABLE_TYPE_XTS_KEY1_IN_MEM, /* XTS Key1 in Memory */ - SE_KEY_TABLE_TYPE_XTS_KEY2_IN_MEM, /* XTS Key2 in Memory */ - SE_KEY_TABLE_TYPE_CMAC, - SE_KEY_TABLE_TYPE_HMAC, - SE_KEY_TABLE_TYPE_GCM /* GCM */ -}; - -/* Key access control type */ -enum tegra_se_kac_type { - SE_KAC_T18X, - SE_KAC_T23X -}; - -struct tegra_se_chipdata { - unsigned long aes_freq; - unsigned int cpu_freq_mhz; - enum tegra_se_kac_type kac_type; -}; - -/* Security Engine Linked List */ -struct tegra_se_ll { - dma_addr_t addr; /* DMA buffer address */ - u32 data_len; /* Data length in DMA buffer */ -}; - -enum tegra_se_algo { - SE_DRBG, - SE_AES, - SE_CMAC, - SE_RSA, - SE_SHA, - SE_AEAD, -}; - -enum tegra_se_callback { - NONE, - AES_CB, - SHA_CB, -}; - -struct tegra_se_cmdbuf { - dma_addr_t iova; - u32 *addr; - struct device *dev; - struct kref ref; - struct host1x_bo bo; - u32 words; - ssize_t size; -}; - -struct tegra_se_dev { - struct platform_device *pdev; - struct device *dev; - void __iomem *io_regs; /* se device memory/io */ - void __iomem *pmc_io_reg; /* pmc device memory/io */ - struct mutex lock; /* Protect request queue */ - /* Mutex lock (mtx) is to protect Hardware, as it can be used - * in parallel by different threads. For example, set_key - * request can come from a different thread and access HW - */ - struct mutex mtx; - struct clk *pclk; /* Security Engine clock */ - struct clk *enclk; /* Security Engine clock */ - struct crypto_queue queue; /* Security Engine crypto queue */ - struct tegra_se_slot *slot_list; /* pointer to key slots */ - struct tegra_se_rsa_slot *rsa_slot_list; /* rsa key slot pointer */ - struct tegra_se_chipdata *chipdata; /* chip specific data */ - u32 *src_ll_buf; /* pointer to source linked list buffer */ - dma_addr_t src_ll_buf_adr; /* Source linked list buffer dma address */ - u32 src_ll_size; /* Size of source linked list buffer */ - u32 *dst_ll_buf; /* pointer to destination linked list buffer */ - dma_addr_t dst_ll_buf_adr; /* Destination linked list dma address */ - u32 dst_ll_size; /* Size of destination linked list buffer */ - struct tegra_se_ll *src_ll; - struct tegra_se_ll *dst_ll; - struct tegra_se_ll *aes_src_ll; - struct tegra_se_ll *aes_dst_ll; - u32 *dh_buf1, *dh_buf2; - struct skcipher_request *reqs[SE_MAX_TASKS_PER_SUBMIT]; - struct ahash_request *sha_req; - unsigned int req_cnt; - u32 stream_id; - u32 syncpt_id; - u32 opcode_addr; - bool work_q_busy; /* Work queue busy status */ - struct host1x_channel *channel; - struct work_struct se_work; - struct workqueue_struct *se_work_q; - struct scatterlist sg; - struct host1x_client client; - bool dynamic_mem; - u32 *total_aes_buf; - dma_addr_t total_aes_buf_addr; - void *aes_buf; - dma_addr_t aes_buf_addr; - void *aes_bufs[SE_MAX_AESBUF_ALLOC]; - dma_addr_t aes_buf_addrs[SE_MAX_AESBUF_ALLOC]; - atomic_t aes_buf_stat[SE_MAX_AESBUF_ALLOC]; - dma_addr_t aes_addr; - dma_addr_t aes_cur_addr; - unsigned int cmdbuf_cnt; - unsigned int src_bytes_mapped; - unsigned int dst_bytes_mapped; - unsigned int gather_buf_sz; - unsigned int aesbuf_entry; - struct pm_qos_request boost_cpufreq_req; - /* Lock to protect cpufreq boost status */ - struct mutex boost_cpufreq_lock; - struct delayed_work restore_cpufreq_work; - unsigned long cpufreq_last_boosted; - bool cpufreq_boosted; - bool ioc; - bool sha_last; - bool sha_src_mapped; - bool sha_dst_mapped; -}; - -static struct tegra_se_dev 
*se_devices[NUM_SE_ALGO]; - -/* Security Engine request context */ -struct tegra_se_req_context { - enum tegra_se_aes_op_mode op_mode; /* Security Engine operation mode */ - bool encrypt; /* Operation type */ - u32 config; - u32 crypto_config; - bool init; /* For GCM */ - u8 *hash_result; /* Hash result buffer */ - struct tegra_se_dev *se_dev; -}; - -struct tegra_se_priv_data { - struct skcipher_request *reqs[SE_MAX_TASKS_PER_SUBMIT]; - struct ahash_request *sha_req; - struct tegra_se_dev *se_dev; - unsigned int req_cnt; - unsigned int src_bytes_mapped; - unsigned int dst_bytes_mapped; - unsigned int gather_buf_sz; - struct scatterlist sg; - void *buf; - bool dynmem; - bool sha_last; - bool sha_src_mapped; - bool sha_dst_mapped; - dma_addr_t buf_addr; - dma_addr_t iova; - unsigned int aesbuf_entry; -}; - -/* Security Engine AES context */ -struct tegra_se_aes_context { - struct tegra_se_dev *se_dev; /* Security Engine device */ - struct skcipher_request *req; - struct tegra_se_slot *slot; /* Security Engine key slot */ - struct tegra_se_slot *slot2; /* Security Engine key slot 2 */ - u32 keylen; /* key length in bits */ - u32 op_mode; /* AES operation mode */ - bool is_key_in_mem; /* Whether key is in memory */ - u8 key[64]; /* To store key if is_key_in_mem set */ -}; - -/* Security Engine random number generator context */ -struct tegra_se_rng_context { - struct tegra_se_dev *se_dev; /* Security Engine device */ - struct skcipher_request *req; - struct tegra_se_slot *slot; /* Security Engine key slot */ - u32 *dt_buf; /* Destination buffer pointer */ - dma_addr_t dt_buf_adr; /* Destination buffer dma address */ - u32 *rng_buf; /* RNG buffer pointer */ - dma_addr_t rng_buf_adr; /* RNG buffer dma address */ -}; - -/* Security Engine SHA context */ -struct tegra_se_sha_context { - struct tegra_se_dev *se_dev; /* Security Engine device */ - u32 op_mode; /* SHA operation mode */ - bool is_first; /* Represents first block */ - u8 *sha_buf[2]; /* Buffer to store residual data */ - dma_addr_t sha_buf_addr[2]; /* DMA address to residual data */ - u32 total_count; /* Total bytes in all the requests */ - u32 residual_bytes; /* Residual byte count */ - u32 blk_size; /* SHA block size */ - bool is_final; /* To know it is final/finup request */ - - /* HMAC Support*/ - struct tegra_se_slot *slot; /* Security Engine key slot */ - u32 keylen; /* key length in bits */ -}; - -struct tegra_se_sha_zero_length_vector { - unsigned int size; - char *digest; -}; - -/* Security Engine AES CMAC context */ -struct tegra_se_aes_cmac_context { - struct tegra_se_dev *se_dev; /* Security Engine device */ - struct tegra_se_slot *slot; /* Security Engine key slot */ - struct tegra_se_cmdbuf *cmdbuf; - u32 keylen; /* key length in bits */ - u8 K1[TEGRA_SE_KEY_128_SIZE]; /* Key1 */ - u8 K2[TEGRA_SE_KEY_128_SIZE]; /* Key2 */ - u8 *buf; /* Buffer pointer to store update request*/ - dma_addr_t buf_dma_addr; /* DMA address of buffer */ - u32 nbytes; /* Total bytes in input buffer */ -}; - -struct tegra_se_dh_context { - struct tegra_se_dev *se_dev; /* Security Engine device */ - struct tegra_se_rsa_slot *slot; /* Security Engine rsa key slot */ - void *key; - void *p; - void *g; - unsigned int key_size; - unsigned int p_size; - unsigned int g_size; -}; - -/* Security Engine AES CCM context */ -struct tegra_se_aes_ccm_ctx { - struct tegra_se_dev *se_dev; /* Security Engine device */ - struct tegra_se_slot *slot; /* Security Engine key slot */ - u32 keylen; /* key length in bits */ - - u8 *mac; - dma_addr_t mac_addr; - u8 
*enc_mac; /* Used during encryption */ - dma_addr_t enc_mac_addr; - u8 *dec_mac; /* Used during decryption */ - dma_addr_t dec_mac_addr; - - u8 *buf[4]; /* Temporary buffers */ - dma_addr_t buf_addr[4]; /* DMA address for buffers */ - unsigned int authsize; /* MAC size */ -}; - -/* Security Engine AES GCM context */ -struct tegra_se_aes_gcm_ctx { - struct tegra_se_dev *se_dev; /* Security Engine device */ - struct tegra_se_slot *slot; /* Security Engine key slot */ - u32 keylen; /* key length in bits */ - - u8 *mac; - dma_addr_t mac_addr; - unsigned int authsize; /* MAC size */ -}; - -/* Security Engine key slot */ -struct tegra_se_slot { - struct list_head node; - u8 slot_num; /* Key slot number */ - bool available; /* Tells whether key slot is free to use */ -}; - -static struct tegra_se_slot ssk_slot = { - .slot_num = 15, - .available = false, -}; - -static struct tegra_se_slot keymem_slot = { - .slot_num = 14, - .available = false, -}; - -static struct tegra_se_slot srk_slot = { - .slot_num = 0, - .available = false, -}; - -static struct tegra_se_slot pre_allocated_slot = { - .slot_num = 0, - .available = false, -}; - -static LIST_HEAD(key_slot); -static LIST_HEAD(rsa_key_slot); -static DEFINE_SPINLOCK(rsa_key_slot_lock); -static DEFINE_SPINLOCK(key_slot_lock); - -#define RNG_RESEED_INTERVAL 0x00773594 - -/* create a work for handling the async transfers */ -static void tegra_se_work_handler(struct work_struct *work); - -static int force_reseed_count; - -#define GET_MSB(x) ((x) >> (8 * sizeof(x) - 1)) -#define BOOST_PERIOD (msecs_to_jiffies(2 * 1000)) /* 2 seconds */ - -static unsigned int boost_cpu_freq; -module_param(boost_cpu_freq, uint, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(boost_cpu_freq, "CPU frequency (in MHz) to boost"); - -static void tegra_se_restore_cpu_freq_fn(struct work_struct *work) -{ - struct tegra_se_dev *se_dev = container_of( - work, struct tegra_se_dev, restore_cpufreq_work.work); - unsigned long delay = BOOST_PERIOD; - - mutex_lock(&se_dev->boost_cpufreq_lock); - if (time_is_after_jiffies(se_dev->cpufreq_last_boosted + delay)) { - schedule_delayed_work(&se_dev->restore_cpufreq_work, delay); - } else { -#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0) - pm_qos_update_request(&se_dev->boost_cpufreq_req, - PM_QOS_DEFAULT_VALUE); -#else - cpu_latency_qos_update_request(&se_dev->boost_cpufreq_req, - PM_QOS_DEFAULT_VALUE); -#endif - se_dev->cpufreq_boosted = false; - } - mutex_unlock(&se_dev->boost_cpufreq_lock); -} - -static void tegra_se_boost_cpu_freq(struct tegra_se_dev *se_dev) -{ - unsigned long delay = BOOST_PERIOD; - s32 cpufreq_hz = boost_cpu_freq * 1000; - - mutex_lock(&se_dev->boost_cpufreq_lock); - if (!se_dev->cpufreq_boosted) { -#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0) - pm_qos_update_request(&se_dev->boost_cpufreq_req, cpufreq_hz); -#else - cpu_latency_qos_update_request(&se_dev->boost_cpufreq_req, - cpufreq_hz); -#endif - schedule_delayed_work(&se_dev->restore_cpufreq_work, delay); - se_dev->cpufreq_boosted = true; - } - - se_dev->cpufreq_last_boosted = jiffies; - mutex_unlock(&se_dev->boost_cpufreq_lock); -} - -static void tegra_se_boost_cpu_init(struct tegra_se_dev *se_dev) -{ - boost_cpu_freq = se_dev->chipdata->cpu_freq_mhz; - - INIT_DELAYED_WORK(&se_dev->restore_cpufreq_work, - tegra_se_restore_cpu_freq_fn); - -#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0) - pm_qos_add_request(&se_dev->boost_cpufreq_req, PM_QOS_CPU_FREQ_MIN, - PM_QOS_DEFAULT_VALUE); -#else - cpu_latency_qos_add_request(&se_dev->boost_cpufreq_req, - PM_QOS_DEFAULT_VALUE); 
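	/*
	 * Kernels v5.7 and later dropped the global PM QoS classes
	 * (including the PM_QOS_CPU_FREQ_MIN class used in the pre-5.7
	 * branch above), so this branch falls back to the CPU latency
	 * QoS interface; the boost value later passed in from
	 * tegra_se_boost_cpu_freq() is then treated as a latency bound
	 * rather than a frequency floor.
	 */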
-#endif - - mutex_init(&se_dev->boost_cpufreq_lock); -} - -static void tegra_se_boost_cpu_deinit(struct tegra_se_dev *se_dev) -{ - mutex_destroy(&se_dev->boost_cpufreq_lock); -#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0) - pm_qos_remove_request(&se_dev->boost_cpufreq_req); -#else - cpu_latency_qos_remove_request(&se_dev->boost_cpufreq_req); -#endif - cancel_delayed_work_sync(&se_dev->restore_cpufreq_work); -} - -static void tegra_se_leftshift_onebit(u8 *in_buf, u32 size, u8 *org_msb) -{ - u8 carry; - u32 i; - - *org_msb = GET_MSB(in_buf[0]); - - /* left shift one bit */ - in_buf[0] <<= 1; - for (carry = 0, i = 1; i < size; i++) { - carry = GET_MSB(in_buf[i]); - in_buf[i - 1] |= carry; - in_buf[i] <<= 1; - } -} - -static inline void se_writel(struct tegra_se_dev *se_dev, unsigned int val, - unsigned int reg_offset) -{ - writel(val, se_dev->io_regs + reg_offset); -} - -static inline unsigned int se_readl(struct tegra_se_dev *se_dev, - unsigned int reg_offset) -{ - unsigned int val; - - val = readl(se_dev->io_regs + reg_offset); - - return val; -} - -static void tegra_se_free_key_slot(struct tegra_se_slot *slot) -{ - if (slot) { - spin_lock(&key_slot_lock); - slot->available = true; - spin_unlock(&key_slot_lock); - } -} - -static struct tegra_se_slot *tegra_se_alloc_key_slot(void) -{ - struct tegra_se_slot *slot = NULL; - bool found = false; - - spin_lock(&key_slot_lock); - list_for_each_entry(slot, &key_slot, node) { - if (slot->available && - (slot->slot_num != pre_allocated_slot.slot_num)) { - slot->available = false; - found = true; - break; - } - } - spin_unlock(&key_slot_lock); - - return found ? slot : NULL; -} - -static int tegra_init_key_slot(struct tegra_se_dev *se_dev) -{ - int i; - - spin_lock_init(&key_slot_lock); - spin_lock(&key_slot_lock); - /* - * To avoid multiple secure engines initializing - * key slots. - */ - if (key_slot.prev != key_slot.next) { - spin_unlock(&key_slot_lock); - return 0; - } - spin_unlock(&key_slot_lock); - - se_dev->slot_list = devm_kzalloc(se_dev->dev, - sizeof(struct tegra_se_slot) * - TEGRA_SE_KEYSLOT_COUNT, GFP_KERNEL); - if (!se_dev->slot_list) - return -ENOMEM; - - spin_lock(&key_slot_lock); - for (i = 0; i < TEGRA_SE_KEYSLOT_COUNT; i++) { - /* - * Slot 0, 14 and 15 are reserved and will not be added to the - * free slots pool. Slot 0 is used for SRK generation, Slot 14 - * for handling keys which are stored in memories and Slot 15 is - * used for SSK operation. 
- */ - if (se_dev->chipdata->kac_type == SE_KAC_T18X) { - if ((i == srk_slot.slot_num) || (i == ssk_slot.slot_num) - || (i == keymem_slot.slot_num)) - continue; - } - se_dev->slot_list[i].available = true; - se_dev->slot_list[i].slot_num = i; - INIT_LIST_HEAD(&se_dev->slot_list[i].node); - list_add_tail(&se_dev->slot_list[i].node, &key_slot); - } - spin_unlock(&key_slot_lock); - - return 0; -} - -static int tegra_se_alloc_ll_buf(struct tegra_se_dev *se_dev, u32 num_src_sgs, - u32 num_dst_sgs) -{ - if (se_dev->src_ll_buf || se_dev->dst_ll_buf) { - dev_err(se_dev->dev, - "trying to allocate memory to allocated memory\n"); - return -EBUSY; - } - - if (num_src_sgs) { - se_dev->src_ll_size = sizeof(struct tegra_se_ll) * num_src_sgs; - se_dev->src_ll_buf = dma_alloc_coherent( - se_dev->dev, se_dev->src_ll_size, - &se_dev->src_ll_buf_adr, GFP_KERNEL); - if (!se_dev->src_ll_buf) { - dev_err(se_dev->dev, - "can not allocate src lldma buffer\n"); - return -ENOMEM; - } - } - if (num_dst_sgs) { - se_dev->dst_ll_size = sizeof(struct tegra_se_ll) * num_dst_sgs; - se_dev->dst_ll_buf = dma_alloc_coherent( - se_dev->dev, se_dev->dst_ll_size, - &se_dev->dst_ll_buf_adr, GFP_KERNEL); - if (!se_dev->dst_ll_buf) { - dev_err(se_dev->dev, - "can not allocate dst ll dma buffer\n"); - return -ENOMEM; - } - } - - return 0; -} - -static void tegra_se_free_ll_buf(struct tegra_se_dev *se_dev) -{ - if (se_dev->src_ll_buf) { - dma_free_coherent(se_dev->dev, se_dev->src_ll_size, - se_dev->src_ll_buf, se_dev->src_ll_buf_adr); - se_dev->src_ll_buf = NULL; - } - - if (se_dev->dst_ll_buf) { - dma_free_coherent(se_dev->dev, se_dev->dst_ll_size, - se_dev->dst_ll_buf, se_dev->dst_ll_buf_adr); - se_dev->dst_ll_buf = NULL; - } -} - -static u32 tegra_se_get_config(struct tegra_se_dev *se_dev, - enum tegra_se_aes_op_mode mode, bool encrypt, - u32 data) -{ - u32 val = 0, key_len, is_last; - - switch (mode) { - case SE_AES_OP_MODE_CBC: - case SE_AES_OP_MODE_CMAC: - key_len = data; - if (encrypt) { - val = SE_CONFIG_ENC_ALG(ALG_AES_ENC); - switch (se_dev->chipdata->kac_type) { - case SE_KAC_T23X: - if (mode == SE_AES_OP_MODE_CMAC) - val |= SE_CONFIG_ENC_MODE(MODE_CMAC); - break; - case SE_KAC_T18X: - if (key_len == TEGRA_SE_KEY_256_SIZE) - val |= SE_CONFIG_ENC_MODE(MODE_KEY256); - else if (key_len == TEGRA_SE_KEY_192_SIZE) - val |= SE_CONFIG_ENC_MODE(MODE_KEY192); - else - val |= SE_CONFIG_ENC_MODE(MODE_KEY128); - break; - default: - break; - } - - val |= SE_CONFIG_DEC_ALG(ALG_NOP); - } else { - val = SE_CONFIG_DEC_ALG(ALG_AES_DEC); - if (se_dev->chipdata->kac_type == SE_KAC_T18X) { - if (key_len == TEGRA_SE_KEY_256_SIZE) - val |= SE_CONFIG_DEC_MODE(MODE_KEY256); - else if (key_len == TEGRA_SE_KEY_192_SIZE) - val |= SE_CONFIG_DEC_MODE(MODE_KEY192); - else - val |= SE_CONFIG_DEC_MODE(MODE_KEY128); - } - } - if (mode == SE_AES_OP_MODE_CMAC) - val |= SE_CONFIG_DST(DST_HASHREG); - else - val |= SE_CONFIG_DST(DST_MEMORY); - break; - - case SE_AES_OP_MODE_CBC_MAC: - val = SE_CONFIG_ENC_ALG(ALG_AES_ENC) - | SE_CONFIG_DEC_ALG(ALG_NOP) - | SE_CONFIG_DST(DST_HASHREG); - break; - - case SE_AES_OP_MODE_GCM: - if (data == SE_AES_GMAC) { - if (encrypt) { - val = SE_CONFIG_ENC_ALG(ALG_AES_ENC) - | SE_CONFIG_DEC_ALG(ALG_NOP) - | SE_CONFIG_ENC_MODE(MODE_GMAC); - } else { - val = SE_CONFIG_ENC_ALG(ALG_NOP) - | SE_CONFIG_DEC_ALG(ALG_AES_DEC) - | SE_CONFIG_DEC_MODE(MODE_GMAC); - } - } else if (data == SE_AES_GCM_ENC) { - val = SE_CONFIG_ENC_ALG(ALG_AES_ENC) - | SE_CONFIG_DEC_ALG(ALG_NOP) - | SE_CONFIG_ENC_MODE(MODE_GCM); - } else if (data == 
SE_AES_GCM_DEC) { - val = SE_CONFIG_ENC_ALG(ALG_NOP) - | SE_CONFIG_DEC_ALG(ALG_AES_DEC) - | SE_CONFIG_DEC_MODE(MODE_GCM); - } else if (data == SE_AES_GCM_FINAL) { - if (encrypt) { - val = SE_CONFIG_ENC_ALG(ALG_AES_ENC) - | SE_CONFIG_DEC_ALG(ALG_NOP) - | SE_CONFIG_ENC_MODE(MODE_GCM_FINAL); - } else { - val = SE_CONFIG_ENC_ALG(ALG_NOP) - | SE_CONFIG_DEC_ALG(ALG_AES_DEC) - | SE_CONFIG_DEC_MODE(MODE_GCM_FINAL); - } - } - break; - - case SE_AES_OP_MODE_RNG_DRBG: - if (se_dev->chipdata->kac_type == SE_KAC_T23X) { - val = SE_CONFIG_ENC_ALG(ALG_RNG) | - SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_DST(DST_MEMORY); - } else { - val = SE_CONFIG_ENC_ALG(ALG_RNG) | - SE_CONFIG_ENC_MODE(MODE_KEY192) | - SE_CONFIG_DST(DST_MEMORY); - } - break; - - case SE_AES_OP_MODE_ECB: - key_len = data; - if (encrypt) { - val = SE_CONFIG_ENC_ALG(ALG_AES_ENC); - if (se_dev->chipdata->kac_type == SE_KAC_T18X) { - if (key_len == TEGRA_SE_KEY_256_SIZE) - val |= SE_CONFIG_ENC_MODE(MODE_KEY256); - else if (key_len == TEGRA_SE_KEY_192_SIZE) - val |= SE_CONFIG_ENC_MODE(MODE_KEY192); - else - val |= SE_CONFIG_ENC_MODE(MODE_KEY128); - } - } else { - val = SE_CONFIG_DEC_ALG(ALG_AES_DEC); - if (se_dev->chipdata->kac_type == SE_KAC_T18X) { - if (key_len == TEGRA_SE_KEY_256_SIZE) - val |= SE_CONFIG_DEC_MODE(MODE_KEY256); - else if (key_len == TEGRA_SE_KEY_192_SIZE) - val |= SE_CONFIG_DEC_MODE(MODE_KEY192); - else - val |= SE_CONFIG_DEC_MODE(MODE_KEY128); - } - } - val |= SE_CONFIG_DST(DST_MEMORY); - break; - case SE_AES_OP_MODE_CTR: - key_len = data; - if (encrypt) { - val = SE_CONFIG_ENC_ALG(ALG_AES_ENC); - if (se_dev->chipdata->kac_type == SE_KAC_T18X) { - if (key_len == TEGRA_SE_KEY_256_SIZE) - val |= SE_CONFIG_ENC_MODE(MODE_KEY256); - else if (key_len == TEGRA_SE_KEY_192_SIZE) - val |= SE_CONFIG_ENC_MODE(MODE_KEY192); - else - val |= SE_CONFIG_ENC_MODE(MODE_KEY128); - } - } else { - val = SE_CONFIG_DEC_ALG(ALG_AES_DEC); - if (se_dev->chipdata->kac_type == SE_KAC_T18X) { - if (key_len == TEGRA_SE_KEY_256_SIZE) - val |= SE_CONFIG_DEC_MODE(MODE_KEY256); - else if (key_len == TEGRA_SE_KEY_192_SIZE) - val |= SE_CONFIG_DEC_MODE(MODE_KEY192); - else - val |= SE_CONFIG_DEC_MODE(MODE_KEY128); - } - } - val |= SE_CONFIG_DST(DST_MEMORY); - break; - case SE_AES_OP_MODE_OFB: - key_len = data; - if (encrypt) { - val = SE_CONFIG_ENC_ALG(ALG_AES_ENC); - if (se_dev->chipdata->kac_type == SE_KAC_T18X) { - if (key_len == TEGRA_SE_KEY_256_SIZE) - val |= SE_CONFIG_ENC_MODE(MODE_KEY256); - else if (key_len == TEGRA_SE_KEY_192_SIZE) - val |= SE_CONFIG_ENC_MODE(MODE_KEY192); - else - val |= SE_CONFIG_ENC_MODE(MODE_KEY128); - } - } else { - val = SE_CONFIG_DEC_ALG(ALG_AES_DEC); - if (se_dev->chipdata->kac_type == SE_KAC_T18X) { - if (key_len == TEGRA_SE_KEY_256_SIZE) - val |= SE_CONFIG_DEC_MODE(MODE_KEY256); - else if (key_len == TEGRA_SE_KEY_192_SIZE) - val |= SE_CONFIG_DEC_MODE(MODE_KEY192); - else - val |= SE_CONFIG_DEC_MODE(MODE_KEY128); - } - } - val |= SE_CONFIG_DST(DST_MEMORY); - break; - - case SE_AES_OP_MODE_SHA1: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_ENC_ALG(ALG_SHA) | - SE_CONFIG_ENC_MODE(MODE_SHA1) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_SHA224: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_ENC_ALG(ALG_SHA) | - SE_CONFIG_ENC_MODE(MODE_SHA224) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_SHA256: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - 
SE_CONFIG_ENC_ALG(ALG_SHA) | - SE_CONFIG_ENC_MODE(MODE_SHA256) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_SHA384: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_ENC_ALG(ALG_SHA) | - SE_CONFIG_ENC_MODE(MODE_SHA384) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_SHA512: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_ENC_ALG(ALG_SHA) | - SE_CONFIG_ENC_MODE(MODE_SHA512) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_SHA3_224: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_ENC_ALG(ALG_SHA) | - SE_CONFIG_ENC_MODE(MODE_SHA3_224) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_SHA3_256: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_ENC_ALG(ALG_SHA) | - SE_CONFIG_ENC_MODE(MODE_SHA3_256) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_SHA3_384: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_ENC_ALG(ALG_SHA) | - SE_CONFIG_ENC_MODE(MODE_SHA3_384) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_SHA3_512: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_ENC_ALG(ALG_SHA) | - SE_CONFIG_ENC_MODE(MODE_SHA3_512) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_SHAKE128: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_ENC_ALG(ALG_SHA) | - SE_CONFIG_ENC_MODE(MODE_SHAKE128) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_SHAKE256: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_ENC_ALG(ALG_SHA) | - SE_CONFIG_ENC_MODE(MODE_SHAKE256) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_HMAC_SHA224: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_ENC_ALG(ALG_HMAC) | - SE_CONFIG_ENC_MODE(MODE_SHA224) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_HMAC_SHA256: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_ENC_ALG(ALG_HMAC) | - SE_CONFIG_ENC_MODE(MODE_SHA256) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_HMAC_SHA384: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_ENC_ALG(ALG_HMAC) | - SE_CONFIG_ENC_MODE(MODE_SHA384) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_HMAC_SHA512: - is_last = data; - val = SE_CONFIG_DEC_ALG(ALG_NOP) | - SE_CONFIG_ENC_ALG(ALG_HMAC) | - SE_CONFIG_ENC_MODE(MODE_SHA512) | - SE_CONFIG_DST(DST_MEMORY); - if (!is_last) - val |= SE_CONFIG_DST(DST_HASHREG); - break; - case SE_AES_OP_MODE_XTS: - key_len = data; - if (encrypt) { - val = SE_CONFIG_ENC_ALG(ALG_AES_ENC); - if (se_dev->chipdata->kac_type == SE_KAC_T18X) { - if ((key_len / 2) == TEGRA_SE_KEY_256_SIZE) - val |= SE_CONFIG_ENC_MODE(MODE_KEY256); - else - val |= SE_CONFIG_ENC_MODE(MODE_KEY128); - } - val |= SE_CONFIG_DEC_ALG(ALG_NOP); - } else { - val = SE_CONFIG_DEC_ALG(ALG_AES_DEC); - if (se_dev->chipdata->kac_type == SE_KAC_T18X) { - 
if (key_len / 2 == TEGRA_SE_KEY_256_SIZE) - val |= SE_CONFIG_DEC_MODE(MODE_KEY256); - else - val |= SE_CONFIG_DEC_MODE(MODE_KEY128); - } - val |= SE_CONFIG_ENC_ALG(ALG_NOP); - } - val |= SE_CONFIG_DST(DST_MEMORY); - break; - case SE_AES_OP_MODE_INS: - val = SE_CONFIG_ENC_ALG(ALG_INS); - val |= SE_CONFIG_DEC_ALG(ALG_NOP); - break; - default: - dev_warn(se_dev->dev, "Invalid operation mode\n"); - break; - } - - pr_debug("%s:%d config val = 0x%x\n", __func__, __LINE__, val); - - return val; -} - -static void tegra_unmap_sg(struct device *dev, struct scatterlist *sg, - enum dma_data_direction dir, u32 total) -{ - u32 total_loop; - - total_loop = total; - while (sg && (total_loop > 0)) { - dma_unmap_sg(dev, sg, 1, dir); - total_loop -= min_t(size_t, (size_t)sg->length, - (size_t)total_loop); - sg = sg_next(sg); - } -} - -static unsigned int tegra_se_count_sgs(struct scatterlist *sl, u32 nbytes) -{ - unsigned int sg_nents = 0; - - while (sl) { - sg_nents++; - nbytes -= min((size_t)sl->length, (size_t)nbytes); - if (!nbytes) - break; - sl = sg_next(sl); - } - - return sg_nents; -} - -static void tegra_se_sha_complete_callback(void *priv, int nr_completed) -{ - struct tegra_se_priv_data *priv_data = priv; - struct ahash_request *req; - struct tegra_se_dev *se_dev; - struct crypto_ahash *tfm; - struct tegra_se_req_context *req_ctx; - struct tegra_se_sha_context *sha_ctx; - int dst_len; - - pr_debug("%s:%d sha callback", __func__, __LINE__); - - se_dev = priv_data->se_dev; - - req = priv_data->sha_req; - if (!req) { - dev_err(se_dev->dev, "Invalid request for callback\n"); - devm_kfree(se_dev->dev, priv_data); - return; - } - - tfm = crypto_ahash_reqtfm(req); - req_ctx = ahash_request_ctx(req); - dst_len = crypto_ahash_digestsize(tfm); - sha_ctx = crypto_ahash_ctx(tfm); - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) - /* For SHAKE128/SHAKE256, digest size can vary */ -// if (sha_ctx->op_mode == SE_AES_OP_MODE_SHAKE128 -// || sha_ctx->op_mode == SE_AES_OP_MODE_SHAKE256) -// dst_len = req->dst_size; -#endif - - /* copy result */ - if (sha_ctx->is_final) - memcpy(req->result, req_ctx->hash_result, dst_len); - - if (priv_data->sha_src_mapped) - tegra_unmap_sg(se_dev->dev, req->src, DMA_TO_DEVICE, - priv_data->src_bytes_mapped); - - if (priv_data->sha_dst_mapped) - tegra_unmap_sg(se_dev->dev, &priv_data->sg, DMA_FROM_DEVICE, - priv_data->dst_bytes_mapped); - - devm_kfree(se_dev->dev, req_ctx->hash_result); - - req->base.complete(&req->base, 0); - - devm_kfree(se_dev->dev, priv_data); - - pr_debug("%s:%d sha callback complete", __func__, __LINE__); -} - -static void tegra_se_aes_complete_callback(void *priv, int nr_completed) -{ - int i = 0; - struct tegra_se_priv_data *priv_data = priv; - struct skcipher_request *req; - struct tegra_se_dev *se_dev; - void *buf; - u32 num_sgs; - - pr_debug("%s(%d) aes callback\n", __func__, __LINE__); - - se_dev = priv_data->se_dev; - - if (!priv_data->req_cnt) { - devm_kfree(se_dev->dev, priv_data); - return; - } - - if (!se_dev->ioc) - dma_sync_single_for_cpu(se_dev->dev, priv_data->buf_addr, - priv_data->gather_buf_sz, DMA_BIDIRECTIONAL); - - buf = priv_data->buf; - for (i = 0; i < priv_data->req_cnt; i++) { - req = priv_data->reqs[i]; - if (!req) { - dev_err(se_dev->dev, "Invalid request for callback\n"); - if (priv_data->dynmem) - kfree(priv_data->buf); - devm_kfree(se_dev->dev, priv_data); - return; - } - - num_sgs = tegra_se_count_sgs(req->dst, req->cryptlen); - if (num_sgs == 1) - memcpy(sg_virt(req->dst), buf, req->cryptlen); - else - 
sg_copy_from_buffer(req->dst, num_sgs, buf, - req->cryptlen); - - buf += req->cryptlen; - req->base.complete(&req->base, 0); - } - - if (!se_dev->ioc) - dma_unmap_sg(se_dev->dev, &priv_data->sg, 1, DMA_BIDIRECTIONAL); - - if (unlikely(priv_data->dynmem)) { - if (se_dev->ioc) - dma_free_coherent(se_dev->dev, priv_data->gather_buf_sz, - priv_data->buf, priv_data->buf_addr); - else - kfree(priv_data->buf); - } else { - atomic_set(&se_dev->aes_buf_stat[priv_data->aesbuf_entry], 1); - } - - devm_kfree(se_dev->dev, priv_data); - pr_debug("%s(%d) aes callback complete\n", __func__, __LINE__); -} - -static void se_nvhost_write_method(u32 *buf, u32 op1, u32 op2, u32 *offset) -{ - int i = 0; - - buf[i++] = op1; - buf[i++] = op2; - *offset = *offset + 2; -} - -static struct host1x_bo *tegra_se_cmdbuf_get(struct host1x_bo *host_bo) -{ - struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo); - - kref_get(&cmdbuf->ref); - - return host_bo; -} - -static void tegra_se_cmdbuf_release(struct kref *ref) -{ - struct tegra_se_cmdbuf *cmdbuf = container_of(ref, struct tegra_se_cmdbuf, ref); - - dma_free_attrs(cmdbuf->dev->parent, cmdbuf->size, cmdbuf->addr, - cmdbuf->iova, 0); - - kfree(cmdbuf); -} - -static void tegra_se_cmdbuf_put(struct host1x_bo *host_bo) -{ - struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo); - - kref_put(&cmdbuf->ref, tegra_se_cmdbuf_release); -} - -static struct host1x_bo_mapping * -tegra_se_cmdbuf_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction) -{ - struct tegra_se_cmdbuf *cmdbuf = container_of(bo, struct tegra_se_cmdbuf, bo); - struct host1x_bo_mapping *map; - int err; - - map = kzalloc(sizeof(*map), GFP_KERNEL); - if (!map) - return ERR_PTR(-ENOMEM); - - kref_init(&map->ref); - map->bo = host1x_bo_get(bo); - map->direction = direction; - map->dev = dev; - - map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL); - if (!map->sgt) { - err = -ENOMEM; - goto free; - } - - err = dma_get_sgtable(cmdbuf->dev->parent, map->sgt, cmdbuf->addr, - cmdbuf->iova, cmdbuf->words * 4); - if (err) - goto free_sgt; - - err = dma_map_sgtable(dev, map->sgt, direction, 0); - if (err) - goto free_sgt; - - map->phys = sg_dma_address(map->sgt->sgl); - map->size = cmdbuf->words * 4; - map->chunks = err; - - return map; - -free_sgt: - sg_free_table(map->sgt); - kfree(map->sgt); -free: - kfree(map); - return ERR_PTR(err); -} - -static void tegra_se_cmdbuf_unpin(struct host1x_bo_mapping *map) -{ - if (!map) - return; - - dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0); - sg_free_table(map->sgt); - kfree(map->sgt); - host1x_bo_put(map->bo); - - kfree(map); -} - -static void *tegra_se_cmdbuf_mmap(struct host1x_bo *host_bo) -{ - struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo); - - return cmdbuf->addr; -} - -static void tegra_se_cmdbuf_munmap(struct host1x_bo *host_bo, void *addr) -{ -} - -static const struct host1x_bo_ops tegra_se_cmdbuf_ops = { - .get = tegra_se_cmdbuf_get, - .put = tegra_se_cmdbuf_put, - .pin = tegra_se_cmdbuf_pin, - .unpin = tegra_se_cmdbuf_unpin, - .mmap = tegra_se_cmdbuf_mmap, - .munmap = tegra_se_cmdbuf_munmap, -}; - -static struct tegra_se_cmdbuf *tegra_se_alloc_cmdbuf(struct device *dev, ssize_t size) -{ - struct tegra_se_cmdbuf *cmdbuf; - - cmdbuf = devm_kzalloc(dev, sizeof(struct tegra_se_cmdbuf), GFP_KERNEL); - if (!cmdbuf) - return ERR_PTR(-ENOMEM); - - cmdbuf->addr = dma_alloc_attrs(dev, size, &cmdbuf->iova, - GFP_KERNEL, 0); - if (!cmdbuf->addr) - return 
ERR_PTR(-ENOMEM); - - cmdbuf->size = size; - cmdbuf->dev = dev; - - host1x_bo_init(&cmdbuf->bo, &tegra_se_cmdbuf_ops); - kref_init(&cmdbuf->ref); - - return cmdbuf; -} - -static int tegra_se_channel_submit_gather(struct tegra_se_dev *se_dev, - struct tegra_se_cmdbuf *cmdbuf, - u32 offset, u32 num_words, - enum tegra_se_callback callback) -{ - int i = 0; - struct host1x_job *job = NULL; - u32 syncpt_id = 0; - int err = 0; - struct tegra_se_priv_data *priv = NULL; - struct nvhost_device_data *pdata = platform_get_drvdata(se_dev->pdev); - - if (callback) { - priv = devm_kzalloc(se_dev->dev, - sizeof(struct tegra_se_priv_data), - GFP_KERNEL); - if (!priv) - return -ENOMEM; - } - - if (!se_dev->channel) { - se_dev->channel = host1x_channel_request(&se_dev->client); - if (err) { - dev_err(se_dev->dev, "Nvhost Channel map failed\n"); - goto exit; - } - } - - job = host1x_job_alloc(se_dev->channel, 1, 0, true); - if (!job) { - dev_err(se_dev->dev, "Nvhost Job allocation failed\n"); - err = -ENOMEM; - goto exit; - } - - if (!se_dev->syncpt_id) { - se_dev->syncpt_id = nvhost_get_syncpt_host_managed( - se_dev->pdev, 0, se_dev->pdev->name); - if (!se_dev->syncpt_id) { - dev_err(se_dev->dev, "Cannot get syncpt_id for SE(%s)\n", - se_dev->pdev->name); - err = -ENOMEM; - goto error; - } - } - syncpt_id = se_dev->syncpt_id; - - /* initialize job data */ - /* FIXME: Remove 'pdata' dependency here. Since 'pdata' is a static - * variable this causes failure when multiple instances of driver - * with same compatible is run. - * */ - job->syncpt = host1x_syncpt_get_by_id(pdata->host1x, syncpt_id); - job->syncpt_incrs = 1; - job->client = &se_dev->client; - job->class = se_dev->client.class; - job->serialize = true; - job->engine_fallback_streamid = se_dev->stream_id; - job->engine_streamid_offset = SE_STREAMID_REG_OFFSET; - se_dev->client.num_syncpts = 1; - - cmdbuf->dev = se_dev->dev; - cmdbuf->words = num_words; - - /* push increment after work has been completed */ - se_nvhost_write_method(&cmdbuf->addr[num_words], nvhost_opcode_nonincr( - host1x_uclass_incr_syncpt_r(), 1), - nvhost_class_host_incr_syncpt( - 0x00000001, syncpt_id), &num_words); - - host1x_job_add_gather(job, &cmdbuf->bo, num_words, offset); -#if 0 - err = nvhost_job_add_client_gather_address(job, num_words, - pdata->class, iova); - if (err) { - dev_err(se_dev->dev, "Nvhost failed to add gather\n"); - goto error; - } -#endif - - err = host1x_job_pin(job, se_dev->dev); - if (err) { - dev_err(se_dev->dev, "pin job failed\n"); - goto error; - } - - err = host1x_job_submit(job); - if (err) { - host1x_job_unpin(job); - dev_err(se_dev->dev, "Nvhost submit failed\n"); - goto error; - } - - if (callback == AES_CB) { - priv->se_dev = se_dev; - for (i = 0; i < se_dev->req_cnt; i++) - priv->reqs[i] = se_dev->reqs[i]; - - if (!se_dev->ioc) - priv->sg = se_dev->sg; - - if (unlikely(se_dev->dynamic_mem)) { - priv->buf = se_dev->aes_buf; - priv->dynmem = se_dev->dynamic_mem; - } else { - priv->buf = se_dev->aes_bufs[se_dev->aesbuf_entry]; - priv->aesbuf_entry = se_dev->aesbuf_entry; - } - - priv->buf_addr = se_dev->aes_addr; - priv->req_cnt = se_dev->req_cnt; - priv->gather_buf_sz = se_dev->gather_buf_sz; - - /* Register callback to be called once - * syncpt value has been reached - */ - err = nvhost_intr_register_notifier( - se_dev->pdev, syncpt_id, job->syncpt_end, - tegra_se_aes_complete_callback, priv); - if (err) { - dev_dbg(se_dev->dev, - "add nvhost interrupt action failed (%d) for AES\n", err); - tegra_se_aes_complete_callback(priv, 0); - err = 0; 
- // goto error; - } - } else if (callback == SHA_CB) { - priv->se_dev = se_dev; - priv->sha_req = se_dev->sha_req; - priv->sg = se_dev->sg; - priv->src_bytes_mapped = se_dev->src_bytes_mapped; - priv->dst_bytes_mapped = se_dev->dst_bytes_mapped; - priv->sha_src_mapped = se_dev->sha_src_mapped; - priv->sha_dst_mapped = se_dev->sha_dst_mapped; - priv->sha_last = se_dev->sha_last; - priv->buf_addr = se_dev->dst_ll->addr; - - err = nvhost_intr_register_notifier( - se_dev->pdev, syncpt_id, job->syncpt_end, - tegra_se_sha_complete_callback, priv); - if (err) { - dev_dbg(se_dev->dev, - "add nvhost interrupt action failed for SHA\n"); - tegra_se_sha_complete_callback(priv, 0); - err = 0; - // goto error; - } - } else { - /* wait until host1x has processed work */ - host1x_syncpt_wait( - job->syncpt, job->syncpt_end, - MAX_SCHEDULE_TIMEOUT, NULL); - } - - se_dev->req_cnt = 0; - se_dev->gather_buf_sz = 0; - se_dev->cmdbuf_cnt = 0; - se_dev->src_bytes_mapped = 0; - se_dev->dst_bytes_mapped = 0; - se_dev->sha_src_mapped = false; - se_dev->sha_dst_mapped = false; - se_dev->sha_last = false; -error: - host1x_job_put(job); -exit: - if (err) - devm_kfree(se_dev->dev, priv); - - return err; -} - -static void tegra_se_send_ctr_seed(struct tegra_se_dev *se_dev, u32 *pdata, - unsigned int opcode_addr, u32 *cpuvaddr) -{ - u32 j; - u32 cmdbuf_num_words = 0, i = 0; - - i = se_dev->cmdbuf_cnt; - - if (se_dev->chipdata->kac_type != SE_KAC_T23X) { - cpuvaddr[i++] = __nvhost_opcode_nonincr(opcode_addr + - SE_AES_CRYPTO_CTR_SPARE, 1); - cpuvaddr[i++] = SE_AES_CTR_LITTLE_ENDIAN; - } - cpuvaddr[i++] = nvhost_opcode_setpayload(4); - cpuvaddr[i++] = __nvhost_opcode_incr_w(opcode_addr + - SE_AES_CRYPTO_LINEAR_CTR); - for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) - cpuvaddr[i++] = pdata[j]; - - cmdbuf_num_words = i; - se_dev->cmdbuf_cnt = i; -} - -static int tegra_se_aes_ins_op(struct tegra_se_dev *se_dev, u8 *pdata, - u32 data_len, u8 slot_num, - enum tegra_se_key_table_type type, - unsigned int opcode_addr, - struct tegra_se_cmdbuf *cmdbuf, - enum tegra_se_callback callback) -{ - u32 *pdata_buf = (u32 *)pdata; - u32 *cpuvaddr = cmdbuf->addr; - u32 val = 0, j; - u32 cmdbuf_num_words = 0, i = 0; - int err = 0; - - struct { - u32 OPERATION; - u32 CRYPTO_KEYTABLE_KEYMANIFEST; - u32 CRYPTO_KEYTABLE_DST; - u32 CRYPTO_KEYTABLE_ADDR; - u32 CRYPTO_KEYTABLE_DATA; - } OFFSETS; - - if (!pdata_buf) { - dev_err(se_dev->dev, "No key data available\n"); - return -ENODATA; - } - - pr_debug("%s(%d) data_len = %d slot_num = %d\n", __func__, __LINE__, - data_len, slot_num); - - /* Initialize register offsets based on SHA/AES operation. */ - switch (type) { - case SE_KEY_TABLE_TYPE_HMAC: - OFFSETS.OPERATION = SE_SHA_OPERATION_OFFSET; - OFFSETS.CRYPTO_KEYTABLE_KEYMANIFEST = - SE_SHA_CRYPTO_KEYTABLE_KEYMANIFEST_OFFSET; - OFFSETS.CRYPTO_KEYTABLE_DST = SE_SHA_CRYPTO_KEYTABLE_DST_OFFSET; - OFFSETS.CRYPTO_KEYTABLE_ADDR = - SE_SHA_CRYPTO_KEYTABLE_ADDR_OFFSET; - OFFSETS.CRYPTO_KEYTABLE_DATA = - SE_SHA_CRYPTO_KEYTABLE_DATA_OFFSET; - break; - default: - /* Use default case for AES operations. 
*/ - OFFSETS.OPERATION = SE_AES_OPERATION_OFFSET; - OFFSETS.CRYPTO_KEYTABLE_KEYMANIFEST = - SE_AES_CRYPTO_KEYTABLE_KEYMANIFEST_OFFSET; - OFFSETS.CRYPTO_KEYTABLE_DST = SE_AES_CRYPTO_KEYTABLE_DST_OFFSET; - OFFSETS.CRYPTO_KEYTABLE_ADDR = - SE_AES_CRYPTO_KEYTABLE_ADDR_OFFSET; - OFFSETS.CRYPTO_KEYTABLE_DATA = - SE_AES_CRYPTO_KEYTABLE_DATA_OFFSET; - break; - } - - i = se_dev->cmdbuf_cnt; - - if (!se_dev->cmdbuf_cnt) { - cpuvaddr[i++] = nvhost_opcode_setpayload(1); - cpuvaddr[i++] = __nvhost_opcode_incr_w( - opcode_addr + OFFSETS.OPERATION); - cpuvaddr[i++] = SE_OPERATION_WRSTALL(WRSTALL_TRUE) | - SE_OPERATION_OP(OP_DUMMY); - } - - /* key manifest */ - /* user */ - val = SE_KEYMANIFEST_USER(NS); - /* purpose */ - switch (type) { - case SE_KEY_TABLE_TYPE_XTS_KEY1: - case SE_KEY_TABLE_TYPE_XTS_KEY2: - val |= SE_KEYMANIFEST_PURPOSE(XTS); - break; - case SE_KEY_TABLE_TYPE_CMAC: - val |= SE_KEYMANIFEST_PURPOSE(CMAC); - break; - case SE_KEY_TABLE_TYPE_HMAC: - val |= SE_KEYMANIFEST_PURPOSE(HMAC); - break; - case SE_KEY_TABLE_TYPE_GCM: - val |= SE_KEYMANIFEST_PURPOSE(GCM); - break; - default: - val |= SE_KEYMANIFEST_PURPOSE(ENC); - break; - } - /* size */ - if (data_len == 16) - val |= SE_KEYMANIFEST_SIZE(KEY128); - else if (data_len == 24) - val |= SE_KEYMANIFEST_SIZE(KEY192); - else if (data_len == 32) - val |= SE_KEYMANIFEST_SIZE(KEY256); - /* exportable */ - val |= SE_KEYMANIFEST_EX(false); - - cpuvaddr[i++] = nvhost_opcode_setpayload(1); - cpuvaddr[i++] = __nvhost_opcode_incr_w(opcode_addr + - OFFSETS.CRYPTO_KEYTABLE_KEYMANIFEST); - cpuvaddr[i++] = val; - - pr_debug("%s(%d) key manifest = 0x%x\n", __func__, __LINE__, val); - - if (type != SE_KEY_TABLE_TYPE_HMAC) { - /* configure slot number */ - cpuvaddr[i++] = nvhost_opcode_setpayload(1); - cpuvaddr[i++] = __nvhost_opcode_incr_w( - opcode_addr + OFFSETS.CRYPTO_KEYTABLE_DST); - cpuvaddr[i++] = SE_AES_KEY_INDEX(slot_num); - } - - /* write key data */ - for (j = 0; j < data_len; j += 4) { - pr_debug("%s(%d) data_len = %d j = %d\n", __func__, - __LINE__, data_len, j); - cpuvaddr[i++] = nvhost_opcode_setpayload(1); - cpuvaddr[i++] = __nvhost_opcode_incr_w( - opcode_addr + - OFFSETS.CRYPTO_KEYTABLE_ADDR); - val = (j / 4); /* program quad */ - cpuvaddr[i++] = val; - - cpuvaddr[i++] = nvhost_opcode_setpayload(1); - cpuvaddr[i++] = __nvhost_opcode_incr_w( - opcode_addr + - OFFSETS.CRYPTO_KEYTABLE_DATA); - cpuvaddr[i++] = *pdata_buf++; - } - - /* configure INS operation */ - cpuvaddr[i++] = nvhost_opcode_setpayload(1); - cpuvaddr[i++] = __nvhost_opcode_incr_w(opcode_addr); - cpuvaddr[i++] = tegra_se_get_config(se_dev, SE_AES_OP_MODE_INS, 0, 0); - - /* initiate key write operation to key slot */ - cpuvaddr[i++] = nvhost_opcode_setpayload(1); - cpuvaddr[i++] = __nvhost_opcode_incr_w( - opcode_addr + OFFSETS.OPERATION); - cpuvaddr[i++] = SE_OPERATION_WRSTALL(WRSTALL_TRUE) | - SE_OPERATION_OP(OP_START) | - SE_OPERATION_LASTBUF(LASTBUF_TRUE); - - cmdbuf_num_words = i; - se_dev->cmdbuf_cnt = i; - - err = tegra_se_channel_submit_gather( - se_dev, cmdbuf, 0, cmdbuf_num_words, callback); - - pr_debug("%s(%d) complete\n", __func__, __LINE__); - - return err; -} - -static int tegra_se_send_key_data(struct tegra_se_dev *se_dev, u8 *pdata, - u32 data_len, u8 slot_num, - enum tegra_se_key_table_type type, - unsigned int opcode_addr, - struct tegra_se_cmdbuf *cmdbuf, - enum tegra_se_callback callback) -{ - u32 data_size; - u32 *pdata_buf = (u32 *)pdata; - u32 *cpuvaddr = cmdbuf->addr; - u8 pkt = 0, quad = 0; - u32 val = 0, j; - u32 cmdbuf_num_words = 0, i = 0; - int err = 
0; - - if (!pdata_buf) { - dev_err(se_dev->dev, "No Key Data available\n"); - return -ENODATA; - } - - if (se_dev->chipdata->kac_type == SE_KAC_T23X) { - if (type == SE_KEY_TABLE_TYPE_ORGIV || - type == SE_KEY_TABLE_TYPE_UPDTDIV) { - pr_debug("%s(%d) IV programming\n", __func__, __LINE__); - - /* Program IV using CTR registers */ - tegra_se_send_ctr_seed(se_dev, pdata_buf, opcode_addr, - cpuvaddr); - } else { - /* Program key using INS operation */ - err = tegra_se_aes_ins_op(se_dev, pdata, data_len, - slot_num, type, opcode_addr, cmdbuf, callback); - } - return err; - } - - if ((type == SE_KEY_TABLE_TYPE_KEY) && - (slot_num == ssk_slot.slot_num)) { - dev_err(se_dev->dev, "SSK Key Slot used\n"); - return -EINVAL; - } - - if ((type == SE_KEY_TABLE_TYPE_ORGIV) || - (type == SE_KEY_TABLE_TYPE_XTS_KEY2) || - (type == SE_KEY_TABLE_TYPE_XTS_KEY2_IN_MEM)) - quad = QUAD_ORG_IV; - else if (type == SE_KEY_TABLE_TYPE_UPDTDIV) - quad = QUAD_UPDTD_IV; - else if ((type == SE_KEY_TABLE_TYPE_KEY) || - (type == SE_KEY_TABLE_TYPE_XTS_KEY1) || - (type == SE_KEY_TABLE_TYPE_KEY_IN_MEM) || - (type == SE_KEY_TABLE_TYPE_XTS_KEY1_IN_MEM)) - quad = QUAD_KEYS_128; - - i = se_dev->cmdbuf_cnt; - - if (!se_dev->cmdbuf_cnt) { - cpuvaddr[i++] = __nvhost_opcode_nonincr( - opcode_addr + SE_AES_OPERATION_OFFSET, 1); - cpuvaddr[i++] = SE_OPERATION_WRSTALL(WRSTALL_TRUE) | - SE_OPERATION_OP(OP_DUMMY); - } - - data_size = SE_KEYTABLE_QUAD_SIZE_BYTES; - - do { - if (type == SE_KEY_TABLE_TYPE_XTS_KEY2 || - type == SE_KEY_TABLE_TYPE_XTS_KEY2_IN_MEM) - pkt = SE_CRYPTO_KEYIV_PKT_SUBKEY_SEL(SUBKEY_SEL_KEY2); - else if (type == SE_KEY_TABLE_TYPE_XTS_KEY1 || - type == SE_KEY_TABLE_TYPE_XTS_KEY1_IN_MEM) - pkt = SE_CRYPTO_KEYIV_PKT_SUBKEY_SEL(SUBKEY_SEL_KEY1); - - pkt |= (SE_KEYTABLE_SLOT(slot_num) | SE_KEYTABLE_QUAD(quad)); - - for (j = 0; j < data_size; j += 4, data_len -= 4) { - cpuvaddr[i++] = __nvhost_opcode_nonincr( - opcode_addr + - SE_AES_CRYPTO_KEYTABLE_ADDR_OFFSET, 1); - - val = (SE_KEYTABLE_PKT(pkt) | (j / 4)); - cpuvaddr[i++] = val; - - cpuvaddr[i++] = __nvhost_opcode_incr( - opcode_addr + - SE_AES_CRYPTO_KEYTABLE_DATA_OFFSET, 1); - cpuvaddr[i++] = *pdata_buf++; - } - data_size = data_len; - if ((type == SE_KEY_TABLE_TYPE_KEY) || - (type == SE_KEY_TABLE_TYPE_XTS_KEY1) || - (type == SE_KEY_TABLE_TYPE_KEY_IN_MEM) || - (type == SE_KEY_TABLE_TYPE_XTS_KEY1_IN_MEM)) - quad = QUAD_KEYS_256; - else if ((type == SE_KEY_TABLE_TYPE_XTS_KEY2) || - (type == SE_KEY_TABLE_TYPE_XTS_KEY2_IN_MEM)) - quad = QUAD_UPDTD_IV; - - } while (data_len); - - if ((type != SE_KEY_TABLE_TYPE_ORGIV) && - (type != SE_KEY_TABLE_TYPE_UPDTDIV) && - (type != SE_KEY_TABLE_TYPE_KEY_IN_MEM) && - (type != SE_KEY_TABLE_TYPE_XTS_KEY1_IN_MEM) && - (type != SE_KEY_TABLE_TYPE_XTS_KEY2_IN_MEM)) { - cpuvaddr[i++] = __nvhost_opcode_nonincr( - opcode_addr + SE_AES_OPERATION_OFFSET, 1); - cpuvaddr[i++] = SE_OPERATION_WRSTALL(WRSTALL_TRUE) | - SE_OPERATION_OP(OP_DUMMY); - } - - cmdbuf_num_words = i; - se_dev->cmdbuf_cnt = i; - - if ((type != SE_KEY_TABLE_TYPE_ORGIV) && - (type != SE_KEY_TABLE_TYPE_UPDTDIV) && - (type != SE_KEY_TABLE_TYPE_KEY_IN_MEM) && - (type != SE_KEY_TABLE_TYPE_XTS_KEY1_IN_MEM) && - (type != SE_KEY_TABLE_TYPE_XTS_KEY2_IN_MEM)) - err = tegra_se_channel_submit_gather( - se_dev, cmdbuf, 0, cmdbuf_num_words, callback); - - return err; -} - -static u32 tegra_se_get_crypto_config(struct tegra_se_dev *se_dev, - enum tegra_se_aes_op_mode mode, - bool encrypt, u8 slot_num, - u8 slot2_num, bool org_iv) -{ - u32 val = 0; - unsigned long freq = 0; - - switch (mode) { - 
case SE_AES_OP_MODE_XTS: - if (encrypt) { - val = SE_CRYPTO_INPUT_SEL(INPUT_MEMORY) | - SE_CRYPTO_VCTRAM_SEL(VCTRAM_TWEAK) | - SE_CRYPTO_XOR_POS(XOR_BOTH) | - SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); - } else { - val = SE_CRYPTO_INPUT_SEL(INPUT_MEMORY) | - SE_CRYPTO_VCTRAM_SEL(VCTRAM_TWEAK) | - SE_CRYPTO_XOR_POS(XOR_BOTH) | - SE_CRYPTO_CORE_SEL(CORE_DECRYPT); - } - if (se_dev->chipdata->kac_type == SE_KAC_T23X) - val |= SE_CRYPTO_KEY2_INDEX(slot2_num); - freq = se_dev->chipdata->aes_freq; - break; - case SE_AES_OP_MODE_CMAC: - case SE_AES_OP_MODE_CBC: - if (encrypt) { - if (se_dev->chipdata->kac_type == SE_KAC_T18X || - (se_dev->chipdata->kac_type == SE_KAC_T23X && - mode == SE_AES_OP_MODE_CBC)) { - val = SE_CRYPTO_INPUT_SEL(INPUT_MEMORY) | - SE_CRYPTO_VCTRAM_SEL(VCTRAM_AESOUT) | - SE_CRYPTO_XOR_POS(XOR_TOP) | - SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); - } - } else { - val = SE_CRYPTO_INPUT_SEL(INPUT_MEMORY) | - SE_CRYPTO_VCTRAM_SEL(VCTRAM_PREVAHB) | - SE_CRYPTO_XOR_POS(XOR_BOTTOM) | - SE_CRYPTO_CORE_SEL(CORE_DECRYPT); - } - freq = se_dev->chipdata->aes_freq; - break; - case SE_AES_OP_MODE_CBC_MAC: - val = SE_CRYPTO_XOR_POS(XOR_TOP) | - SE_CRYPTO_INPUT_SEL(INPUT_MEMORY) | - SE_CRYPTO_VCTRAM_SEL(VCTRAM_AESOUT) | - SE_CRYPTO_HASH(HASH_ENABLE); - - if (se_dev->chipdata->kac_type == SE_KAC_T23X) - val |= SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); - break; - case SE_AES_OP_MODE_RNG_DRBG: - val = SE_CRYPTO_INPUT_SEL(INPUT_RANDOM) | - SE_CRYPTO_XOR_POS(XOR_BYPASS) | - SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); - break; - case SE_AES_OP_MODE_ECB: - if (encrypt) { - val = SE_CRYPTO_INPUT_SEL(INPUT_MEMORY) | - SE_CRYPTO_XOR_POS(XOR_BYPASS) | - SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); - } else { - val = SE_CRYPTO_INPUT_SEL(INPUT_MEMORY) | - SE_CRYPTO_XOR_POS(XOR_BYPASS) | - SE_CRYPTO_CORE_SEL(CORE_DECRYPT); - } - freq = se_dev->chipdata->aes_freq; - break; - case SE_AES_OP_MODE_CTR: - val = SE_CRYPTO_INPUT_SEL(INPUT_LNR_CTR) | - SE_CRYPTO_VCTRAM_SEL(VCTRAM_MEMORY) | - SE_CRYPTO_XOR_POS(XOR_BOTTOM) | - SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); - freq = se_dev->chipdata->aes_freq; - break; - case SE_AES_OP_MODE_OFB: - val = SE_CRYPTO_INPUT_SEL(INPUT_AESOUT) | - SE_CRYPTO_VCTRAM_SEL(VCTRAM_MEMORY) | - SE_CRYPTO_XOR_POS(XOR_BOTTOM) | - SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); - freq = se_dev->chipdata->aes_freq; - break; - case SE_AES_OP_MODE_GCM: - break; - default: - dev_warn(se_dev->dev, "Invalid operation mode\n"); - break; - } - - if (mode == SE_AES_OP_MODE_CTR) { - val |= SE_CRYPTO_HASH(HASH_DISABLE) | - SE_CRYPTO_KEY_INDEX(slot_num) | - SE_CRYPTO_CTR_CNTN(1); - if (se_dev->chipdata->kac_type == SE_KAC_T23X) - val |= SE_CRYPTO_IV_SEL(IV_REG); - } else { - val |= SE_CRYPTO_HASH(HASH_DISABLE) | - SE_CRYPTO_KEY_INDEX(slot_num); - if (se_dev->chipdata->kac_type == SE_KAC_T23X) { - if (mode != SE_AES_OP_MODE_ECB && - mode != SE_AES_OP_MODE_CMAC && - mode != SE_AES_OP_MODE_GCM) - val |= SE_CRYPTO_IV_SEL(IV_REG); - } else { - val |= (org_iv ? SE_CRYPTO_IV_SEL(IV_ORIGINAL) : - SE_CRYPTO_IV_SEL(IV_UPDATED)); - } - } - - /* Enable hash for CMAC if running on T18X. 
*/ - if (mode == SE_AES_OP_MODE_CMAC && - se_dev->chipdata->kac_type == SE_KAC_T18X) - val |= SE_CRYPTO_HASH(HASH_ENABLE); - - if (mode == SE_AES_OP_MODE_RNG_DRBG) { - if (force_reseed_count <= 0) { - se_writel(se_dev, - SE_RNG_CONFIG_MODE(DRBG_MODE_FORCE_RESEED) | - SE_RNG_CONFIG_SRC(DRBG_SRC_ENTROPY), - SE_RNG_CONFIG_REG_OFFSET); - force_reseed_count = RNG_RESEED_INTERVAL; - } else { - se_writel(se_dev, - SE_RNG_CONFIG_MODE(DRBG_MODE_NORMAL) | - SE_RNG_CONFIG_SRC(DRBG_SRC_ENTROPY), - SE_RNG_CONFIG_REG_OFFSET); - } - --force_reseed_count; - - se_writel(se_dev, RNG_RESEED_INTERVAL, - SE_RNG_RESEED_INTERVAL_REG_OFFSET); - } - - pr_debug("%s:%d crypto_config val = 0x%x\n", __func__, __LINE__, val); - - return val; -} - -static int tegra_se_send_sha_data(struct tegra_se_dev *se_dev, - struct ahash_request *req, - struct tegra_se_sha_context *sha_ctx, - u32 count, bool last) -{ - int err = 0; - u32 cmdbuf_num_words = 0, i = 0; - u32 *cmdbuf_cpuvaddr = NULL; - dma_addr_t cmdbuf_iova = 0; - struct tegra_se_req_context *req_ctx = ahash_request_ctx(req); - struct tegra_se_ll *src_ll = se_dev->src_ll; - struct tegra_se_ll *dst_ll = se_dev->dst_ll; - struct tegra_se_cmdbuf *cmdbuf; - unsigned int total = count, val; - u64 msg_len; - - cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, - SZ_16K * SE_MAX_SUBMIT_CHAIN_SZ); - if (IS_ERR(cmdbuf)) { - dev_err(se_dev->dev, "cmdbuf allocation failed\n"); - return PTR_ERR(cmdbuf); - } - - cmdbuf_cpuvaddr = cmdbuf->addr; - cmdbuf_iova = cmdbuf->iova; - - while (total) { - if (src_ll->data_len & SE_BUFF_SIZE_MASK) { - return -EINVAL; - } - - if (total == count) { - cmdbuf_cpuvaddr[i++] = nvhost_opcode_setpayload(8); - cmdbuf_cpuvaddr[i++] = __nvhost_opcode_incr_w( - se_dev->opcode_addr + - SE_SHA_MSG_LENGTH_OFFSET); - - /* Program message length in next 4 four registers */ - if (sha_ctx->op_mode == SE_AES_OP_MODE_SHAKE128 - || sha_ctx->op_mode == SE_AES_OP_MODE_SHAKE256) { - /* Reduce 4 extra bits in padded 8 bits */ - msg_len = (count * 8) - 4; - } else - msg_len = (count * 8); - cmdbuf_cpuvaddr[i++] = - (sha_ctx->total_count * 8); - cmdbuf_cpuvaddr[i++] = (u32)(msg_len >> 32); - cmdbuf_cpuvaddr[i++] = 0; - cmdbuf_cpuvaddr[i++] = 0; - - /* Program message left in next 4 four registers */ - - /* If it is not last request, length of message left - * should be more than input buffer length. 
- */ - if (!last) - cmdbuf_cpuvaddr[i++] = - (u32)(msg_len + 8) & 0xFFFFFFFFULL; - else - cmdbuf_cpuvaddr[i++] = - (u32)((msg_len) & 0xFFFFFFFFULL); - cmdbuf_cpuvaddr[i++] = (u32)(msg_len >> 32); - cmdbuf_cpuvaddr[i++] = 0; - cmdbuf_cpuvaddr[i++] = 0; - - cmdbuf_cpuvaddr[i++] = nvhost_opcode_setpayload(6); - cmdbuf_cpuvaddr[i++] = __nvhost_opcode_incr_w( - se_dev->opcode_addr); - - cmdbuf_cpuvaddr[i++] = req_ctx->config; - - if (sha_ctx->is_first) - cmdbuf_cpuvaddr[i++] = - SE4_HW_INIT_HASH(HW_INIT_HASH_ENABLE); - else - cmdbuf_cpuvaddr[i++] = - SE4_HW_INIT_HASH(HW_INIT_HASH_DISABLE); - } else { - cmdbuf_cpuvaddr[i++] = nvhost_opcode_setpayload(4); - cmdbuf_cpuvaddr[i++] = __nvhost_opcode_incr_w( - se_dev->opcode_addr + - SE4_SHA_IN_ADDR_OFFSET); - } - cmdbuf_cpuvaddr[i++] = src_ll->addr; - cmdbuf_cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(MSB(src_ll->addr)) | - SE_ADDR_HI_SZ(src_ll->data_len)); - cmdbuf_cpuvaddr[i++] = dst_ll->addr; - cmdbuf_cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(MSB(dst_ll->addr)) | - SE_ADDR_HI_SZ(dst_ll->data_len)); - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) - /* For SHAKE128/SHAKE256 program digest size */ -#if 0 - if (sha_ctx->op_mode == SE_AES_OP_MODE_SHAKE128 - || sha_ctx->op_mode == SE_AES_OP_MODE_SHAKE256) { - cmdbuf_cpuvaddr[i++] = nvhost_opcode_setpayload(1); - cmdbuf_cpuvaddr[i++] = __nvhost_opcode_nonincr_w( - se_dev->opcode_addr + - SE_SHA_HASH_LENGTH); - cmdbuf_cpuvaddr[i++] = (req->dst_size * 8) << 2; - } -#endif -#endif - cmdbuf_cpuvaddr[i++] = nvhost_opcode_setpayload(1); - cmdbuf_cpuvaddr[i++] = __nvhost_opcode_nonincr_w( - se_dev->opcode_addr + - SE_SHA_OPERATION_OFFSET); - - val = SE_OPERATION_WRSTALL(WRSTALL_TRUE); - if (total == count) { - if (total == src_ll->data_len) - val |= SE_OPERATION_LASTBUF(LASTBUF_TRUE) | - SE_OPERATION_OP(OP_START); - else - val |= SE_OPERATION_LASTBUF(LASTBUF_FALSE) | - SE_OPERATION_OP(OP_START); - } else { - if (total == src_ll->data_len) - val |= SE_OPERATION_LASTBUF(LASTBUF_TRUE) | - SE_OPERATION_OP(OP_RESTART_IN); - else - val |= SE_OPERATION_LASTBUF(LASTBUF_FALSE) | - SE_OPERATION_OP(OP_RESTART_IN); - } - cmdbuf_cpuvaddr[i++] = val; - total -= src_ll->data_len; - src_ll++; - } - - cmdbuf_num_words = i; - - err = tegra_se_channel_submit_gather(se_dev, cmdbuf, - 0, cmdbuf_num_words, SHA_CB); - if (err) - dev_err(se_dev->dev, "Channel submission fail err = %d\n", err); - - return err; -} - -static int tegra_se_read_cmac_result(struct tegra_se_dev *se_dev, u8 *pdata, - u32 nbytes, bool swap32) -{ - u32 *result = (u32 *)pdata; - u32 i; - - for (i = 0; i < nbytes / 4; i++) { - if (se_dev->chipdata->kac_type == SE_KAC_T23X) { - result[i] = se_readl(se_dev, - se_dev->opcode_addr + - T234_SE_CMAC_RESULT_REG_OFFSET + - (i * sizeof(u32))); - } else { - result[i] = se_readl(se_dev, SE_CMAC_RESULT_REG_OFFSET + - (i * sizeof(u32))); - } - if (swap32) - result[i] = be32_to_cpu(result[i]); - } - - return 0; -} - -static int tegra_se_clear_cmac_result(struct tegra_se_dev *se_dev, u32 nbytes) -{ - u32 i; - - for (i = 0; i < nbytes / 4; i++) { - if (se_dev->chipdata->kac_type == SE_KAC_T23X) { - se_writel(se_dev, 0x0, se_dev->opcode_addr + - T234_SE_CMAC_RESULT_REG_OFFSET + - (i * sizeof(u32))); - } else { - se_writel(se_dev, 0x0, - SE_CMAC_RESULT_REG_OFFSET + - (i * sizeof(u32))); - } - } - - return 0; -} - -static void tegra_se_send_data(struct tegra_se_dev *se_dev, - struct tegra_se_req_context *req_ctx, - struct skcipher_request *req, u32 nbytes, - unsigned int opcode_addr, u32 *cpuvaddr) -{ - u32 cmdbuf_num_words = 0, i = 0; - 
u32 total, val, rbits; - unsigned int restart_op; - struct tegra_se_ll *src_ll; - struct tegra_se_ll *dst_ll; - - if (req) { - src_ll = se_dev->aes_src_ll; - dst_ll = se_dev->aes_dst_ll; - src_ll->addr = se_dev->aes_cur_addr; - dst_ll->addr = se_dev->aes_cur_addr; - src_ll->data_len = req->cryptlen; - dst_ll->data_len = req->cryptlen; - } else { - src_ll = se_dev->src_ll; - dst_ll = se_dev->dst_ll; - } - - i = se_dev->cmdbuf_cnt; - total = nbytes; - - /* Create Gather Buffer Command */ - while (total) { - if (total == nbytes) { - cpuvaddr[i++] = __nvhost_opcode_nonincr( - opcode_addr + - SE_AES_CRYPTO_LAST_BLOCK_OFFSET, 1); - val = ((nbytes / TEGRA_SE_AES_BLOCK_SIZE)) - 1; - - if (req_ctx->op_mode == SE_AES_OP_MODE_CMAC && - se_dev->chipdata->kac_type == SE_KAC_T23X) { - rbits = (nbytes % TEGRA_SE_AES_BLOCK_SIZE) * 8; - if (rbits) { - val++; - val |= - SE_LAST_BLOCK_RESIDUAL_BITS(rbits); - } - } - - cpuvaddr[i++] = val; - cpuvaddr[i++] = __nvhost_opcode_incr(opcode_addr, 6); - cpuvaddr[i++] = req_ctx->config; - cpuvaddr[i++] = req_ctx->crypto_config; - } else { - cpuvaddr[i++] = __nvhost_opcode_incr( - opcode_addr + SE_AES_IN_ADDR_OFFSET, 4); - } - - cpuvaddr[i++] = (u32)(src_ll->addr); - cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(MSB(src_ll->addr)) | - SE_ADDR_HI_SZ(src_ll->data_len)); - cpuvaddr[i++] = (u32)(dst_ll->addr); - cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(MSB(dst_ll->addr)) | - SE_ADDR_HI_SZ(dst_ll->data_len)); - - if (req_ctx->op_mode == SE_AES_OP_MODE_CMAC || - req_ctx->op_mode == SE_AES_OP_MODE_CBC_MAC) - restart_op = OP_RESTART_IN; - else if (req_ctx->op_mode == SE_AES_OP_MODE_RNG_DRBG) - restart_op = OP_RESTART_OUT; - else - restart_op = OP_RESTART_INOUT; - - cpuvaddr[i++] = __nvhost_opcode_nonincr( - opcode_addr + SE_AES_OPERATION_OFFSET, 1); - - val = SE_OPERATION_WRSTALL(WRSTALL_TRUE); - if (total == nbytes) { - if (total == src_ll->data_len) { - val |= SE_OPERATION_LASTBUF(LASTBUF_TRUE) | - SE_OPERATION_OP(OP_START); - - if ((req_ctx->op_mode == SE_AES_OP_MODE_CMAC || - req_ctx->op_mode == SE_AES_OP_MODE_CBC_MAC) && - se_dev->chipdata->kac_type == SE_KAC_T23X) - val |= SE_OPERATION_FINAL(FINAL_TRUE); - } else { - val |= SE_OPERATION_LASTBUF(LASTBUF_FALSE) | - SE_OPERATION_OP(OP_START); - } - } else { - if (total == src_ll->data_len) { - val |= SE_OPERATION_LASTBUF(LASTBUF_TRUE) | - SE_OPERATION_OP(restart_op); - - if ((req_ctx->op_mode == SE_AES_OP_MODE_CMAC || - req_ctx->op_mode == SE_AES_OP_MODE_CBC_MAC) && - se_dev->chipdata->kac_type == SE_KAC_T23X) - val |= SE_OPERATION_FINAL(FINAL_TRUE); - } else { - val |= SE_OPERATION_LASTBUF(LASTBUF_FALSE) | - SE_OPERATION_OP(restart_op); - } - } - cpuvaddr[i++] = val; - total -= src_ll->data_len; - src_ll++; - dst_ll++; - } - - cmdbuf_num_words = i; - se_dev->cmdbuf_cnt = i; - if (req) - se_dev->aes_cur_addr += req->cryptlen; -} - -static void tegra_se_send_gcm_data(struct tegra_se_dev *se_dev, - struct tegra_se_req_context *req_ctx, - u32 nbytes, unsigned int opcode_addr, - u32 *cpuvaddr, - enum tegra_se_aes_gcm_mode sub_mode) -{ - u32 cmdbuf_num_words = 0, i = 0; - u32 total, val, rbits; - struct tegra_se_ll *src_ll; - struct tegra_se_ll *dst_ll; - unsigned int restart_op; - - src_ll = se_dev->src_ll; - dst_ll = se_dev->dst_ll; - - i = se_dev->cmdbuf_cnt; - total = nbytes; - - /* Create Gather Buffer Commands in multiple buffer */ - /* (not multiple task) format. 
*/ - - /* Program LAST_BLOCK */ - cpuvaddr[i++] = __nvhost_opcode_nonincr( - opcode_addr + - SE_AES_CRYPTO_LAST_BLOCK_OFFSET, 1); - val = ((nbytes / TEGRA_SE_AES_BLOCK_SIZE)) - 1; - rbits = (nbytes % TEGRA_SE_AES_BLOCK_SIZE) * 8; - if (rbits) { - val++; - val |= SE_LAST_BLOCK_RESIDUAL_BITS(rbits); - } - cpuvaddr[i++] = val; - - /* Program config and crypto config */ - cpuvaddr[i++] = __nvhost_opcode_incr(opcode_addr, 2); - cpuvaddr[i++] = req_ctx->config; - cpuvaddr[i++] = req_ctx->crypto_config; - - while (total) { - /* Program IN and OUT addresses */ - if (sub_mode != SE_AES_GMAC) { - cpuvaddr[i++] = __nvhost_opcode_incr( - opcode_addr + SE_AES_IN_ADDR_OFFSET, 4); - } else { - cpuvaddr[i++] = __nvhost_opcode_incr( - opcode_addr + SE_AES_IN_ADDR_OFFSET, 2); - } - cpuvaddr[i++] = (u32)(src_ll->addr); - cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(MSB(src_ll->addr)) | - SE_ADDR_HI_SZ(src_ll->data_len)); - if (sub_mode != SE_AES_GMAC) { - cpuvaddr[i++] = (u32)(dst_ll->addr); - cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(MSB(dst_ll->addr)) - | SE_ADDR_HI_SZ(dst_ll->data_len)); - } - - /* Program operation register */ - if (sub_mode == SE_AES_GCM_ENC || sub_mode == SE_AES_GCM_DEC) - restart_op = OP_RESTART_INOUT; - else - restart_op = OP_RESTART_IN; - - cpuvaddr[i++] = __nvhost_opcode_nonincr( - opcode_addr + SE_AES_OPERATION_OFFSET, 1); - val = SE_OPERATION_WRSTALL(WRSTALL_TRUE); - if (req_ctx->init != true) {/* GMAC not operated */ - val |= SE_OPERATION_INIT(INIT_TRUE); - req_ctx->init = true; - } - val |= SE_OPERATION_FINAL(FINAL_TRUE); - if (total == nbytes) { - if (total == src_ll->data_len) { - val |= SE_OPERATION_LASTBUF(LASTBUF_TRUE) | - SE_OPERATION_OP(OP_START); - } else { - val |= SE_OPERATION_LASTBUF(LASTBUF_FALSE) | - SE_OPERATION_OP(OP_START); - } - } else { - if (total == src_ll->data_len) { - val |= SE_OPERATION_LASTBUF(LASTBUF_TRUE) | - SE_OPERATION_OP(restart_op); - } else { - val |= SE_OPERATION_LASTBUF(LASTBUF_FALSE) | - SE_OPERATION_OP(restart_op); - } - } - cpuvaddr[i++] = val; - total -= src_ll->data_len; - src_ll++; - dst_ll++; - } - - cmdbuf_num_words = i; - se_dev->cmdbuf_cnt = i; -} - -static int tegra_map_sg(struct device *dev, struct scatterlist *sg, - unsigned int nents, enum dma_data_direction dir, - struct tegra_se_ll *se_ll, u32 total) -{ - u32 total_loop = 0; - int ret = 0; - - total_loop = total; - while (sg && (total_loop > 0)) { - ret = dma_map_sg(dev, sg, nents, dir); - if (!ret) { - dev_err(dev, "dma_map_sg error\n"); - return ret; - } - se_ll->addr = sg_dma_address(sg); - se_ll->data_len = min((size_t)sg->length, (size_t)total_loop); - total_loop -= min((size_t)sg->length, (size_t)total_loop); - sg = sg_next(sg); - se_ll++; - } - - return ret; -} - -static int tegra_se_setup_ablk_req(struct tegra_se_dev *se_dev) -{ - struct skcipher_request *req; - void *buf; - int i, ret = 0; - u32 num_sgs; - unsigned int index = 0; - - if (unlikely(se_dev->dynamic_mem)) { - if (se_dev->ioc) - se_dev->aes_buf = dma_alloc_coherent( - se_dev->dev, se_dev->gather_buf_sz, - &se_dev->aes_buf_addr, GFP_KERNEL); - else - se_dev->aes_buf = kmalloc(se_dev->gather_buf_sz, - GFP_KERNEL); - if (!se_dev->aes_buf) - return -ENOMEM; - buf = se_dev->aes_buf; - } else { - index = se_dev->aesbuf_entry + 1; - for (i = 0; i < SE_MAX_AESBUF_TIMEOUT; i++, index++) { - index = index % SE_MAX_AESBUF_ALLOC; - if (atomic_read(&se_dev->aes_buf_stat[index])) { - se_dev->aesbuf_entry = index; - atomic_set(&se_dev->aes_buf_stat[index], 0); - break; - } - if (i % SE_MAX_AESBUF_ALLOC == 0) - udelay(SE_WAIT_UDELAY); 
- } - - if (i == SE_MAX_AESBUF_TIMEOUT) { - pr_err("aes_buffer not available\n"); - return -ETIMEDOUT; - } - buf = se_dev->aes_bufs[index]; - } - - for (i = 0; i < se_dev->req_cnt; i++) { - req = se_dev->reqs[i]; - - num_sgs = tegra_se_count_sgs(req->src, req->cryptlen); - - if (num_sgs == 1) - memcpy(buf, sg_virt(req->src), req->cryptlen); - else - sg_copy_to_buffer(req->src, num_sgs, buf, req->cryptlen); - buf += req->cryptlen; - } - - if (se_dev->ioc) { - if (unlikely(se_dev->dynamic_mem)) - se_dev->aes_addr = se_dev->aes_buf_addr; - else - se_dev->aes_addr = se_dev->aes_buf_addrs[index]; - } else { - if (unlikely(se_dev->dynamic_mem)) - sg_init_one(&se_dev->sg, se_dev->aes_buf, - se_dev->gather_buf_sz); - else - sg_init_one(&se_dev->sg, se_dev->aes_bufs[index], - se_dev->gather_buf_sz); - - ret = dma_map_sg(se_dev->dev, &se_dev->sg, 1, - DMA_BIDIRECTIONAL); - if (!ret) { - dev_err(se_dev->dev, "dma_map_sg error\n"); - - if (unlikely(se_dev->dynamic_mem)) - kfree(se_dev->aes_buf); - else - atomic_set(&se_dev->aes_buf_stat[index], 1); - return ret; - } - - se_dev->aes_addr = sg_dma_address(&se_dev->sg); - } - - se_dev->aes_cur_addr = se_dev->aes_addr; - - return 0; -} - -static int tegra_se_prepare_cmdbuf(struct tegra_se_dev *se_dev, - struct tegra_se_cmdbuf *cmdbuf) -{ - int i, ret = 0; - u32 *cpuvaddr = cmdbuf->addr; - struct tegra_se_aes_context *aes_ctx; - struct skcipher_request *req; - struct tegra_se_req_context *req_ctx; - struct crypto_skcipher *tfm; - u32 keylen; - - pr_debug("%s:%d req_cnt = %d\n", __func__, __LINE__, se_dev->req_cnt); - - for (i = 0; i < se_dev->req_cnt; i++) { - req = se_dev->reqs[i]; - tfm = crypto_skcipher_reqtfm(req); - aes_ctx = crypto_skcipher_ctx(tfm); - /* Ensure there is valid slot info */ - if (!aes_ctx->slot) { - dev_err(se_dev->dev, "Invalid AES Ctx Slot\n"); - return -EINVAL; - } - - if (aes_ctx->is_key_in_mem) { - if (strcmp(crypto_tfm_alg_name(&tfm->base), - "xts(aes)")) { - ret = tegra_se_send_key_data( - se_dev, aes_ctx->key, aes_ctx->keylen, - aes_ctx->slot->slot_num, - SE_KEY_TABLE_TYPE_KEY_IN_MEM, - se_dev->opcode_addr, cmdbuf, - AES_CB); - } else { - keylen = aes_ctx->keylen / 2; - ret = tegra_se_send_key_data(se_dev, - aes_ctx->key, keylen, - aes_ctx->slot->slot_num, - SE_KEY_TABLE_TYPE_XTS_KEY1_IN_MEM, - se_dev->opcode_addr, cmdbuf, - AES_CB); - if (ret) { - dev_err(se_dev->dev, "Error in setting Key\n"); - goto out; - } - - ret = tegra_se_send_key_data(se_dev, - aes_ctx->key + keylen, keylen, - aes_ctx->slot->slot_num, - SE_KEY_TABLE_TYPE_XTS_KEY2_IN_MEM, - se_dev->opcode_addr, cmdbuf, - AES_CB); - } - if (ret) { - dev_err(se_dev->dev, "Error in setting Key\n"); - goto out; - } - } - - req_ctx = skcipher_request_ctx(req); - - if (req->iv) { - if (req_ctx->op_mode == SE_AES_OP_MODE_CTR || - req_ctx->op_mode == SE_AES_OP_MODE_XTS) { - tegra_se_send_ctr_seed(se_dev, (u32 *)req->iv, - se_dev->opcode_addr, - cpuvaddr); - } else { - ret = tegra_se_send_key_data( - se_dev, req->iv, TEGRA_SE_AES_IV_SIZE, - aes_ctx->slot->slot_num, - SE_KEY_TABLE_TYPE_UPDTDIV, se_dev->opcode_addr, - cmdbuf, AES_CB); - } - } - - if (ret) - return ret; - - req_ctx->config = tegra_se_get_config(se_dev, req_ctx->op_mode, - req_ctx->encrypt, - aes_ctx->keylen); - - if (aes_ctx->slot2 != NULL) { - req_ctx->crypto_config = tegra_se_get_crypto_config( - se_dev, req_ctx->op_mode, - req_ctx->encrypt, - aes_ctx->slot->slot_num, - aes_ctx->slot2->slot_num, - false); - } else { - req_ctx->crypto_config = tegra_se_get_crypto_config( - se_dev, req_ctx->op_mode, - 
req_ctx->encrypt, - aes_ctx->slot->slot_num, - 0, - false); - } - - tegra_se_send_data(se_dev, req_ctx, req, req->cryptlen, - se_dev->opcode_addr, cpuvaddr); - } - -out: - return ret; -} - -static void tegra_se_process_new_req(struct tegra_se_dev *se_dev) -{ - struct skcipher_request *req; - u32 *cpuvaddr = NULL; - dma_addr_t iova = 0; - struct tegra_se_cmdbuf *cmdbuf; - int err = 0, i = 0; - - pr_debug("%s:%d start req_cnt = %d\n", __func__, __LINE__, - se_dev->req_cnt); - - tegra_se_boost_cpu_freq(se_dev); - - for (i = 0; i < se_dev->req_cnt; i++) { - req = se_dev->reqs[i]; - if (req->cryptlen != SE_STATIC_MEM_ALLOC_BUFSZ) { - se_dev->dynamic_mem = true; - break; - } - } - - err = tegra_se_setup_ablk_req(se_dev); - if (err) - goto mem_out; - - cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, - SZ_16K * SE_MAX_SUBMIT_CHAIN_SZ); - if (IS_ERR(cmdbuf)) { - dev_err(se_dev->dev, "cmdbuf allocation failed\n"); - goto mem_out; - } - - cpuvaddr = cmdbuf->addr; - iova = cmdbuf->iova; - - err = tegra_se_prepare_cmdbuf(se_dev, cmdbuf); - if (err) - goto cmdbuf_out; - - err = tegra_se_channel_submit_gather(se_dev, cmdbuf, 0, - se_dev->cmdbuf_cnt, AES_CB); - if (err) - goto cmdbuf_out; - se_dev->dynamic_mem = false; - - pr_debug("%s:%d complete\n", __func__, __LINE__); - - return; -cmdbuf_out: - dma_unmap_sg(se_dev->dev, &se_dev->sg, 1, DMA_BIDIRECTIONAL); - //if (se_dev->dynamic_mem) - // kfree(se_dev->aes_buf); -mem_out: - for (i = 0; i < se_dev->req_cnt; i++) { - req = se_dev->reqs[i]; - req->base.complete(&req->base, err); - } - se_dev->req_cnt = 0; - se_dev->gather_buf_sz = 0; - se_dev->cmdbuf_cnt = 0; - se_dev->dynamic_mem = false; -} - -static void tegra_se_work_handler(struct work_struct *work) -{ - struct tegra_se_dev *se_dev = container_of(work, struct tegra_se_dev, - se_work); - struct crypto_async_request *async_req = NULL; - struct crypto_async_request *backlog = NULL; - struct skcipher_request *req; - bool process_requests; - - mutex_lock(&se_dev->mtx); - do { - process_requests = false; - mutex_lock(&se_dev->lock); - do { - backlog = crypto_get_backlog(&se_dev->queue); - async_req = crypto_dequeue_request(&se_dev->queue); - if (!async_req) - se_dev->work_q_busy = false; - - if (backlog) { - backlog->complete(backlog, -EINPROGRESS); - backlog = NULL; - } - - if (async_req) { - req = skcipher_request_cast(async_req); - se_dev->reqs[se_dev->req_cnt] = req; - se_dev->gather_buf_sz += req->cryptlen; - se_dev->req_cnt++; - process_requests = true; - } else { - break; - } - } while (se_dev->queue.qlen && - (se_dev->req_cnt < SE_MAX_TASKS_PER_SUBMIT)); - mutex_unlock(&se_dev->lock); - - if (process_requests) - tegra_se_process_new_req(se_dev); - } while (se_dev->work_q_busy); - mutex_unlock(&se_dev->mtx); -} - -static int tegra_se_aes_queue_req(struct tegra_se_dev *se_dev, - struct skcipher_request *req) -{ - int err = 0; - - mutex_lock(&se_dev->lock); - err = crypto_enqueue_request(&se_dev->queue, &req->base); - - if (!se_dev->work_q_busy) { - se_dev->work_q_busy = true; - mutex_unlock(&se_dev->lock); - queue_work(se_dev->se_work_q, &se_dev->se_work); - } else { - mutex_unlock(&se_dev->lock); - } - - return err; -} - -static int tegra_se_aes_xts_encrypt(struct skcipher_request *req) -{ - struct tegra_se_req_context *req_ctx = skcipher_request_ctx(req); - - req_ctx->se_dev = se_devices[SE_AES]; - if (!req_ctx->se_dev) { - pr_err("Device is NULL\n"); - return -ENODEV; - } - - req_ctx->encrypt = true; - req_ctx->op_mode = SE_AES_OP_MODE_XTS; - - return tegra_se_aes_queue_req(req_ctx->se_dev, 
req); -} - -static int tegra_se_aes_xts_decrypt(struct skcipher_request *req) -{ - struct tegra_se_req_context *req_ctx = skcipher_request_ctx(req); - - req_ctx->se_dev = se_devices[SE_AES]; - if (!req_ctx->se_dev) { - pr_err("Device is NULL\n"); - return -ENODEV; - } - - req_ctx->encrypt = false; - req_ctx->op_mode = SE_AES_OP_MODE_XTS; - - return tegra_se_aes_queue_req(req_ctx->se_dev, req); -} - -static int tegra_se_aes_cbc_encrypt(struct skcipher_request *req) -{ - struct tegra_se_req_context *req_ctx = skcipher_request_ctx(req); - - req_ctx->se_dev = se_devices[SE_AES]; - req_ctx->encrypt = true; - req_ctx->op_mode = SE_AES_OP_MODE_CBC; - - return tegra_se_aes_queue_req(req_ctx->se_dev, req); -} - -static int tegra_se_aes_cbc_decrypt(struct skcipher_request *req) -{ - struct tegra_se_req_context *req_ctx = skcipher_request_ctx(req); - - req_ctx->se_dev = se_devices[SE_AES]; - req_ctx->encrypt = false; - req_ctx->op_mode = SE_AES_OP_MODE_CBC; - - return tegra_se_aes_queue_req(req_ctx->se_dev, req); -} - -static int tegra_se_aes_ecb_encrypt(struct skcipher_request *req) -{ - struct tegra_se_req_context *req_ctx = skcipher_request_ctx(req); - - req_ctx->se_dev = se_devices[SE_AES]; - req_ctx->encrypt = true; - req_ctx->op_mode = SE_AES_OP_MODE_ECB; - - return tegra_se_aes_queue_req(req_ctx->se_dev, req); -} - -static int tegra_se_aes_ecb_decrypt(struct skcipher_request *req) -{ - struct tegra_se_req_context *req_ctx = skcipher_request_ctx(req); - - req_ctx->se_dev = se_devices[SE_AES]; - req_ctx->encrypt = false; - req_ctx->op_mode = SE_AES_OP_MODE_ECB; - - return tegra_se_aes_queue_req(req_ctx->se_dev, req); -} - -static int tegra_se_aes_ctr_encrypt(struct skcipher_request *req) -{ - struct tegra_se_req_context *req_ctx = skcipher_request_ctx(req); - - req_ctx->se_dev = se_devices[SE_AES]; - req_ctx->encrypt = true; - req_ctx->op_mode = SE_AES_OP_MODE_CTR; - - return tegra_se_aes_queue_req(req_ctx->se_dev, req); -} - -static int tegra_se_aes_ctr_decrypt(struct skcipher_request *req) -{ - struct tegra_se_req_context *req_ctx = skcipher_request_ctx(req); - - req_ctx->se_dev = se_devices[SE_AES]; - req_ctx->encrypt = false; - req_ctx->op_mode = SE_AES_OP_MODE_CTR; - - return tegra_se_aes_queue_req(req_ctx->se_dev, req); -} - -static int tegra_se_aes_ofb_encrypt(struct skcipher_request *req) -{ - struct tegra_se_req_context *req_ctx = skcipher_request_ctx(req); - - req_ctx->se_dev = se_devices[SE_AES]; - req_ctx->encrypt = true; - req_ctx->op_mode = SE_AES_OP_MODE_OFB; - - return tegra_se_aes_queue_req(req_ctx->se_dev, req); -} - -static int tegra_se_aes_ofb_decrypt(struct skcipher_request *req) -{ - struct tegra_se_req_context *req_ctx = skcipher_request_ctx(req); - - req_ctx->se_dev = se_devices[SE_AES]; - req_ctx->encrypt = false; - req_ctx->op_mode = SE_AES_OP_MODE_OFB; - - return tegra_se_aes_queue_req(req_ctx->se_dev, req); -} - -static void tegra_se_init_aesbuf(struct tegra_se_dev *se_dev) -{ - int i; - void *buf = se_dev->total_aes_buf; - dma_addr_t buf_addr = se_dev->total_aes_buf_addr; - - for (i = 0; i < SE_MAX_AESBUF_ALLOC; i++) { - se_dev->aes_bufs[i] = buf + (i * SE_MAX_GATHER_BUF_SZ); - if (se_dev->ioc) - se_dev->aes_buf_addrs[i] = buf_addr + - (i * SE_MAX_GATHER_BUF_SZ); - atomic_set(&se_dev->aes_buf_stat[i], 1); - } -} - -static int tegra_se_aes_setkey(struct crypto_skcipher *tfm, - const u8 *key, u32 keylen) -{ - struct tegra_se_aes_context *ctx = crypto_tfm_ctx(&tfm->base); - struct tegra_se_dev *se_dev; - struct tegra_se_slot *pslot; - struct tegra_se_cmdbuf *cmdbuf; - 
u8 *pdata = (u8 *)key; - int ret = 0; - u32 *cpuvaddr = NULL; - - se_dev = se_devices[SE_AES]; - - if (!ctx || !se_dev) { - pr_err("invalid context or dev"); - return -EINVAL; - } - ctx->se_dev = se_dev; - - if (((keylen & SE_KEY_LEN_MASK) != TEGRA_SE_KEY_128_SIZE) && - ((keylen & SE_KEY_LEN_MASK) != TEGRA_SE_KEY_192_SIZE) && - ((keylen & SE_KEY_LEN_MASK) != TEGRA_SE_KEY_256_SIZE) && - ((keylen & SE_KEY_LEN_MASK) != TEGRA_SE_KEY_512_SIZE)) { - dev_err(se_dev->dev, "invalid key size"); - return -EINVAL; - } - - if ((keylen >> SE_MAGIC_PATTERN_OFFSET) == SE_STORE_KEY_IN_MEM) { - ctx->is_key_in_mem = true; - ctx->keylen = (keylen & SE_KEY_LEN_MASK); - ctx->slot = &keymem_slot; - memcpy(ctx->key, key, ctx->keylen); - return 0; - } - ctx->is_key_in_mem = false; - - mutex_lock(&se_dev->mtx); - if (key) { - if (!ctx->slot || - (ctx->slot && - ctx->slot->slot_num == ssk_slot.slot_num)) { - pslot = tegra_se_alloc_key_slot(); - if (!pslot) { - dev_err(se_dev->dev, "no free key slot\n"); - mutex_unlock(&se_dev->mtx); - return -ENOMEM; - } - ctx->slot = pslot; - ctx->slot2 = NULL; - if (!strcmp(crypto_tfm_alg_name(&tfm->base), - "xts(aes)")) { - if (se_dev->chipdata->kac_type == SE_KAC_T23X) { - pslot = tegra_se_alloc_key_slot(); - if (!pslot) { - dev_err(se_dev->dev, "no free key slot\n"); - mutex_unlock(&se_dev->mtx); - return -ENOMEM; - } - ctx->slot2 = pslot; - } - } - } - ctx->keylen = keylen; - } else if ((keylen >> SE_MAGIC_PATTERN_OFFSET) == SE_MAGIC_PATTERN) { - ctx->slot = &pre_allocated_slot; - spin_lock(&key_slot_lock); - pre_allocated_slot.slot_num = - ((keylen & SE_SLOT_NUM_MASK) >> SE_SLOT_POSITION); - spin_unlock(&key_slot_lock); - ctx->keylen = (keylen & SE_KEY_LEN_MASK); - goto out; - } else { - tegra_se_free_key_slot(ctx->slot); - ctx->slot = &ssk_slot; - ctx->keylen = AES_KEYSIZE_128; - goto out; - } - - cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, - SZ_16K * SE_MAX_SUBMIT_CHAIN_SZ); - if (IS_ERR(cmdbuf)) { - dev_err(se_dev->dev, "cmdbuf allocation failed\n"); - ret = PTR_ERR(cmdbuf); - goto out; - } - - cpuvaddr = cmdbuf->addr; - - /* load the key */ - - if (strcmp(crypto_tfm_alg_name(&tfm->base), "xts(aes)")) { - ret = tegra_se_send_key_data( - se_dev, pdata, keylen, ctx->slot->slot_num, - SE_KEY_TABLE_TYPE_KEY, se_dev->opcode_addr, - cmdbuf, AES_CB); - } else { - keylen = keylen / 2; - ret = tegra_se_send_key_data( - se_dev, pdata, keylen, ctx->slot->slot_num, - SE_KEY_TABLE_TYPE_XTS_KEY1, se_dev->opcode_addr, - cmdbuf, AES_CB); - if (ret) - goto keyslt_free; - - if (se_dev->chipdata->kac_type == SE_KAC_T23X) { - ret = tegra_se_send_key_data(se_dev, pdata + keylen, - keylen, ctx->slot2->slot_num, - SE_KEY_TABLE_TYPE_XTS_KEY2, - se_dev->opcode_addr, - cmdbuf, AES_CB); - } else { - ret = tegra_se_send_key_data(se_dev, pdata + keylen, - keylen, ctx->slot->slot_num, - SE_KEY_TABLE_TYPE_XTS_KEY2, - se_dev->opcode_addr, - cmdbuf, AES_CB); - } - } -keyslt_free: - if (ret) { - tegra_se_free_key_slot(ctx->slot); - tegra_se_free_key_slot(ctx->slot2); - } -out: - mutex_unlock(&se_dev->mtx); - - return ret; -} - -static int tegra_se_aes_cra_init(struct crypto_skcipher *tfm) -{ - tfm->reqsize = sizeof(struct tegra_se_req_context); - - return 0; -} - -static void tegra_se_aes_cra_exit(struct crypto_skcipher *tfm) -{ - struct tegra_se_aes_context *ctx = crypto_tfm_ctx(&tfm->base); - - tegra_se_free_key_slot(ctx->slot); - tegra_se_free_key_slot(ctx->slot2); - ctx->slot = NULL; - ctx->slot2 = NULL; -} - -static int tegra_se_rng_drbg_init(struct crypto_tfm *tfm) -{ - struct 
tegra_se_rng_context *rng_ctx = crypto_tfm_ctx(tfm); - struct tegra_se_dev *se_dev; - - se_dev = se_devices[SE_DRBG]; - - mutex_lock(&se_dev->mtx); - - rng_ctx->se_dev = se_dev; - rng_ctx->dt_buf = dma_alloc_coherent(se_dev->dev, TEGRA_SE_RNG_DT_SIZE, - &rng_ctx->dt_buf_adr, GFP_KERNEL); - if (!rng_ctx->dt_buf) { - dev_err(se_dev->dev, "can not allocate rng dma buffer"); - mutex_unlock(&se_dev->mtx); - return -ENOMEM; - } - - rng_ctx->rng_buf = dma_alloc_coherent( - rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE, - &rng_ctx->rng_buf_adr, GFP_KERNEL); - if (!rng_ctx->rng_buf) { - dev_err(se_dev->dev, "can not allocate rng dma buffer"); - dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE, - rng_ctx->dt_buf, rng_ctx->dt_buf_adr); - mutex_unlock(&se_dev->mtx); - return -ENOMEM; - } - mutex_unlock(&se_dev->mtx); - - return 0; -} - -static int tegra_se_rng_drbg_get_random(struct crypto_rng *tfm, const u8 *src, - unsigned int slen, u8 *rdata, - unsigned int dlen) -{ - struct tegra_se_rng_context *rng_ctx = crypto_rng_ctx(tfm); - struct tegra_se_dev *se_dev = rng_ctx->se_dev; - u8 *rdata_addr; - int ret = 0, j; - unsigned int num_blocks, data_len = 0; - struct tegra_se_cmdbuf *cmdbuf; - struct tegra_se_req_context *req_ctx = - devm_kzalloc(se_dev->dev, - sizeof(struct tegra_se_req_context), GFP_KERNEL); - if (!req_ctx) - return -ENOMEM; - - cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, - SZ_16K * SE_MAX_SUBMIT_CHAIN_SZ); - if (IS_ERR(cmdbuf)) { - dev_err(se_dev->dev, "cmdbuf allocation failed\n"); - ret = PTR_ERR(cmdbuf); - goto free_ctx; - } - - num_blocks = (dlen / TEGRA_SE_RNG_DT_SIZE); - data_len = (dlen % TEGRA_SE_RNG_DT_SIZE); - if (data_len == 0) - num_blocks = num_blocks - 1; - - mutex_lock(&se_dev->mtx); - req_ctx->op_mode = SE_AES_OP_MODE_RNG_DRBG; - - se_dev->src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf); - se_dev->dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf); - - req_ctx->config = tegra_se_get_config(se_dev, req_ctx->op_mode, true, - TEGRA_SE_KEY_128_SIZE); - - if (se_dev->chipdata->kac_type != SE_KAC_T23X) - req_ctx->crypto_config = tegra_se_get_crypto_config(se_dev, - req_ctx->op_mode, - true, 0, 0, true); - for (j = 0; j <= num_blocks; j++) { - se_dev->src_ll->addr = rng_ctx->dt_buf_adr; - se_dev->src_ll->data_len = TEGRA_SE_RNG_DT_SIZE; - se_dev->dst_ll->addr = rng_ctx->rng_buf_adr; - se_dev->dst_ll->data_len = TEGRA_SE_RNG_DT_SIZE; - - tegra_se_send_data(se_dev, req_ctx, NULL, TEGRA_SE_RNG_DT_SIZE, - se_dev->opcode_addr, cmdbuf->addr); - ret = tegra_se_channel_submit_gather( - se_dev, cmdbuf, - 0, se_dev->cmdbuf_cnt, - NONE); - if (ret) - break; - - rdata_addr = (rdata + (j * TEGRA_SE_RNG_DT_SIZE)); - - if (data_len && num_blocks == j) - memcpy(rdata_addr, rng_ctx->rng_buf, data_len); - else - memcpy(rdata_addr, rng_ctx->rng_buf, - TEGRA_SE_RNG_DT_SIZE); - } - - if (!ret) - ret = dlen; - - mutex_unlock(&se_dev->mtx); -free_ctx: - devm_kfree(se_dev->dev, req_ctx); - - return ret; -} - -static int tegra_se_rng_drbg_reset(struct crypto_rng *tfm, const u8 *seed, - unsigned int slen) -{ - return 0; -} - -static void tegra_se_rng_drbg_exit(struct crypto_tfm *tfm) -{ - struct tegra_se_rng_context *rng_ctx = crypto_tfm_ctx(tfm); - - if (rng_ctx->dt_buf) - dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE, - rng_ctx->dt_buf, rng_ctx->dt_buf_adr); - - if (rng_ctx->rng_buf) - dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE, - rng_ctx->rng_buf, rng_ctx->rng_buf_adr); - - rng_ctx->se_dev = NULL; -} - -static void 
tegra_se_sha_copy_residual_data( - struct ahash_request *req, struct tegra_se_sha_context *sha_ctx, - u32 bytes_to_copy) -{ - struct sg_mapping_iter miter; - unsigned int sg_flags, total = 0; - u32 num_sgs, last_block_bytes = bytes_to_copy; - unsigned long flags; - struct scatterlist *src_sg; - u8 *temp_buffer = NULL; - - src_sg = req->src; - num_sgs = tegra_se_count_sgs(req->src, req->nbytes); - sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG; - sg_miter_start(&miter, req->src, num_sgs, sg_flags); - local_irq_save(flags); - - temp_buffer = sha_ctx->sha_buf[0]; - while (sg_miter_next(&miter) && total < req->nbytes) { - unsigned int len; - - len = min(miter.length, (size_t)(req->nbytes - total)); - if ((req->nbytes - (total + len)) <= last_block_bytes) { - bytes_to_copy = last_block_bytes - - (req->nbytes - (total + len)); - memcpy(temp_buffer, miter.addr + (len - bytes_to_copy), - bytes_to_copy); - last_block_bytes -= bytes_to_copy; - temp_buffer += bytes_to_copy; - } - total += len; - } - sg_miter_stop(&miter); - local_irq_restore(flags); -} - -static int tegra_se_sha_process_buf(struct ahash_request *req, bool is_last, - bool process_cur_req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct tegra_se_sha_context *sha_ctx = crypto_ahash_ctx(tfm); - struct tegra_se_req_context *req_ctx = ahash_request_ctx(req); - struct tegra_se_dev *se_dev = se_devices[SE_SHA]; - struct scatterlist *src_sg = req->src; - struct tegra_se_ll *src_ll; - struct tegra_se_ll *dst_ll; - u32 current_total = 0, num_sgs, bytes_process_in_req = 0, num_blks; - int err = 0, dst_len = crypto_ahash_digestsize(tfm); - - pr_debug("%s:%d process sha buffer\n", __func__, __LINE__); - - num_sgs = tegra_se_count_sgs(req->src, req->nbytes); - if (num_sgs > SE_MAX_SRC_SG_COUNT) { - dev_err(se_dev->dev, "num of SG buffers are more\n"); - return -ENOTSUPP; - } - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) -#if 0 - /* For SHAKE128/SHAKE256, digest size can vary */ - if (sha_ctx->op_mode == SE_AES_OP_MODE_SHAKE128 - || sha_ctx->op_mode == SE_AES_OP_MODE_SHAKE256) { - dst_len = req->dst_size; - if (dst_len == 0) { - req->base.complete(&req->base, 0); - return 0; - } - } -#endif -#endif - - req_ctx->hash_result = devm_kzalloc(se_dev->dev, dst_len, GFP_KERNEL); - if (!req_ctx->hash_result) - return -ENOMEM; - - sg_init_one(&se_dev->sg, req_ctx->hash_result, dst_len); - - se_dev->sha_last = is_last; - - if (is_last) { - /* Prepare buf for residual and current data */ - /* Fill sgs entries */ - se_dev->src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf); - src_ll = se_dev->src_ll; - se_dev->dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf); - dst_ll = se_dev->dst_ll; - - if (sha_ctx->residual_bytes) { - src_ll->addr = sha_ctx->sha_buf_addr[0]; - src_ll->data_len = sha_ctx->residual_bytes; - src_ll++; - } - - if (process_cur_req) { - bytes_process_in_req = req->nbytes; - err = tegra_map_sg(se_dev->dev, src_sg, 1, - DMA_TO_DEVICE, src_ll, - bytes_process_in_req); - if (!err) - return -EINVAL; - - current_total = req->nbytes + sha_ctx->residual_bytes; - sha_ctx->total_count += current_total; - - err = tegra_map_sg(se_dev->dev, &se_dev->sg, 1, - DMA_FROM_DEVICE, dst_ll, - dst_len); - if (!err) - return -EINVAL; - - se_dev->src_bytes_mapped = bytes_process_in_req; - se_dev->dst_bytes_mapped = dst_len; - se_dev->sha_src_mapped = true; - se_dev->sha_dst_mapped = true; - } else { - current_total = sha_ctx->residual_bytes; - sha_ctx->total_count += current_total; - if (!current_total) { - req->base.complete(&req->base, 
-static int tegra_se_sha_process_buf(struct ahash_request *req, bool is_last,
-				    bool process_cur_req)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct tegra_se_sha_context *sha_ctx = crypto_ahash_ctx(tfm);
-	struct tegra_se_req_context *req_ctx = ahash_request_ctx(req);
-	struct tegra_se_dev *se_dev = se_devices[SE_SHA];
-	struct scatterlist *src_sg = req->src;
-	struct tegra_se_ll *src_ll;
-	struct tegra_se_ll *dst_ll;
-	u32 current_total = 0, num_sgs, bytes_process_in_req = 0, num_blks;
-	int err = 0, dst_len = crypto_ahash_digestsize(tfm);
-
-	pr_debug("%s:%d process sha buffer\n", __func__, __LINE__);
-
-	num_sgs = tegra_se_count_sgs(req->src, req->nbytes);
-	if (num_sgs > SE_MAX_SRC_SG_COUNT) {
-		dev_err(se_dev->dev, "number of SG buffers exceeds limit\n");
-		return -ENOTSUPP;
-	}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
-#if 0
-	/* For SHAKE128/SHAKE256, digest size can vary */
-	if (sha_ctx->op_mode == SE_AES_OP_MODE_SHAKE128
-	    || sha_ctx->op_mode == SE_AES_OP_MODE_SHAKE256) {
-		dst_len = req->dst_size;
-		if (dst_len == 0) {
-			req->base.complete(&req->base, 0);
-			return 0;
-		}
-	}
-#endif
-#endif
-
-	req_ctx->hash_result = devm_kzalloc(se_dev->dev, dst_len, GFP_KERNEL);
-	if (!req_ctx->hash_result)
-		return -ENOMEM;
-
-	sg_init_one(&se_dev->sg, req_ctx->hash_result, dst_len);
-
-	se_dev->sha_last = is_last;
-
-	if (is_last) {
-		/* Prepare buf for residual and current data */
-		/* Fill sgs entries */
-		se_dev->src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf);
-		src_ll = se_dev->src_ll;
-		se_dev->dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf);
-		dst_ll = se_dev->dst_ll;
-
-		if (sha_ctx->residual_bytes) {
-			src_ll->addr = sha_ctx->sha_buf_addr[0];
-			src_ll->data_len = sha_ctx->residual_bytes;
-			src_ll++;
-		}
-
-		if (process_cur_req) {
-			bytes_process_in_req = req->nbytes;
-			err = tegra_map_sg(se_dev->dev, src_sg, 1,
-					   DMA_TO_DEVICE, src_ll,
-					   bytes_process_in_req);
-			if (!err)
-				return -EINVAL;
-
-			current_total = req->nbytes + sha_ctx->residual_bytes;
-			sha_ctx->total_count += current_total;
-
-			err = tegra_map_sg(se_dev->dev, &se_dev->sg, 1,
-					   DMA_FROM_DEVICE, dst_ll,
-					   dst_len);
-			if (!err)
-				return -EINVAL;
-
-			se_dev->src_bytes_mapped = bytes_process_in_req;
-			se_dev->dst_bytes_mapped = dst_len;
-			se_dev->sha_src_mapped = true;
-			se_dev->sha_dst_mapped = true;
-		} else {
-			current_total = sha_ctx->residual_bytes;
-			sha_ctx->total_count += current_total;
-			if (!current_total) {
-				req->base.complete(&req->base, 0);
-				return 0;
-			}
-			err = tegra_map_sg(se_dev->dev, &se_dev->sg, 1,
-					   DMA_FROM_DEVICE, dst_ll,
-					   dst_len);
-			if (!err)
-				return -EINVAL;
-
-			se_dev->dst_bytes_mapped = dst_len;
-			se_dev->sha_dst_mapped = true;
-		}
-
-		/* Pad last byte to be 0xff for SHAKE128/256 */
-		if (sha_ctx->op_mode == SE_AES_OP_MODE_SHAKE128
-		    || sha_ctx->op_mode == SE_AES_OP_MODE_SHAKE256) {
-			while (num_sgs && process_cur_req) {
-				src_ll++;
-				num_sgs--;
-			}
-			sha_ctx->sha_buf[1][0] = 0xff;
-			src_ll->addr = sha_ctx->sha_buf_addr[1];
-			src_ll->data_len = 1;
-			src_ll++;
-			current_total += 1;
-		}
-	} else {
-		current_total = req->nbytes + sha_ctx->residual_bytes;
-		num_blks = current_total / sha_ctx->blk_size;
-
-		/* If the total spans no more than one full block, copy it
-		 * to the residual buffer and return.
-		 */
-		if (num_blks <= 1) {
-			sg_copy_to_buffer(req->src, num_sgs,
-					  sha_ctx->sha_buf[0] +
-					  sha_ctx->residual_bytes, req->nbytes);
-			sha_ctx->residual_bytes += req->nbytes;
-			req->base.complete(&req->base, 0);
-			return 0;
-		}
-
-		/* Number of bytes to be processed from given request buffers */
-		bytes_process_in_req = (num_blks * sha_ctx->blk_size) -
-			sha_ctx->residual_bytes;
-		sha_ctx->total_count += bytes_process_in_req;
-
-		/* Fill sgs entries */
-		/* If residual bytes are present, copy them to second buffer */
-		if (sha_ctx->residual_bytes)
-			memcpy(sha_ctx->sha_buf[1], sha_ctx->sha_buf[0],
-			       sha_ctx->residual_bytes);
-
-		sha_ctx->total_count += sha_ctx->residual_bytes;
-
-		se_dev->src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf);
-		src_ll = se_dev->src_ll;
-		if (sha_ctx->residual_bytes) {
-			src_ll->addr = sha_ctx->sha_buf_addr[1];
-			src_ll->data_len = sha_ctx->residual_bytes;
-			src_ll++;
-		}
-
-		se_dev->dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf);
-		dst_ll = se_dev->dst_ll;
-
-		/* Map required bytes to process in given request */
-		err = tegra_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE,
-				   src_ll, bytes_process_in_req);
-		if (!err)
-			return -EINVAL;
-
-		/* Copy residual data */
-		sha_ctx->residual_bytes = current_total - (num_blks *
-							   sha_ctx->blk_size);
-		tegra_se_sha_copy_residual_data(req, sha_ctx,
-						sha_ctx->residual_bytes);
-
-		/* Total bytes to be processed */
-		current_total = (num_blks * sha_ctx->blk_size);
-
-		err = tegra_map_sg(se_dev->dev, &se_dev->sg, 1, DMA_FROM_DEVICE,
-				   dst_ll, dst_len);
-		if (!err)
-			return -EINVAL;
-
-		se_dev->src_bytes_mapped = bytes_process_in_req;
-		se_dev->dst_bytes_mapped = dst_len;
-		se_dev->sha_src_mapped = true;
-		se_dev->sha_dst_mapped = true;
-	}
-
-	req_ctx->config = tegra_se_get_config(se_dev, sha_ctx->op_mode,
-					      false, is_last);
-	err = tegra_se_send_sha_data(se_dev, req, sha_ctx,
-				     current_total, is_last);
-	if (err) {
-		if (se_dev->sha_src_mapped)
-			tegra_unmap_sg(se_dev->dev, src_sg, DMA_TO_DEVICE,
-				       bytes_process_in_req);
-		if (se_dev->sha_dst_mapped)
-			tegra_unmap_sg(se_dev->dev, &se_dev->sg,
-				       DMA_FROM_DEVICE, dst_len);
-		return err;
-	}
-	sha_ctx->is_first = false;
-
-	pr_debug("%s:%d process sha buffer complete\n", __func__, __LINE__);
-
-	return 0;
-}
-static int tegra_se_sha_op(struct ahash_request *req, bool is_last,
-			   bool process_cur_req)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct tegra_se_sha_context *sha_ctx = crypto_ahash_ctx(tfm);
-	struct tegra_se_dev *se_dev = se_devices[SE_SHA];
-	u32 mode;
-	int ret;
-
-	struct tegra_se_sha_zero_length_vector zero_vec[] = {
-		{
-			.size = SHA1_DIGEST_SIZE,
-			.digest = "\xda\x39\xa3\xee\x5e\x6b\x4b\x0d"
-				  "\x32\x55\xbf\xef\x95\x60\x18\x90"
-				  "\xaf\xd8\x07\x09",
-		}, {
-			.size = SHA224_DIGEST_SIZE,
-			.digest = "\xd1\x4a\x02\x8c\x2a\x3a\x2b\xc9"
-				  "\x47\x61\x02\xbb\x28\x82\x34\xc4"
-				  "\x15\xa2\xb0\x1f\x82\x8e\xa6\x2a"
-				  "\xc5\xb3\xe4\x2f",
-		}, {
-			.size = SHA256_DIGEST_SIZE,
-			.digest = "\xe3\xb0\xc4\x42\x98\xfc\x1c\x14"
-				  "\x9a\xfb\xf4\xc8\x99\x6f\xb9\x24"
-				  "\x27\xae\x41\xe4\x64\x9b\x93\x4c"
-				  "\xa4\x95\x99\x1b\x78\x52\xb8\x55",
-		}, {
-			.size = SHA384_DIGEST_SIZE,
-			.digest = "\x38\xb0\x60\xa7\x51\xac\x96\x38"
-				  "\x4c\xd9\x32\x7e\xb1\xb1\xe3\x6a"
-				  "\x21\xfd\xb7\x11\x14\xbe\x07\x43"
-				  "\x4c\x0c\xc7\xbf\x63\xf6\xe1\xda"
-				  "\x27\x4e\xde\xbf\xe7\x6f\x65\xfb"
-				  "\xd5\x1a\xd2\xf1\x48\x98\xb9\x5b",
-		}, {
-			.size = SHA512_DIGEST_SIZE,
-			.digest = "\xcf\x83\xe1\x35\x7e\xef\xb8\xbd"
-				  "\xf1\x54\x28\x50\xd6\x6d\x80\x07"
-				  "\xd6\x20\xe4\x05\x0b\x57\x15\xdc"
-				  "\x83\xf4\xa9\x21\xd3\x6c\xe9\xce"
-				  "\x47\xd0\xd1\x3c\x5d\x85\xf2\xb0"
-				  "\xff\x83\x18\xd2\x87\x7e\xec\x2f"
-				  "\x63\xb9\x31\xbd\x47\x41\x7a\x81"
-				  "\xa5\x38\x32\x7a\xf9\x27\xda\x3e",
-		}, {
-			.size = SHA3_224_DIGEST_SIZE,
-			.digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
-				  "\x3b\x6e\x15\x45\x4f\x0e\xb1\xab"
-				  "\xd4\x59\x7f\x9a\x1b\x07\x8e\x3f"
-				  "\x5b\x5a\x6b\xc7",
-		}, {
-			.size = SHA3_256_DIGEST_SIZE,
-			.digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
-				  "\x51\xc1\x47\x56\xa0\x61\xd6\x62"
-				  "\xf5\x80\xff\x4d\xe4\x3b\x49\xfa"
-				  "\x82\xd8\x0a\x4b\x80\xf8\x43\x4a",
-		}, {
-			.size = SHA3_384_DIGEST_SIZE,
-			.digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
-				  "\x01\x10\x7d\x85\x2e\x4c\x24\x85"
-				  "\xc5\x1a\x50\xaa\xaa\x94\xfc\x61"
-				  "\x99\x5e\x71\xbb\xee\x98\x3a\x2a"
-				  "\xc3\x71\x38\x31\x26\x4a\xdb\x47"
-				  "\xfb\x6b\xd1\xe0\x58\xd5\xf0\x04",
-		}, {
-			.size = SHA3_512_DIGEST_SIZE,
-			.digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
-				  "\xc8\xb5\x67\xdc\x18\x5a\x75\x6e"
-				  "\x97\xc9\x82\x16\x4f\xe2\x58\x59"
-				  "\xe0\xd1\xdc\xc1\x47\x5c\x80\xa6"
-				  "\x15\xb2\x12\x3a\xf1\xf5\xf9\x4c"
-				  "\x11\xe3\xe9\x40\x2c\x3a\xc5\x58"
-				  "\xf5\x00\x19\x9d\x95\xb6\xd3\xe3"
-				  "\x01\x75\x85\x86\x28\x1d\xcd\x26",
-		}
-	};
-
-	if (strcmp(crypto_ahash_alg_name(tfm), "sha1") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_SHA1;
-	} else if (strcmp(crypto_ahash_alg_name(tfm), "sha224") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_SHA224;
-	} else if (strcmp(crypto_ahash_alg_name(tfm), "sha256") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_SHA256;
-	} else if (strcmp(crypto_ahash_alg_name(tfm), "sha384") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_SHA384;
-	} else if (strcmp(crypto_ahash_alg_name(tfm), "sha512") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_SHA512;
-	} else if (strcmp(crypto_ahash_alg_name(tfm), "sha3-224") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_SHA3_224;
-	} else if (strcmp(crypto_ahash_alg_name(tfm), "sha3-256") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_SHA3_256;
-	} else if (strcmp(crypto_ahash_alg_name(tfm), "sha3-384") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_SHA3_384;
-	} else if (strcmp(crypto_ahash_alg_name(tfm), "sha3-512") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_SHA3_512;
-	} else if (strcmp(crypto_ahash_alg_name(tfm), "shake128") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_SHAKE128;
-	} else if (strcmp(crypto_ahash_alg_name(tfm), "shake256") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_SHAKE256;
-	} else if (strcmp(crypto_ahash_alg_name(tfm), "hmac(sha224)") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_HMAC_SHA224;
-	} else if (strcmp(crypto_ahash_alg_name(tfm), "hmac(sha256)") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_HMAC_SHA256;
-	} else if (strcmp(crypto_ahash_alg_name(tfm), "hmac(sha384)") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_HMAC_SHA384;
-	} else if (strcmp(crypto_ahash_alg_name(tfm), "hmac(sha512)") == 0) {
-		sha_ctx->op_mode = SE_AES_OP_MODE_HMAC_SHA512;
-	} else {
-		dev_err(se_dev->dev, "Invalid SHA algorithm\n");
-		return -EINVAL;
-	}
-
-	/* Handle zero-length requests */
-	if (!req->nbytes) {
-		if (sha_ctx->total_count) {
-			req->base.complete(&req->base, 0);
-			return 0; /* allow empty packets */
-		} else { /* total_count equals zero */
-			if (is_last) {
-				/* If this is the last packet, apply the SW
-				 * WAR for zero-length SHA operations, since
-				 * the SE HW cannot accept a zero-length
-				 * input.
-				 */
-				mode = sha_ctx->op_mode - SE_AES_OP_MODE_SHA1;
-				if (mode < ARRAY_SIZE(zero_vec))
-					memcpy(req->result, zero_vec[mode].digest,
-					       zero_vec[mode].size);
-
-				req->base.complete(&req->base, 0);
-				return 0;
-			} else {
-				req->base.complete(&req->base, 0);
-				return 0; /* allow empty first packet */
-			}
-		}
-	}
-
-	sha_ctx->is_final = false;
-	if (is_last)
-		sha_ctx->is_final = true;
-
-	/* The caller holds se_dev->mtx and releases it; unlocking here on
-	 * the error path would double-unlock.
-	 */
-	ret = tegra_se_sha_process_buf(req, is_last, process_cur_req);
-	if (ret)
-		return ret;
-
-	return 0;
-}
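The long strcmp() ladder above could equally be table-driven. This is not the driver's code, just an illustrative alternative sketch; the mode values here are placeholders for the SE_AES_OP_MODE_* constants:

#include <string.h>

struct alg_map {
	const char *name;
	int mode;	/* stand-in for a SE_AES_OP_MODE_* constant */
};

static const struct alg_map sha_modes[] = {
	{ "sha1", 1 }, { "sha224", 2 }, { "sha256", 3 },
	{ "sha384", 4 }, { "sha512", 5 },
	{ "sha3-224", 6 }, { "sha3-256", 7 },
	{ "sha3-384", 8 }, { "sha3-512", 9 },
	{ "shake128", 10 }, { "shake256", 11 },
	{ "hmac(sha224)", 12 }, { "hmac(sha256)", 13 },
	{ "hmac(sha384)", 14 }, { "hmac(sha512)", 15 },
};

static int lookup_mode(const char *alg_name)
{
	size_t i;

	for (i = 0; i < sizeof(sha_modes) / sizeof(sha_modes[0]); i++)
		if (!strcmp(sha_modes[i].name, alg_name))
			return sha_modes[i].mode;
	return -1;	/* unsupported algorithm */
}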
-static int tegra_se_sha_init(struct ahash_request *req)
-{
-	struct tegra_se_req_context *req_ctx;
-	struct crypto_ahash *tfm = NULL;
-	struct tegra_se_sha_context *sha_ctx;
-	struct tegra_se_dev *se_dev = se_devices[SE_SHA];
-
-	pr_debug("%s:%d start\n", __func__, __LINE__);
-
-	if (!req) {
-		dev_err(se_dev->dev, "SHA request not valid\n");
-		return -EINVAL;
-	}
-
-	tfm = crypto_ahash_reqtfm(req);
-	if (!tfm) {
-		dev_err(se_dev->dev, "SHA transform not valid\n");
-		return -EINVAL;
-	}
-
-	sha_ctx = crypto_ahash_ctx(tfm);
-	if (!sha_ctx) {
-		dev_err(se_dev->dev, "SHA context not valid\n");
-		return -EINVAL;
-	}
-
-	req_ctx = ahash_request_ctx(req);
-	if (!req_ctx) {
-		dev_err(se_dev->dev, "Request context not valid\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&se_dev->mtx);
-
-	sha_ctx->total_count = 0;
-	sha_ctx->is_first = true;
-	sha_ctx->blk_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
-	sha_ctx->residual_bytes = 0;
-	mutex_unlock(&se_dev->mtx);
-
-	pr_debug("%s:%d end\n", __func__, __LINE__);
-
-	return 0;
-}
-
-static int tegra_se_sha_update(struct ahash_request *req)
-{
-	struct tegra_se_dev *se_dev = se_devices[SE_SHA];
-	int ret = 0;
-
-	pr_debug("%s:%d start\n", __func__, __LINE__);
-
-	if (!req) {
-		dev_err(se_dev->dev, "SHA request not valid\n");
-		return -EINVAL;
-	}
-	mutex_lock(&se_dev->mtx);
-
-	se_dev->sha_req = req;
-
-	ret = tegra_se_sha_op(req, false, false);
-	if (ret)
-		dev_err(se_dev->dev, "tegra_se_sha_update failed - %d\n", ret);
-	else
-		ret = -EBUSY;
-
-	mutex_unlock(&se_dev->mtx);
-
-	pr_debug("%s:%d end\n", __func__, __LINE__);
-
-	return ret;
-}
-
-static int tegra_se_sha_finup(struct ahash_request *req)
-{
-	struct tegra_se_dev *se_dev = se_devices[SE_SHA];
-	int ret = 0;
-
-	pr_debug("%s:%d start\n", __func__, __LINE__);
-
-	if (!req) {
-		dev_err(se_dev->dev, "SHA request not valid\n");
-		return -EINVAL;
-	}
-	mutex_lock(&se_dev->mtx);
-
-	se_dev->sha_req = req;
-
-	ret = tegra_se_sha_op(req, true, true);
-	if (ret)
-		dev_err(se_dev->dev, "tegra_se_sha_finup failed - %d\n", ret);
-	else
-		ret = -EBUSY;
-
-	mutex_unlock(&se_dev->mtx);
-
-	pr_debug("%s:%d end\n", __func__, __LINE__);
-
-	return ret;
-}
-
-static int tegra_se_sha_final(struct ahash_request *req)
-{
-	struct tegra_se_dev *se_dev = se_devices[SE_SHA];
-	int ret = 0;
-
-	pr_debug("%s:%d start\n", __func__, __LINE__);
-
-	if (!req) {
-		dev_err(se_dev->dev, "SHA request not valid\n");
-		return -EINVAL;
-	}
-	mutex_lock(&se_dev->mtx);
-
-	se_dev->sha_req = req;
-
-	/* Do not process data in given request */
-	ret = tegra_se_sha_op(req, true, false);
-	if (ret)
-		dev_err(se_dev->dev, "tegra_se_sha_final failed - %d\n", ret);
-	else
-		ret = -EBUSY;
-
-	mutex_unlock(&se_dev->mtx);
-
-	pr_debug("%s:%d end\n", __func__, __LINE__);
-
-	return ret;
-}
-
-static int tegra_se_sha_digest(struct ahash_request *req)
-{
-	struct tegra_se_dev *se_dev = se_devices[SE_SHA];
-	int ret = 0;
-
-	pr_debug("%s:%d start\n", __func__, __LINE__);
-
-	ret = tegra_se_sha_init(req);
-	if (ret)
-		return ret;
-	mutex_lock(&se_dev->mtx);
-
-	se_dev->sha_req = req;
-
-	ret = tegra_se_sha_op(req, true, true);
-	if (ret)
-		dev_err(se_dev->dev, "tegra_se_sha_digest failed - %d\n", ret);
-	else
-		ret = -EBUSY;
-
-	mutex_unlock(&se_dev->mtx);
-
-	pr_debug("%s:%d end\n", __func__, __LINE__);
-
-	return ret;
-}
-
-static int tegra_se_sha_export(struct ahash_request *req, void *out)
-{
-	return 0;
-}
-
-static int tegra_se_sha_import(struct ahash_request *req, const void *in)
-{
-	return 0;
-}
-
-static int tegra_se_sha_cra_init(struct crypto_tfm *tfm)
-{
-	struct tegra_se_sha_context *sha_ctx;
-	struct tegra_se_dev *se_dev = se_devices[SE_SHA];
-
-	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				 sizeof(struct tegra_se_sha_context));
-	sha_ctx = crypto_tfm_ctx(tfm);
-	if (!sha_ctx) {
-		dev_err(se_dev->dev, "SHA context not valid\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&se_dev->mtx);
-	sha_ctx->sha_buf[0] = dma_alloc_coherent(
-		se_dev->dev, (TEGRA_SE_SHA_MAX_BLOCK_SIZE * 2),
-		&sha_ctx->sha_buf_addr[0], GFP_KERNEL);
-	if (!sha_ctx->sha_buf[0]) {
-		dev_err(se_dev->dev, "Cannot allocate memory for sha_buf[0]\n");
-		mutex_unlock(&se_dev->mtx);
-		return -ENOMEM;
-	}
-	sha_ctx->sha_buf[1] = dma_alloc_coherent(
-		se_dev->dev, (TEGRA_SE_SHA_MAX_BLOCK_SIZE * 2),
-		&sha_ctx->sha_buf_addr[1], GFP_KERNEL);
-	if (!sha_ctx->sha_buf[1]) {
-		dma_free_coherent(
-			se_dev->dev, (TEGRA_SE_SHA_MAX_BLOCK_SIZE * 2),
-			sha_ctx->sha_buf[0], sha_ctx->sha_buf_addr[0]);
-		sha_ctx->sha_buf[0] = NULL;
-		dev_err(se_dev->dev, "Cannot allocate memory for sha_buf[1]\n");
-		mutex_unlock(&se_dev->mtx);
-		return -ENOMEM;
-	}
-	mutex_unlock(&se_dev->mtx);
-
-	return 0;
-}
-
-static void tegra_se_sha_cra_exit(struct crypto_tfm *tfm)
-{
-	struct tegra_se_sha_context *sha_ctx = crypto_tfm_ctx(tfm);
-	struct tegra_se_dev *se_dev = se_devices[SE_SHA];
-	int i;
-
-	mutex_lock(&se_dev->mtx);
-	for (i = 0; i < 2; i++) {
-		/* dma_free_coherent does not panic if addr is NULL */
-		dma_free_coherent(
-			se_dev->dev, (TEGRA_SE_SHA_MAX_BLOCK_SIZE * 2),
-			sha_ctx->sha_buf[i], sha_ctx->sha_buf_addr[i]);
-		sha_ctx->sha_buf[i] = NULL;
-	}
-	mutex_unlock(&se_dev->mtx);
-}
-
-static int tegra_se_aes_cmac_export(struct ahash_request *req, void *out)
-{
-	return 0;
-}
-
-static int tegra_se_aes_cmac_import(struct ahash_request *req, const void *in)
-{
-	return 0;
-}
-static int tegra_t23x_se_aes_cmac_op(struct ahash_request *req,
-				     bool process_cur_req)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct tegra_se_aes_cmac_context *cmac_ctx = crypto_ahash_ctx(tfm);
-	struct tegra_se_req_context *req_ctx = ahash_request_ctx(req);
-	struct tegra_se_dev *se_dev;
-	struct tegra_se_ll *src_ll;
-	u32 num_sgs, total_bytes = 0;
-	struct tegra_se_cmdbuf *cmdbuf;
-	int ret = 0;
-
-	se_dev = se_devices[SE_CMAC];
-
-	mutex_lock(&se_dev->mtx);
-
-	/*
-	 * SE doesn't support CMAC input where message length is 0 bytes.
-	 * See Bug 200472457.
-	 */
-	if (!(cmac_ctx->nbytes + req->nbytes)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (se_dev->cmdbuf_cnt) {
-		cmdbuf = cmac_ctx->cmdbuf;
-	} else {
-		cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent,
-					       SZ_16K * SE_MAX_SUBMIT_CHAIN_SZ);
-		if (IS_ERR(cmdbuf)) {
-			dev_err(se_dev->dev, "cmdbuf allocation failed\n");
-			ret = PTR_ERR(cmdbuf);
-			goto out;
-		}
-	}
-
-	if (process_cur_req) {
-		if ((cmac_ctx->nbytes + req->nbytes)
-		    > (TEGRA_SE_AES_BLOCK_SIZE * 20)) {
-			dev_err(se_dev->dev, "total CMAC buffer bytes exceed limit\n");
-			ret = -EOPNOTSUPP;
-			goto out;
-		}
-
-		num_sgs = tegra_se_count_sgs(req->src, req->nbytes);
-		if (num_sgs > SE_MAX_SRC_SG_COUNT) {
-			dev_err(se_dev->dev, "number of SG buffers exceeds limit\n");
-			ret = -EDOM;
-			goto out;
-		}
-
-		sg_copy_to_buffer(req->src, num_sgs,
-				  cmac_ctx->buf + cmac_ctx->nbytes, req->nbytes);
-		cmac_ctx->nbytes += req->nbytes;
-	}
-
-	se_dev->src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf);
-	src_ll = se_dev->src_ll;
-	src_ll->addr = cmac_ctx->buf_dma_addr;
-	src_ll->data_len = cmac_ctx->nbytes;
-	src_ll++;
-	total_bytes = cmac_ctx->nbytes;
-
-	req_ctx->op_mode = SE_AES_OP_MODE_CMAC;
-	req_ctx->config = tegra_se_get_config(se_dev, req_ctx->op_mode,
-					      true, cmac_ctx->keylen);
-	req_ctx->crypto_config = tegra_se_get_crypto_config(
-		se_dev, req_ctx->op_mode, true,
-		cmac_ctx->slot->slot_num, 0, true);
-
-	tegra_se_send_data(se_dev, req_ctx, NULL, total_bytes,
-			   se_dev->opcode_addr,
-			   cmdbuf->addr);
-	ret = tegra_se_channel_submit_gather(
-		se_dev, cmdbuf,
-		0, se_dev->cmdbuf_cnt, NONE);
-	if (ret)
-		goto out;
-
-	ret = tegra_se_read_cmac_result(se_dev, req->result,
-					TEGRA_SE_AES_CMAC_DIGEST_SIZE, false);
-	if (ret) {
-		dev_err(se_dev->dev, "failed to read cmac result\n");
-		goto out;
-	}
-
-	ret = tegra_se_clear_cmac_result(se_dev,
-					 TEGRA_SE_AES_CMAC_DIGEST_SIZE);
-	if (ret) {
-		dev_err(se_dev->dev, "failed to clear cmac result\n");
-		goto out;
-	}
-
-out:
-	mutex_unlock(&se_dev->mtx);
-
-	return ret;
-}
-
-static int tegra_se_aes_cmac_op(struct ahash_request *req, bool process_cur_req)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct tegra_se_aes_cmac_context *cmac_ctx = crypto_ahash_ctx(tfm);
-	struct tegra_se_req_context *req_ctx = ahash_request_ctx(req);
-	struct tegra_se_dev *se_dev;
-	struct tegra_se_ll *src_ll;
-	struct tegra_se_cmdbuf *cmdbuf;
-	u32 num_sgs, blocks_to_process, last_block_bytes = 0, offset = 0;
-	u8 piv[TEGRA_SE_AES_IV_SIZE];
-	unsigned int total;
-	int ret = 0, i = 0;
-	bool padding_needed = false;
-	bool use_orig_iv = true;
-
-	se_dev = se_devices[SE_CMAC];
-
-	if (se_dev->chipdata->kac_type == SE_KAC_T23X)
-		return tegra_t23x_se_aes_cmac_op(req, process_cur_req);
-
-	mutex_lock(&se_dev->mtx);
-
-	cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent,
-				       SZ_16K * SE_MAX_SUBMIT_CHAIN_SZ);
-	if (IS_ERR(cmdbuf)) {
-		dev_err(se_dev->dev, "cmdbuf allocation failed\n");
-		ret = PTR_ERR(cmdbuf);
-		goto out;
-	}
-
-	if (process_cur_req) {
-		if ((cmac_ctx->nbytes + req->nbytes)
-		    > (TEGRA_SE_AES_BLOCK_SIZE * 20)) {
-			dev_err(se_dev->dev, "total CMAC buffer bytes exceed limit\n");
-			ret = -EOPNOTSUPP;
-			goto out;
-		}
-
-		num_sgs = tegra_se_count_sgs(req->src, req->nbytes);
-		if (num_sgs > SE_MAX_SRC_SG_COUNT) {
-			dev_err(se_dev->dev, "number of SG buffers exceeds limit\n");
-			ret = -EDOM;
-			goto out;
-		}
-
-		sg_copy_to_buffer(req->src, num_sgs,
-				  cmac_ctx->buf + cmac_ctx->nbytes, req->nbytes);
-		cmac_ctx->nbytes += req->nbytes;
-	}
-
-	req_ctx->op_mode = SE_AES_OP_MODE_CMAC;
-	blocks_to_process = cmac_ctx->nbytes / TEGRA_SE_AES_BLOCK_SIZE;
-	/* number of bytes is not block aligned */
-	if ((cmac_ctx->nbytes % TEGRA_SE_AES_BLOCK_SIZE)
-	    || !blocks_to_process) {
-		padding_needed = true;
-	} else {
-		/* decrement num of blocks */
-		blocks_to_process--;
-	}
-
-	/* first process all blocks except last block */
-	if (blocks_to_process) {
-		total = blocks_to_process * TEGRA_SE_AES_BLOCK_SIZE;
-
-		se_dev->src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf);
-		src_ll = se_dev->src_ll;
-
-		src_ll->addr = cmac_ctx->buf_dma_addr;
-		src_ll->data_len = total;
-		src_ll++;
-
-		req_ctx->config = tegra_se_get_config(se_dev, req_ctx->op_mode,
-						      true, cmac_ctx->keylen);
-		/* write zero IV */
-		memset(piv, 0, TEGRA_SE_AES_IV_SIZE);
-
-		ret = tegra_se_send_key_data(
-			se_dev, piv, TEGRA_SE_AES_IV_SIZE,
-			cmac_ctx->slot->slot_num, SE_KEY_TABLE_TYPE_ORGIV,
-			se_dev->opcode_addr,
-			cmdbuf, NONE);
-		if (ret)
-			goto out;
-
-		req_ctx->crypto_config = tegra_se_get_crypto_config(
-			se_dev, req_ctx->op_mode, true,
-			cmac_ctx->slot->slot_num, 0, true);
-
-		tegra_se_send_data(se_dev, req_ctx, NULL, total,
-				   se_dev->opcode_addr,
-				   cmdbuf->addr);
-		ret = tegra_se_channel_submit_gather(
-			se_dev, cmdbuf,
-			0, se_dev->cmdbuf_cnt, NONE);
-		if (ret)
-			goto out;
-
-		ret = tegra_se_read_cmac_result(se_dev, piv,
-						TEGRA_SE_AES_CMAC_DIGEST_SIZE, false);
-		if (ret) {
-			dev_err(se_dev->dev, "failed to read cmac result\n");
-			goto out;
-		}
-
-		use_orig_iv = false;
-	}
-
-	/* process last block */
-	offset = blocks_to_process * TEGRA_SE_AES_BLOCK_SIZE;
-	if (padding_needed) {
-		/* pad with 0x80, 0, 0 ... */
-		last_block_bytes = cmac_ctx->nbytes % TEGRA_SE_AES_BLOCK_SIZE;
-		cmac_ctx->buf[cmac_ctx->nbytes] = 0x80;
-		for (i = last_block_bytes + 1; i < TEGRA_SE_AES_BLOCK_SIZE; i++)
-			cmac_ctx->buf[offset + i] = 0;
-		/* XOR with K2 */
-		for (i = 0; i < TEGRA_SE_AES_BLOCK_SIZE; i++)
-			cmac_ctx->buf[offset + i] ^= cmac_ctx->K2[i];
-	} else {
-		/* XOR with K1 */
-		for (i = 0; i < TEGRA_SE_AES_BLOCK_SIZE; i++)
-			cmac_ctx->buf[offset + i] ^= cmac_ctx->K1[i];
-	}
-
-	se_dev->src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf);
-
-	se_dev->src_ll->addr = cmac_ctx->buf_dma_addr + offset;
-	se_dev->src_ll->data_len = TEGRA_SE_AES_BLOCK_SIZE;
-
-	if (use_orig_iv) {
-		/* Use a zero IV; this is the case when the number of
-		 * bytes is <= the block size.
-		 */
-		memset(piv, 0, TEGRA_SE_AES_IV_SIZE);
-		ret = tegra_se_send_key_data(
-			se_dev, piv, TEGRA_SE_AES_IV_SIZE,
-			cmac_ctx->slot->slot_num, SE_KEY_TABLE_TYPE_ORGIV,
-			se_dev->opcode_addr,
-			cmdbuf, NONE);
-	} else {
-		ret = tegra_se_send_key_data(
-			se_dev, piv, TEGRA_SE_AES_IV_SIZE,
-			cmac_ctx->slot->slot_num, SE_KEY_TABLE_TYPE_UPDTDIV,
-			se_dev->opcode_addr,
-			cmdbuf, NONE);
-	}
-
-	if (ret)
-		goto out;
-
-	req_ctx->config = tegra_se_get_config(se_dev, req_ctx->op_mode,
-					      true, cmac_ctx->keylen);
-	req_ctx->crypto_config = tegra_se_get_crypto_config(
-		se_dev, req_ctx->op_mode, true,
-		cmac_ctx->slot->slot_num, 0, use_orig_iv);
-
-	tegra_se_send_data(se_dev, req_ctx, NULL, TEGRA_SE_AES_BLOCK_SIZE,
-			   se_dev->opcode_addr, cmdbuf->addr);
-	ret = tegra_se_channel_submit_gather(
-		se_dev, cmdbuf,
-		0, se_dev->cmdbuf_cnt, NONE);
-	if (ret)
-		goto out;
-
-	ret = tegra_se_read_cmac_result(se_dev, req->result,
-					TEGRA_SE_AES_CMAC_DIGEST_SIZE, false);
-	if (ret) {
-		dev_err(se_dev->dev, "failed to read cmac result\n");
-		goto out;
-	}
-
-	cmac_ctx->nbytes = 0;
-out:
-	mutex_unlock(&se_dev->mtx);
-
-	return ret;
-}
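For the non-T23x path the driver implements RFC 4493 CMAC explicitly: pad a partial final block with 0x80 followed by zeros and XOR it with K2, or XOR a complete final block with K1. A self-contained sketch of that last-block preparation, with hypothetical names:

#include <stdint.h>
#include <string.h>

#define BLK 16

/* Prepare the final CMAC block in place: len is the number of valid
 * bytes in blk (0 < len <= BLK). Mirrors RFC 4493 step 4. */
static void cmac_last_block(uint8_t blk[BLK], size_t len,
			    const uint8_t k1[BLK], const uint8_t k2[BLK])
{
	const uint8_t *subkey = (len == BLK) ? k1 : k2;
	size_t i;

	if (len < BLK) {
		blk[len] = 0x80;		/* 10^i padding */
		memset(blk + len + 1, 0, BLK - len - 1);
	}
	for (i = 0; i < BLK; i++)
		blk[i] ^= subkey[i];
}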
-static int tegra_se_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
-				    unsigned int keylen)
-{
-	struct tegra_se_aes_cmac_context *ctx = crypto_ahash_ctx(tfm);
-	struct tegra_se_req_context *req_ctx = NULL;
-	struct tegra_se_dev *se_dev;
-	struct tegra_se_slot *pslot;
-	struct tegra_se_cmdbuf *cmdbuf;
-	u8 piv[TEGRA_SE_AES_IV_SIZE];
-	u32 *pbuf;
-	dma_addr_t pbuf_adr;
-	int ret = 0;
-	u8 const rb = 0x87;
-	u8 msb;
-
-	se_dev = se_devices[SE_CMAC];
-
-	mutex_lock(&se_dev->mtx);
-
-	if (!ctx) {
-		dev_err(se_dev->dev, "invalid context");
-		mutex_unlock(&se_dev->mtx);
-		return -EINVAL;
-	}
-
-	req_ctx = devm_kzalloc(se_dev->dev, sizeof(struct tegra_se_req_context),
-			       GFP_KERNEL);
-	if (!req_ctx) {
-		mutex_unlock(&se_dev->mtx);
-		return -ENOMEM;
-	}
-	if ((keylen != TEGRA_SE_KEY_128_SIZE) &&
-	    (keylen != TEGRA_SE_KEY_192_SIZE) &&
-	    (keylen != TEGRA_SE_KEY_256_SIZE)) {
-		dev_err(se_dev->dev, "invalid key size");
-		ret = -EINVAL;
-		goto free_ctx;
-	}
-
-	cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent,
-				       SZ_16K * SE_MAX_SUBMIT_CHAIN_SZ);
-	if (IS_ERR(cmdbuf)) {
-		dev_err(se_dev->dev, "cmdbuf allocation failed\n");
-		ret = PTR_ERR(cmdbuf);
-		goto free_ctx;
-	}
-	ctx->cmdbuf = cmdbuf;
-
-	if (key) {
-		if (!ctx->slot ||
-		    (ctx->slot &&
-		     ctx->slot->slot_num == ssk_slot.slot_num)) {
-			pslot = tegra_se_alloc_key_slot();
-			if (!pslot) {
-				dev_err(se_dev->dev, "no free key slot\n");
-				ret = -ENOMEM;
-				goto free_ctx;
-			}
-			ctx->slot = pslot;
-		}
-		ctx->keylen = keylen;
-	} else {
-		tegra_se_free_key_slot(ctx->slot);
-		ctx->slot = &ssk_slot;
-		ctx->keylen = AES_KEYSIZE_128;
-	}
-
-	pbuf = dma_alloc_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE,
-				  &pbuf_adr, GFP_KERNEL);
-	if (!pbuf) {
-		dev_err(se_dev->dev, "cannot allocate DMA buffer");
-		ret = -ENOMEM;
-		goto keyslt_free;
-	}
-	memset(pbuf, 0, TEGRA_SE_AES_BLOCK_SIZE);
-
-	se_dev->src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf);
-	se_dev->dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf);
-
-	se_dev->src_ll->addr = pbuf_adr;
-	se_dev->src_ll->data_len = TEGRA_SE_AES_BLOCK_SIZE;
-	se_dev->dst_ll->addr = pbuf_adr;
-	se_dev->dst_ll->data_len = TEGRA_SE_AES_BLOCK_SIZE;
-
-	/* load the key */
-	if (se_dev->chipdata->kac_type == SE_KAC_T23X) {
-		ret = tegra_se_send_key_data(
-			se_dev, (u8 *)key, keylen, ctx->slot->slot_num,
-			SE_KEY_TABLE_TYPE_CMAC, se_dev->opcode_addr,
-			cmdbuf, NONE);
-	} else {
-		ret = tegra_se_send_key_data(
-			se_dev, (u8 *)key, keylen, ctx->slot->slot_num,
-			SE_KEY_TABLE_TYPE_KEY, se_dev->opcode_addr,
-			cmdbuf, NONE);
-	}
-	if (ret) {
-		dev_err(se_dev->dev,
-			"tegra_se_send_key_data for loading cmac key failed\n");
-		goto out;
-	}
-
-	/* write zero IV */
-	memset(piv, 0, TEGRA_SE_AES_IV_SIZE);
-
-	/* load IV */
-	ret = tegra_se_send_key_data(
-		se_dev, piv, TEGRA_SE_AES_IV_SIZE, ctx->slot->slot_num,
-		SE_KEY_TABLE_TYPE_ORGIV, se_dev->opcode_addr,
-		cmdbuf, NONE);
-	if (ret) {
-		dev_err(se_dev->dev,
-			"tegra_se_send_key_data for loading cmac iv failed\n");
-		goto out;
-	}
-
-	if (se_dev->chipdata->kac_type == SE_KAC_T23X) {
-		ret = tegra_se_clear_cmac_result(se_dev,
-						 TEGRA_SE_AES_CMAC_DIGEST_SIZE);
-		if (ret) {
-			dev_err(se_dev->dev, "failed to clear cmac result\n");
-			goto out;
-		}
-	} else {
-		/* config crypto algo */
-		req_ctx->config = tegra_se_get_config(se_dev,
-						      SE_AES_OP_MODE_CBC, true,
-						      keylen);
-		req_ctx->crypto_config = tegra_se_get_crypto_config(
-			se_dev, SE_AES_OP_MODE_CBC, true,
-			ctx->slot->slot_num, 0, true);
-
-		tegra_se_send_data(se_dev, req_ctx, NULL,
-				   TEGRA_SE_AES_BLOCK_SIZE, se_dev->opcode_addr,
-				   cmdbuf->addr);
-		ret = tegra_se_channel_submit_gather(
-			se_dev, cmdbuf,
-			0, se_dev->cmdbuf_cnt, NONE);
-		if (ret) {
-			dev_err(se_dev->dev,
-				"tegra_se_aes_cmac_setkey: start op failed\n");
-			goto out;
-		}
-
-		/* compute K1 subkey */
-		memcpy(ctx->K1, pbuf, TEGRA_SE_AES_BLOCK_SIZE);
-		tegra_se_leftshift_onebit(ctx->K1, TEGRA_SE_AES_BLOCK_SIZE,
-					  &msb);
-		if (msb)
-			ctx->K1[TEGRA_SE_AES_BLOCK_SIZE - 1] ^= rb;
-
-		/* compute K2 subkey */
-		memcpy(ctx->K2, ctx->K1, TEGRA_SE_AES_BLOCK_SIZE);
-		tegra_se_leftshift_onebit(ctx->K2, TEGRA_SE_AES_BLOCK_SIZE,
-					  &msb);
-
-		if (msb)
-			ctx->K2[TEGRA_SE_AES_BLOCK_SIZE - 1] ^= rb;
-	}
-out:
-	if (pbuf)
-		dma_free_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE,
-				  pbuf, pbuf_adr);
-keyslt_free:
-	if (ret)
-		tegra_se_free_key_slot(ctx->slot);
-free_ctx:
-	devm_kfree(se_dev->dev, req_ctx);
-	mutex_unlock(&se_dev->mtx);
-
-	return ret;
-}
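The K1/K2 computation above is the standard GF(2^128) doubling from RFC 4493: encrypt an all-zero block (done here via the CBC operation into pbuf), shift left one bit, and conditionally XOR the constant Rb = 0x87 into the last byte. A standalone sketch of the same derivation:

#include <stdint.h>
#include <string.h>

#define BLK 16

/* Left-shift a 128-bit big-endian value by one bit; returns the bit
 * shifted out of the most significant byte. */
static int shl1(uint8_t b[BLK])
{
	int i, carry = 0;

	for (i = BLK - 1; i >= 0; i--) {
		int msb = b[i] >> 7;

		b[i] = (uint8_t)((b[i] << 1) | carry);
		carry = msb;
	}
	return carry;
}

/* l is AES-K(0^128); derive the RFC 4493 subkeys from it */
static void cmac_subkeys(const uint8_t l[BLK],
			 uint8_t k1[BLK], uint8_t k2[BLK])
{
	memcpy(k1, l, BLK);
	if (shl1(k1))
		k1[BLK - 1] ^= 0x87;	/* Rb */
	memcpy(k2, k1, BLK);
	if (shl1(k2))
		k2[BLK - 1] ^= 0x87;
}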
-static int tegra_se_aes_cmac_init(struct ahash_request *req)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct tegra_se_aes_cmac_context *cmac_ctx = crypto_ahash_ctx(tfm);
-
-	cmac_ctx->nbytes = 0;
-
-	return 0;
-}
-
-static int tegra_se_aes_cmac_update(struct ahash_request *req)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct tegra_se_aes_cmac_context *cmac_ctx = crypto_ahash_ctx(tfm);
-	struct tegra_se_dev *se_dev;
-	u32 num_sgs;
-
-	se_dev = se_devices[SE_CMAC];
-
-	if ((cmac_ctx->nbytes + req->nbytes)
-	    > (TEGRA_SE_AES_BLOCK_SIZE * 20)) {
-		dev_err(se_dev->dev, "total CMAC buffer bytes exceed limit\n");
-		return -EOPNOTSUPP;
-	}
-
-	num_sgs = tegra_se_count_sgs(req->src, req->nbytes);
-
-	sg_copy_to_buffer(req->src, num_sgs, cmac_ctx->buf + cmac_ctx->nbytes,
-			  req->nbytes);
-	cmac_ctx->nbytes += req->nbytes;
-
-	return 0;
-}
-
-static int tegra_se_aes_cmac_digest(struct ahash_request *req)
-{
-	return tegra_se_aes_cmac_init(req) ?: tegra_se_aes_cmac_op(req, true);
-}
-
-static int tegra_se_aes_cmac_final(struct ahash_request *req)
-{
-	return tegra_se_aes_cmac_op(req, false);
-}
-
-static int tegra_se_aes_cmac_finup(struct ahash_request *req)
-{
-	return tegra_se_aes_cmac_op(req, true);
-}
-
-static int tegra_se_aes_cmac_cra_init(struct crypto_tfm *tfm)
-{
-	struct tegra_se_aes_cmac_context *cmac_ctx;
-	struct tegra_se_dev *se_dev = se_devices[SE_CMAC];
-
-	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				 sizeof(struct tegra_se_aes_cmac_context));
-	cmac_ctx = crypto_tfm_ctx(tfm);
-	if (!cmac_ctx) {
-		dev_err(se_dev->dev, "CMAC context not valid\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&se_dev->mtx);
-	cmac_ctx->buf = dma_alloc_coherent(se_dev->dev,
-					   (TEGRA_SE_AES_BLOCK_SIZE * 20),
-					   &cmac_ctx->buf_dma_addr, GFP_KERNEL);
-	if (!cmac_ctx->buf) {
-		dev_err(se_dev->dev, "Cannot allocate memory for buf\n");
-		mutex_unlock(&se_dev->mtx);
-		return -ENOMEM;
-	}
-	cmac_ctx->nbytes = 0;
-	mutex_unlock(&se_dev->mtx);
-
-	return 0;
-}
-
-static void tegra_se_aes_cmac_cra_exit(struct crypto_tfm *tfm)
-{
-	struct tegra_se_aes_cmac_context *cmac_ctx = crypto_tfm_ctx(tfm);
-	struct tegra_se_dev *se_dev = se_devices[SE_CMAC];
-
-	tegra_se_free_key_slot(cmac_ctx->slot);
-	cmac_ctx->slot = NULL;
-
-	mutex_lock(&se_dev->mtx);
-	dma_free_coherent(se_dev->dev, (TEGRA_SE_AES_BLOCK_SIZE * 20),
-			  cmac_ctx->buf, cmac_ctx->buf_dma_addr);
-	mutex_unlock(&se_dev->mtx);
-}
-
-static int tegra_se_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
-				    unsigned int keylen)
-{
-	struct tegra_se_sha_context *ctx = crypto_ahash_ctx(tfm);
-	struct tegra_se_req_context *req_ctx = NULL;
-	struct tegra_se_dev *se_dev;
-	struct tegra_se_cmdbuf *cmdbuf;
-	dma_addr_t iova;
-	u32 *cpuvaddr;
-	u8 _key[TEGRA_SE_KEY_256_SIZE] = { 0 };
-	int ret = 0;
-
-	se_dev = se_devices[SE_SHA];
-	mutex_lock(&se_dev->mtx);
-
-	req_ctx = devm_kzalloc(se_dev->dev,
-			       sizeof(struct tegra_se_req_context),
-			       GFP_KERNEL);
-	if (!req_ctx) {
-		ret = -ENOMEM;
-		goto out_mutex;
-	}
-
-	if (!key) {
-		ret = -EINVAL;
-		goto free_req_ctx;
-	}
-
-	if (keylen > TEGRA_SE_KEY_256_SIZE) {
-		dev_err(se_dev->dev, "invalid key size");
-		ret = -EINVAL;
-		goto free_req_ctx;
-	}
-
-	/* If keylen < TEGRA_SE_KEY_256_SIZE, append zeros to the end of
-	 * the key so that it can be stored in the keyslot. This is safe
-	 * because TEGRA_SE_KEY_256_SIZE is smaller than 512 bits, the
-	 * minimum SHA block size.
-	 */
-	memcpy(_key, key, keylen);
-
-	if (!ctx->slot) {
-		ctx->slot = tegra_se_alloc_key_slot();
-		if (!ctx->slot) {
-			dev_err(se_dev->dev, "no free key slot\n");
-			ret = -ENOMEM;
-			goto free_req_ctx;
-		}
-	}
-
-	ctx->keylen = keylen;
-
-	cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, SZ_16K);
-	if (IS_ERR(cmdbuf)) {
-		dev_err(se_dev->dev, "cmdbuf allocation failed\n");
-		ret = PTR_ERR(cmdbuf);
-		goto free_req_ctx;
-	}
-
-	cpuvaddr = cmdbuf->addr;
-	iova = cmdbuf->iova;
-
-	ret = tegra_se_send_key_data(se_dev, _key, TEGRA_SE_KEY_256_SIZE,
-				     ctx->slot->slot_num, SE_KEY_TABLE_TYPE_HMAC,
-				     se_dev->opcode_addr, cmdbuf, NONE);
-	if (ret) {
-		dev_err(se_dev->dev,
-			"tegra_se_send_key_data for loading HMAC key failed\n");
-		tegra_se_free_key_slot(ctx->slot);
-	}
-free_req_ctx:
-	devm_kfree(se_dev->dev, req_ctx);
-out_mutex:
-	mutex_unlock(&se_dev->mtx);
-
-	return ret;
-}
-
-/* Security Engine rsa key slot */
-struct tegra_se_rsa_slot {
-	struct list_head node;
-	u8 slot_num;	/* Key slot number */
-	bool available;	/* Tells whether key slot is free to use */
-};
-
-/* Security Engine AES RSA context */
-struct tegra_se_aes_rsa_context {
-	struct tegra_se_dev *se_dev;	/* Security Engine device */
-	struct tegra_se_rsa_slot *slot;	/* Security Engine rsa key slot */
-	u32 mod_len;
-	u32 exp_len;
-};
-
-static void tegra_se_rsa_free_key_slot(struct tegra_se_rsa_slot *slot)
-{
-	if (slot) {
-		spin_lock(&rsa_key_slot_lock);
-		slot->available = true;
-		spin_unlock(&rsa_key_slot_lock);
-	}
-}
-
-static struct tegra_se_rsa_slot *tegra_se_alloc_rsa_key_slot(void)
-{
-	struct tegra_se_rsa_slot *slot = NULL;
-	bool found = false;
-
-	spin_lock(&rsa_key_slot_lock);
-	list_for_each_entry(slot, &rsa_key_slot, node) {
-		if (slot->available) {
-			slot->available = false;
-			found = true;
-			break;
-		}
-	}
-	spin_unlock(&rsa_key_slot_lock);
-
-	return found ? slot : NULL;
-}
-static int tegra_init_rsa_key_slot(struct tegra_se_dev *se_dev)
-{
-	int i;
-
-	se_dev->rsa_slot_list = devm_kzalloc(
-		se_dev->dev, sizeof(struct tegra_se_rsa_slot) *
-		TEGRA_SE_RSA_KEYSLOT_COUNT, GFP_KERNEL);
-	if (!se_dev->rsa_slot_list)
-		return -ENOMEM;
-
-	spin_lock_init(&rsa_key_slot_lock);
-	spin_lock(&rsa_key_slot_lock);
-	for (i = 0; i < TEGRA_SE_RSA_KEYSLOT_COUNT; i++) {
-		se_dev->rsa_slot_list[i].available = true;
-		se_dev->rsa_slot_list[i].slot_num = i;
-		INIT_LIST_HEAD(&se_dev->rsa_slot_list[i].node);
-		list_add_tail(&se_dev->rsa_slot_list[i].node, &rsa_key_slot);
-	}
-	spin_unlock(&rsa_key_slot_lock);
-
-	return 0;
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
-static unsigned int tegra_se_rsa_max_size(struct crypto_akcipher *tfm)
-#else
-static int tegra_se_rsa_max_size(struct crypto_akcipher *tfm)
-#endif
-{
-	struct tegra_se_aes_rsa_context *ctx = akcipher_tfm_ctx(tfm);
-
-	if (!ctx) {
-		pr_err("No RSA context\n");
-		return -EINVAL;
-	}
-
-	return ctx->mod_len;
-}
-
-static int tegra_se_send_rsa_data(struct tegra_se_dev *se_dev,
-				  struct tegra_se_aes_rsa_context *rsa_ctx)
-{
-	struct tegra_se_cmdbuf *cmdbuf;
-	u32 cmdbuf_num_words = 0, i = 0;
-	int err = 0;
-	u32 val = 0;
-
-	cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, SZ_4K);
-	if (IS_ERR(cmdbuf)) {
-		dev_err(se_dev->dev, "cmdbuf allocation failed\n");
-		return PTR_ERR(cmdbuf);
-	}
-
-	cmdbuf->size = SZ_4K;
-	cmdbuf->addr[i++] = __nvhost_opcode_nonincr(
-		se_dev->opcode_addr + SE_RSA_OPERATION_OFFSET, 1);
-	cmdbuf->addr[i++] = SE_OPERATION_WRSTALL(WRSTALL_TRUE);
-
-	val = SE_CONFIG_ENC_ALG(ALG_RSA) |
-	      SE_CONFIG_DEC_ALG(ALG_NOP) |
-	      SE_CONFIG_DST(DST_MEMORY);
-	cmdbuf->addr[i++] = __nvhost_opcode_incr(se_dev->opcode_addr, 8);
-	cmdbuf->addr[i++] = val;
-	cmdbuf->addr[i++] = RSA_KEY_SLOT(rsa_ctx->slot->slot_num);
-	cmdbuf->addr[i++] = (rsa_ctx->mod_len / 64) - 1;
-	cmdbuf->addr[i++] = (rsa_ctx->exp_len / 4);
-	cmdbuf->addr[i++] = (u32)(se_dev->src_ll->addr);
-	cmdbuf->addr[i++] = (u32)(SE_ADDR_HI_MSB(MSB(se_dev->src_ll->addr)) |
-				  SE_ADDR_HI_SZ(se_dev->src_ll->data_len));
-	cmdbuf->addr[i++] = (u32)(se_dev->dst_ll->addr);
-	cmdbuf->addr[i++] = (u32)(SE_ADDR_HI_MSB(MSB(se_dev->dst_ll->addr)) |
-				  SE_ADDR_HI_SZ(se_dev->dst_ll->data_len));
-	cmdbuf->addr[i++] = __nvhost_opcode_nonincr(
-		se_dev->opcode_addr + SE_RSA_OPERATION_OFFSET, 1);
-	cmdbuf->addr[i++] = SE_OPERATION_WRSTALL(WRSTALL_TRUE) |
-			    SE_OPERATION_LASTBUF(LASTBUF_TRUE) |
-			    SE_OPERATION_OP(OP_START);
-
-	cmdbuf_num_words = i;
-
-	err = tegra_se_channel_submit_gather(se_dev, cmdbuf,
-					     0, cmdbuf_num_words,
-					     NONE);
-	return err;
-}
dev_err(se_dev->dev, "no free key slot\n"); - return -ENOMEM; - } else { - ctx->slot = pslot; - } - } - - module_key_length = (keylen >> 16); - exponent_key_length = (keylen & (0xFFFF)); - - if (!(((module_key_length / 64) >= 1) && - ((module_key_length / 64) <= 4))) { - tegra_se_rsa_free_key_slot(ctx->slot); - dev_err(se_dev->dev, "Invalid RSA modulus length\n"); - return -EDOM; - } - - ctx->mod_len = module_key_length; - ctx->exp_len = exponent_key_length; - - cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, SZ_64K); - if (IS_ERR(cmdbuf)) { - dev_err(se_dev->dev, "cmdbuf allocation failed\n"); - return PTR_ERR(cmdbuf); - } - - cmdbuf->size = SZ_64K; - cmdbuf->addr[i++] = __nvhost_opcode_nonincr( - se_dev->opcode_addr + SE_RSA_OPERATION_OFFSET, 1); - cmdbuf->addr[i++] = SE_OPERATION_WRSTALL(WRSTALL_TRUE); - - if (exponent_key_length) { - key_size_words = (exponent_key_length / key_word_size); - /* Write exponent */ - for (j = (key_size_words - 1); j >= 0; j--) { - pkt = RSA_KEY_NUM(ctx->slot->slot_num) | - RSA_KEY_TYPE(RSA_KEY_TYPE_EXP) | - RSA_KEY_PKT_WORD_ADDR(j); - val = SE_RSA_KEYTABLE_PKT(pkt); - cmdbuf->addr[i++] = __nvhost_opcode_nonincr( - se_dev->opcode_addr + - SE_RSA_KEYTABLE_ADDR_OFFSET, 1); - cmdbuf->addr[i++] = val; - cmdbuf->addr[i++] = __nvhost_opcode_nonincr( - se_dev->opcode_addr + - SE_RSA_KEYTABLE_DATA_OFFSET, 1); - cmdbuf->addr[i++] = *pkeydata++; - } - } - - if (module_key_length) { - key_size_words = (module_key_length / key_word_size); - /* Write modulus */ - for (j = (key_size_words - 1); j >= 0; j--) { - pkt = RSA_KEY_NUM(ctx->slot->slot_num) | - RSA_KEY_TYPE(RSA_KEY_TYPE_MOD) | - RSA_KEY_PKT_WORD_ADDR(j); - val = SE_RSA_KEYTABLE_PKT(pkt); - cmdbuf->addr[i++] = __nvhost_opcode_nonincr( - se_dev->opcode_addr + - SE_RSA_KEYTABLE_ADDR_OFFSET, 1); - cmdbuf->addr[i++] = val; - cmdbuf->addr[i++] = __nvhost_opcode_nonincr( - se_dev->opcode_addr + - SE_RSA_KEYTABLE_DATA_OFFSET, 1); - cmdbuf->addr[i++] = *pkeydata++; - } - } - - cmdbuf->addr[i++] = __nvhost_opcode_nonincr( - se_dev->opcode_addr + SE_RSA_OPERATION_OFFSET, 1); - cmdbuf->addr[i++] = SE_OPERATION_WRSTALL(WRSTALL_TRUE) | - SE_OPERATION_LASTBUF(LASTBUF_TRUE) | - SE_OPERATION_OP(OP_DUMMY); - cmdbuf_num_words = i; - - mutex_lock(&se_dev->mtx); - err = tegra_se_channel_submit_gather(se_dev, cmdbuf, - 0, cmdbuf_num_words, - NONE); - mutex_unlock(&se_dev->mtx); - if (err) - tegra_se_rsa_free_key_slot(ctx->slot); - - return err; -} - -static int tegra_se_rsa_op(struct akcipher_request *req) -{ - struct crypto_akcipher *tfm = NULL; - struct tegra_se_aes_rsa_context *rsa_ctx = NULL; - struct tegra_se_dev *se_dev; - u32 num_src_sgs, num_dst_sgs; - int ret1 = 0, ret2 = 0; - - se_dev = se_devices[SE_RSA]; - - if (!req) { - dev_err(se_dev->dev, "Invalid RSA request\n"); - return -EINVAL; - } - - tfm = crypto_akcipher_reqtfm(req); - if (!tfm) { - dev_err(se_dev->dev, "Invalid RSA transform\n"); - return -EINVAL; - } - - rsa_ctx = akcipher_tfm_ctx(tfm); - if (!rsa_ctx || !rsa_ctx->slot) { - dev_err(se_dev->dev, "Invalid RSA context\n"); - return -EINVAL; - } - - if ((req->src_len < TEGRA_SE_RSA512_INPUT_SIZE) || - (req->src_len > TEGRA_SE_RSA2048_INPUT_SIZE)) { - dev_err(se_dev->dev, "RSA src input length not in range\n"); - return -EDOM; - } - - if ((req->dst_len < TEGRA_SE_RSA512_INPUT_SIZE) || - (req->dst_len > TEGRA_SE_RSA2048_INPUT_SIZE)) { - dev_err(se_dev->dev, "RSA dst input length not in range\n"); - return -EDOM; - } - - if (req->src_len != rsa_ctx->mod_len) { - dev_err(se_dev->dev, "Invalid RSA src input 
length\n"); - return -EINVAL; - } - - num_src_sgs = tegra_se_count_sgs(req->src, req->src_len); - num_dst_sgs = tegra_se_count_sgs(req->dst, req->dst_len); - if ((num_src_sgs > SE_MAX_SRC_SG_COUNT) || - (num_dst_sgs > SE_MAX_DST_SG_COUNT)) { - dev_err(se_dev->dev, "num of SG buffers are more\n"); - return -EDOM; - } - - mutex_lock(&se_dev->mtx); - se_dev->src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf); - - if (req->src == req->dst) { - se_dev->dst_ll = se_dev->src_ll; - ret1 = tegra_map_sg(se_dev->dev, req->src, 1, DMA_BIDIRECTIONAL, - se_dev->src_ll, req->src_len); - if (!ret1) { - mutex_unlock(&se_dev->mtx); - return -EINVAL; - } - } else { - se_dev->dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf); - ret1 = tegra_map_sg(se_dev->dev, req->src, 1, DMA_TO_DEVICE, - se_dev->src_ll, req->src_len); - ret2 = tegra_map_sg(se_dev->dev, req->dst, 1, DMA_FROM_DEVICE, - se_dev->dst_ll, req->dst_len); - if (!ret1 || !ret2) { - mutex_unlock(&se_dev->mtx); - return -EINVAL; - } - } - - ret1 = tegra_se_send_rsa_data(se_dev, rsa_ctx); - if (ret1) - dev_err(se_dev->dev, "RSA send data failed err = %d\n", ret1); - - if (req->src == req->dst) { - tegra_unmap_sg(se_dev->dev, req->src, DMA_BIDIRECTIONAL, - req->src_len); - } else { - tegra_unmap_sg(se_dev->dev, req->src, DMA_TO_DEVICE, - req->src_len); - tegra_unmap_sg(se_dev->dev, req->dst, DMA_FROM_DEVICE, - req->dst_len); - } - - mutex_unlock(&se_dev->mtx); - return ret1; -} - -static void tegra_se_rsa_exit(struct crypto_akcipher *tfm) -{ - struct tegra_se_aes_rsa_context *ctx = akcipher_tfm_ctx(tfm); - - tegra_se_rsa_free_key_slot(ctx->slot); - ctx->slot = NULL; -} - -static inline struct tegra_se_dh_context *tegra_se_dh_get_ctx( - struct crypto_kpp *tfm) -{ - return kpp_tfm_ctx(tfm); -} - -static int tegra_se_dh_check_params_length(unsigned int p_len) -{ - if (p_len < MIN_DH_SZ_BITS) { - pr_err("DH Modulus length not in range\n"); - return -EDOM; - } - - return 0; -} - -static int tegra_se_dh_set_params(struct tegra_se_dh_context *ctx, - struct dh *params) -{ - int ret = 0; - - ret = tegra_se_dh_check_params_length(params->p_size << 3); - if (ret) - return ret; - - ctx->key = (void *)params->key; - ctx->key_size = params->key_size; - if (!ctx->key) { - dev_err(ctx->se_dev->dev, "Invalid DH Key\n"); - return -ENODATA; - } - - ctx->p = (void *)params->p; - ctx->p_size = params->p_size; - if (!ctx->p) { - dev_err(ctx->se_dev->dev, "Invalid DH Modulus\n"); - return -ENODATA; - } - - ctx->g = (void *)params->g; - ctx->g_size = params->g_size; - if (!ctx->g) { - dev_err(ctx->se_dev->dev, "Invalid DH generator\n"); - return -ENODATA; - } - - if (ctx->g_size > ctx->p_size) { - dev_err(ctx->se_dev->dev, "Invalid DH generator size\n"); - return -EDOM; - } - - return 0; -} - -static int tegra_se_dh_setkey(struct crypto_kpp *tfm) -{ - struct tegra_se_dh_context *ctx = tegra_se_dh_get_ctx(tfm); - struct tegra_se_dev *se_dev; - struct tegra_se_cmdbuf *cmdbuf; - u32 module_key_length = 0; - u32 exponent_key_length = 0; - u32 pkt, val; - u32 key_size_words; - u32 key_word_size = 4; - u32 *pkeydata; - struct tegra_se_rsa_slot *pslot; - u32 cmdbuf_num_words = 0; - int i = 0, err, j; - - if (!ctx) { - pr_err("Invalid DH context\n"); - return -EINVAL; - } - - se_dev = ctx->se_dev; - pkeydata = (u32 *)ctx->key; - - /* Allocate rsa key slot */ - if (!ctx->slot) { - pslot = tegra_se_alloc_rsa_key_slot(); - if (!pslot) { - dev_err(se_dev->dev, "no free key slot\n"); - return -ENOMEM; - } - ctx->slot = pslot; - } - - module_key_length = ctx->p_size; - 
-static int tegra_se_dh_setkey(struct crypto_kpp *tfm)
-{
-	struct tegra_se_dh_context *ctx = tegra_se_dh_get_ctx(tfm);
-	struct tegra_se_dev *se_dev;
-	struct tegra_se_cmdbuf *cmdbuf;
-	u32 module_key_length = 0;
-	u32 exponent_key_length = 0;
-	u32 pkt, val;
-	u32 key_size_words;
-	u32 key_word_size = 4;
-	u32 *pkeydata;
-	struct tegra_se_rsa_slot *pslot;
-	u32 cmdbuf_num_words = 0;
-	int i = 0, err, j;
-
-	if (!ctx) {
-		pr_err("Invalid DH context\n");
-		return -EINVAL;
-	}
-
-	se_dev = ctx->se_dev;
-	pkeydata = (u32 *)ctx->key;
-
-	/* Allocate rsa key slot */
-	if (!ctx->slot) {
-		pslot = tegra_se_alloc_rsa_key_slot();
-		if (!pslot) {
-			dev_err(se_dev->dev, "no free key slot\n");
-			return -ENOMEM;
-		}
-		ctx->slot = pslot;
-	}
-
-	module_key_length = ctx->p_size;
-	exponent_key_length = ctx->key_size;
-
-	if (!(((module_key_length / 64) >= 1) &&
-	      ((module_key_length / 64) <= 4))) {
-		tegra_se_rsa_free_key_slot(ctx->slot);
-		dev_err(se_dev->dev, "DH Modulus length not in range\n");
-		return -EDOM;
-	}
-
-	cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, SZ_64K);
-	if (IS_ERR(cmdbuf)) {
-		dev_err(se_dev->dev, "cmdbuf allocation failed\n");
-		return PTR_ERR(cmdbuf);
-	}
-
-	cmdbuf->size = SZ_64K;
-	cmdbuf->addr[i++] = __nvhost_opcode_nonincr(
-		se_dev->opcode_addr + SE_RSA_OPERATION_OFFSET, 1);
-	cmdbuf->addr[i++] = SE_OPERATION_WRSTALL(WRSTALL_TRUE);
-
-	if (exponent_key_length) {
-		key_size_words = (exponent_key_length / key_word_size);
-		/* Write exponent */
-		for (j = (key_size_words - 1); j >= 0; j--) {
-			pkt = RSA_KEY_NUM(ctx->slot->slot_num) |
-			      RSA_KEY_TYPE(RSA_KEY_TYPE_EXP) |
-			      RSA_KEY_PKT_WORD_ADDR(j);
-			val = SE_RSA_KEYTABLE_PKT(pkt);
-			cmdbuf->addr[i++] = __nvhost_opcode_nonincr(
-				se_dev->opcode_addr +
-				SE_RSA_KEYTABLE_ADDR_OFFSET, 1);
-			cmdbuf->addr[i++] = val;
-			cmdbuf->addr[i++] = __nvhost_opcode_nonincr(
-				se_dev->opcode_addr +
-				SE_RSA_KEYTABLE_DATA_OFFSET, 1);
-			cmdbuf->addr[i++] = be32_to_cpu(*pkeydata++);
-		}
-	}
-
-	if (module_key_length) {
-		pkeydata = (u32 *)ctx->p;
-		key_size_words = (module_key_length / key_word_size);
-		/* Write modulus */
-		for (j = (key_size_words - 1); j >= 0; j--) {
-			pkt = RSA_KEY_NUM(ctx->slot->slot_num) |
-			      RSA_KEY_TYPE(RSA_KEY_TYPE_MOD) |
-			      RSA_KEY_PKT_WORD_ADDR(j);
-			val = SE_RSA_KEYTABLE_PKT(pkt);
-			cmdbuf->addr[i++] = __nvhost_opcode_nonincr(
-				se_dev->opcode_addr +
-				SE_RSA_KEYTABLE_ADDR_OFFSET, 1);
-			cmdbuf->addr[i++] = val;
-			cmdbuf->addr[i++] = __nvhost_opcode_nonincr(
-				se_dev->opcode_addr +
-				SE_RSA_KEYTABLE_DATA_OFFSET, 1);
-			cmdbuf->addr[i++] = be32_to_cpu(*pkeydata++);
-		}
-	}
-
-	cmdbuf->addr[i++] = __nvhost_opcode_nonincr(
-		se_dev->opcode_addr + SE_RSA_OPERATION_OFFSET, 1);
-	cmdbuf->addr[i++] = SE_OPERATION_WRSTALL(WRSTALL_TRUE) |
-			    SE_OPERATION_LASTBUF(LASTBUF_TRUE) |
-			    SE_OPERATION_OP(OP_DUMMY);
-	cmdbuf_num_words = i;
-
-	err = tegra_se_channel_submit_gather(se_dev, cmdbuf,
-					     0, cmdbuf_num_words,
-					     NONE);
-	if (err) {
-		dev_err(se_dev->dev, "%s: channel_submit failed\n", __func__);
-		tegra_se_rsa_free_key_slot(ctx->slot);
-	}
-
-	return err;
-}
-
-static void tegra_se_fix_endianness(struct tegra_se_dev *se_dev,
-				    struct scatterlist *sg, u32 num_sgs,
-				    unsigned int nbytes, bool be)
-{
-	int j, k;
-
-	sg_copy_to_buffer(sg, num_sgs, se_dev->dh_buf1, nbytes);
-
-	for (j = (nbytes / 4 - 1), k = 0; j >= 0; j--, k++) {
-		if (be)
-			se_dev->dh_buf2[k] = be32_to_cpu(se_dev->dh_buf1[j]);
-		else
-			se_dev->dh_buf2[k] = cpu_to_be32(se_dev->dh_buf1[j]);
-	}
-
-	sg_copy_from_buffer(sg, num_sgs, se_dev->dh_buf2, nbytes);
-}
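tegra_se_fix_endianness() both byte-swaps each 32-bit word and reverses the word order, converting between the big-endian byte strings used by the kernel crypto API and the little-endian word layout the RSA engine consumes. An equivalent flat-buffer sketch (on a little-endian host, for illustration):

#include <stdint.h>
#include <stddef.h>

/* Convert an nbytes-long big-endian integer (as a byte string) into
 * 32-bit words, least-significant word first. nbytes must be a
 * multiple of 4; in and out must not overlap. */
static void swap_words(uint32_t *out, const uint8_t *in, size_t nbytes)
{
	size_t words = nbytes / 4, k;

	for (k = 0; k < words; k++) {
		const uint8_t *p = in + (words - 1 - k) * 4;

		out[k] = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
			 ((uint32_t)p[2] << 8) | p[3];
	}
}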
-static int tegra_se_dh_compute_value(struct kpp_request *req)
-{
-	struct crypto_kpp *tfm = NULL;
-	struct tegra_se_dh_context *dh_ctx = NULL;
-	struct tegra_se_dev *se_dev;
-	struct scatterlist *src_sg;
-	struct tegra_se_cmdbuf *cmdbuf;
-	u32 num_src_sgs, num_dst_sgs;
-	u8 *base_buff = NULL;
-	struct scatterlist src;
-	u32 cmdbuf_num_words = 0, i = 0;
-	int err, j;
-	unsigned int total, zpad_sz;
-	u32 val;
-
-	if (!req) {
-		pr_err("Invalid DH request\n");
-		return -EINVAL;
-	}
-
-	tfm = crypto_kpp_reqtfm(req);
-	if (!tfm) {
-		pr_err("Invalid DH transform\n");
-		return -EINVAL;
-	}
-
-	dh_ctx = tegra_se_dh_get_ctx(tfm);
-	if (!dh_ctx || !dh_ctx->slot) {
-		pr_err("Invalid DH context\n");
-		return -EINVAL;
-	}
-
-	se_dev = dh_ctx->se_dev;
-
-	if (req->src) {
-		src_sg = req->src;
-		total = req->src_len;
-	} else {
-		if (dh_ctx->g_size < dh_ctx->p_size) {
-			base_buff = (u8 *)devm_kzalloc(se_dev->dev,
-						       dh_ctx->p_size, GFP_KERNEL);
-			if (!base_buff)
-				return -ENOMEM;
-			zpad_sz = dh_ctx->p_size - dh_ctx->g_size;
-
-			for (j = 0; j < zpad_sz; j++)
-				base_buff[j] = 0x0;
-			for (j = zpad_sz; j < dh_ctx->p_size; j++)
-				base_buff[j] = *(u8 *)(dh_ctx->g++);
-
-			dh_ctx->g_size = dh_ctx->p_size;
-		} else {
-			base_buff = (u8 *)devm_kzalloc(se_dev->dev,
-						       dh_ctx->g_size, GFP_KERNEL);
-			if (!base_buff)
-				return -ENOMEM;
-			memcpy(base_buff, (u8 *)(dh_ctx->g), dh_ctx->g_size);
-		}
-
-		sg_init_one(&src, base_buff, dh_ctx->g_size);
-
-		src_sg = &src;
-		total = dh_ctx->g_size;
-	}
-
-	num_src_sgs = tegra_se_count_sgs(src_sg, total);
-	num_dst_sgs = tegra_se_count_sgs(req->dst, req->dst_len);
-	if ((num_src_sgs > SE_MAX_SRC_SG_COUNT) ||
-	    (num_dst_sgs > SE_MAX_DST_SG_COUNT)) {
-		dev_err(se_dev->dev, "number of SG buffers exceeds limit\n");
-		err = -EDOM;
-		goto free;
-	}
-
-	tegra_se_fix_endianness(se_dev, src_sg, num_src_sgs, total, true);
-
-	se_dev->src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf);
-	se_dev->dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf);
-
-	err = tegra_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE,
-			   se_dev->src_ll, total);
-	if (!err) {
-		err = -EINVAL;
-		goto free;
-	}
-	err = tegra_map_sg(se_dev->dev, req->dst, 1, DMA_FROM_DEVICE,
-			   se_dev->dst_ll, req->dst_len);
-	if (!err) {
-		err = -EINVAL;
-		goto unmap_src;
-	}
-
-	cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, SZ_4K);
-	if (IS_ERR(cmdbuf)) {
-		dev_err(se_dev->dev, "cmdbuf allocation failed\n");
-		err = PTR_ERR(cmdbuf);
-		goto unmap_dst;
-	}
-
-	cmdbuf->size = SZ_4K;
-	cmdbuf->addr[i++] = __nvhost_opcode_nonincr(
-		se_dev->opcode_addr + SE_RSA_OPERATION_OFFSET, 1);
-	cmdbuf->addr[i++] = SE_OPERATION_WRSTALL(WRSTALL_TRUE);
-
-	val = SE_CONFIG_ENC_ALG(ALG_RSA) | SE_CONFIG_DEC_ALG(ALG_NOP) |
-	      SE_CONFIG_DST(DST_MEMORY);
-	cmdbuf->addr[i++] = __nvhost_opcode_incr(se_dev->opcode_addr, 8);
-	cmdbuf->addr[i++] = val;
-	cmdbuf->addr[i++] = RSA_KEY_SLOT(dh_ctx->slot->slot_num);
-	cmdbuf->addr[i++] = (dh_ctx->p_size / 64) - 1;
-	cmdbuf->addr[i++] = (dh_ctx->key_size / 4);
-	cmdbuf->addr[i++] = (u32)(se_dev->src_ll->addr);
-	cmdbuf->addr[i++] = (u32)(SE_ADDR_HI_MSB(MSB(se_dev->src_ll->addr)) |
-				  SE_ADDR_HI_SZ(se_dev->src_ll->data_len));
-	cmdbuf->addr[i++] = (u32)(se_dev->dst_ll->addr);
-	cmdbuf->addr[i++] = (u32)(SE_ADDR_HI_MSB(MSB(se_dev->dst_ll->addr)) |
-				  SE_ADDR_HI_SZ(se_dev->dst_ll->data_len));
-
-	cmdbuf->addr[i++] = __nvhost_opcode_nonincr(
-		se_dev->opcode_addr + SE_RSA_OPERATION_OFFSET, 1);
-	cmdbuf->addr[i++] = SE_OPERATION_WRSTALL(WRSTALL_TRUE) |
-			    SE_OPERATION_LASTBUF(LASTBUF_TRUE) |
-			    SE_OPERATION_OP(OP_START);
-
-	cmdbuf_num_words = i;
-
-	err = tegra_se_channel_submit_gather(
-		se_dev, cmdbuf, 0,
-		cmdbuf_num_words, NONE);
-	if (err) {
-		dev_err(se_dev->dev, "%s: channel_submit failed\n", __func__);
-		goto unmap_dst;
-	}
-
-	tegra_se_fix_endianness(se_dev, req->dst, num_dst_sgs,
-				req->dst_len, false);
-unmap_dst:
-	tegra_unmap_sg(se_dev->dev, req->dst, DMA_FROM_DEVICE, req->dst_len);
-unmap_src:
-	tegra_unmap_sg(se_dev->dev, src_sg, DMA_TO_DEVICE, total);
-free:
-	if (!req->src)
-		devm_kfree(se_dev->dev, base_buff);
-
-	return err;
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
-static int tegra_se_dh_set_secret(struct crypto_kpp *tfm,
-				  const void *buf,
-				  unsigned int len)
-#else
-static int tegra_se_dh_set_secret(struct crypto_kpp *tfm, void *buf,
-				  unsigned int len)
-#endif
-{
-	int ret = 0;
-
-	struct tegra_se_dh_context *ctx = tegra_se_dh_get_ctx(tfm);
-	struct dh params;
-
-	ctx->se_dev = se_devices[SE_RSA];
-
-	ret = crypto_dh_decode_key(buf, len, &params);
-	if (ret) {
-		dev_err(ctx->se_dev->dev, "failed to decode DH input\n");
-		return ret;
-	}
-
-	ret = tegra_se_dh_set_params(ctx, &params);
-	if (ret)
-		return ret;
-
-	ret = tegra_se_dh_setkey(tfm);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
-static unsigned int tegra_se_dh_max_size(struct crypto_kpp *tfm)
-#else
-static int tegra_se_dh_max_size(struct crypto_kpp *tfm)
-#endif
-{
-	struct tegra_se_dh_context *ctx = tegra_se_dh_get_ctx(tfm);
-
-	return ctx->p_size;
-}
-
-static void tegra_se_dh_exit_tfm(struct crypto_kpp *tfm)
-{
-	struct tegra_se_dh_context *ctx = tegra_se_dh_get_ctx(tfm);
-
-	tegra_se_rsa_free_key_slot(ctx->slot);
-
-	ctx->key = NULL;
-	ctx->p = NULL;
-	ctx->g = NULL;
-}
-
-static int tegra_se_aes_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
-				   unsigned int keylen)
-{
-	struct tegra_se_aes_ccm_ctx *ctx = crypto_aead_ctx(tfm);
-	struct tegra_se_dev *se_dev;
-	struct tegra_se_slot *pslot;
-	struct tegra_se_cmdbuf *cmdbuf;
-	u32 *cpuvaddr = NULL;
-	dma_addr_t iova = 0;
-	int ret = 0;
-
-	se_dev = se_devices[SE_AEAD];
-
-	mutex_lock(&se_dev->mtx);
-
-	/* Check for valid arguments */
-	if (!ctx || !key) {
-		dev_err(se_dev->dev, "invalid context or key");
-		ret = -EINVAL;
-		goto free_ctx;
-	}
-
-	if ((keylen != TEGRA_SE_KEY_128_SIZE) &&
-	    (keylen != TEGRA_SE_KEY_192_SIZE) &&
-	    (keylen != TEGRA_SE_KEY_256_SIZE)) {
-		dev_err(se_dev->dev, "invalid key size");
-		ret = -EINVAL;
-		goto free_ctx;
-	}
-
-	/* Get free key slot */
-	pslot = tegra_se_alloc_key_slot();
-	if (!pslot) {
-		dev_err(se_dev->dev, "no free key slot\n");
-		ret = -ENOMEM;
-		goto free_ctx;
-	}
-	ctx->slot = pslot;
-	ctx->keylen = keylen;
-
-	/* Get free command buffer */
-	cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, SZ_16K);
-	if (IS_ERR(cmdbuf)) {
-		dev_err(se_dev->dev, "cmdbuf allocation failed\n");
-		ret = PTR_ERR(cmdbuf);
-		goto free_ctx;
-	}
-
-	cpuvaddr = cmdbuf->addr;
-	iova = cmdbuf->iova;
-
-	ret = tegra_se_send_key_data(
-		se_dev, (u8 *)key, keylen, ctx->slot->slot_num,
-		SE_KEY_TABLE_TYPE_KEY, se_dev->opcode_addr,
-		cmdbuf, NONE);
-
-	tegra_se_free_key_slot(ctx->slot);
-
-free_ctx:
-	mutex_unlock(&se_dev->mtx);
-
-	return ret;
-}
-
-static int tegra_se_aes_ccm_setauthsize(struct crypto_aead *tfm,
-					unsigned int authsize)
-{
-	struct tegra_se_aes_ccm_ctx *ctx = crypto_aead_ctx(tfm);
-
-	switch (authsize) {
-	case 4:
-	case 8:
-	case 10:
-	case 12:
-	case 14:
-	case 16:
-		ctx->authsize = authsize;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static inline int tegra_se_ccm_check_iv(const u8 *iv)
-{
-	/* iv[0] gives the value of q-1
-	 * 2 <= q <= 8 as per NIST 800-38C notation
-	 * 2 <= L <= 8, so 1 <= L' <= 7, as per RFC 3610 notation
-	 */
-	if (iv[0] < 1 || iv[0] > 7) {
-		pr_err("ccm_check_iv failed %d\n", iv[0]);
-		return -EINVAL;
-	}
-
-	return 0;
-}
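The IV handed to these CCM routines is already the formatted counter block: byte 0 holds the flags (here just q-1), the next 15-q bytes the nonce N, and the trailing q bytes the counter, which init_crypt below zeroes to form ctr_0. A compact sketch of that layout and the zeroing, with a hypothetical helper name:

#include <string.h>

/* req->iv layout per NIST 800-38C / RFC 3610:
 *   iv[0]         = q - 1           (so 1 <= iv[0] <= 7)
 *   iv[1..15-q]   = nonce N
 *   iv[16-q..15]  = counter, zeroed here to obtain ctr_0
 */
static void ccm_make_ctr0(unsigned char iv[16])
{
	unsigned int q = iv[0] + 1;

	memset(iv + 16 - q, 0, q);	/* same as memset(iv + 15 - iv[0], 0, iv[0] + 1) */
}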
-static int tegra_se_ccm_init_crypt(struct aead_request *req)
-{
-	u8 *iv = req->iv;
-	int err;
-
-	err = tegra_se_ccm_check_iv(iv);
-	if (err)
-		return err;
-
-	/* Note: RFC 3610 and NIST 800-38C require a counter (ctr_0) of
-	 * zero to encrypt the auth tag.
-	 * req->iv has the formatted ctr_0 (i.e. Flags || N || 0).
-	 */
-	memset(iv + 15 - iv[0], 0, iv[0] + 1);
-
-	return 0;
-}
-
-static int ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
-{
-	__be32 data;
-
-	memset(block, 0, csize);
-	block += csize;
-
-	if (csize >= 4)
-		csize = 4;
-	else if (msglen > (1 << (8 * csize)))
-		return -EOVERFLOW;
-
-	data = cpu_to_be32(msglen);
-	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
-
-	return 0;
-}
-
-/**
- * @brief Encode B0 block as per below format (16 bytes):
- *
- * +----------------------------------------------------+
- * | Octet # |   0   | [1 .. (15-q)] | [(16-q) .. 15]   |
- * +----------------------------------------------------+
- * | Content | Flags |       N       |        Q         |
- * +----------------------------------------------------+
- *
- * 1. "Flags" octet format (byte 0 of big endian B0):
- *
- * +----------------------------------------------------------------+
- * | Bit no. |    7     |   6   |  5  |  4  |  3  |  2  |  1  |  0  |
- * +----------------------------------------------------------------+
- * | Content | reserved | Adata |     (t-2)/2     |       q-1       |
- * +----------------------------------------------------------------+
- *
- * Bit 7 => always zero
- * Bit 6 => flag for AAD presence
- * Bits [5..3] => one of the valid values of element t (mac length)
- * Bits [2..0] => one of the valid values of element q (bytes needed to
- * encode the message length), already present in iv[0].
- *
- * 2. N - nonce present in req->iv.
- *
- * 3. Q - contains the msg length (in bytes) in q octets of space.
- */
-static int ccm_encode_b0(u8 *cinfo, struct aead_request *req,
-			 unsigned int cryptlen)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct tegra_se_aes_ccm_ctx *ctx = crypto_aead_ctx(tfm);
-	unsigned int q, t;
-	u8 *Q_ptr;
-	int ret;
-
-	memcpy(cinfo, req->iv, 16);
-
-	/*** 1. Prepare Flags Octet ***/
-
-	/* Adata */
-	if (req->assoclen)
-		*cinfo |= (1 << 6);
-
-	/* Encode t (mac length) */
-	t = ctx->authsize;
-	*cinfo |= (((t - 2) / 2) << 3);
-
-	/* q already present in iv[0], do nothing */
-
-	/*** 2. N is already present in iv, do nothing ***/
-
-	/*** 3. Encode Q - message length ***/
-	q = req->iv[0] + 1;
-	Q_ptr = cinfo + 16 - q;
-
-	ret = ccm_set_msg_len(Q_ptr, cryptlen, q);
-
-	return ret;
-}
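Putting the B0 rules together with a worked value: for a 4-byte length field (iv[0] = 3, so q = 4), an 8-byte MAC (t = 8) and non-empty AAD, the flags octet is 0x40 | ((8-2)/2 << 3) | 3 = 0x5b. A minimal sketch of just the flags computation, with a hypothetical helper name:

#include <stdint.h>

/* Flags octet of B0: [7]=0, [6]=Adata, [5:3]=(t-2)/2, [2:0]=q-1 */
static uint8_t ccm_b0_flags(int have_aad, unsigned int authsize,
			    unsigned int q)
{
	uint8_t flags = (uint8_t)(q - 1);

	flags |= (uint8_t)(((authsize - 2) / 2) << 3);
	if (have_aad)
		flags |= 1 << 6;
	return flags;
}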
-/**
- * @brief Encode AAD length
- *
- * Function returns the number of bytes used in the encoding.
- * The specs set the AAD length encoding limits as follows:
- *
- * 0 < a < (2^16 - 2^8)        => two bytes, no prefix
- * (2^16 - 2^8) <= a < (2^32)  => six bytes, prefix: 0xFF || 0xFE
- *
- * Note that zero-length AAD is not encoded at all; bit 6
- * in the B0 flags is zero if there is no AAD.
- */
-static int ccm_encode_adata_len(u8 *adata, unsigned int a)
-{
-	int len = 0;
-
-	/* add control info for associated data
-	 * RFC 3610 and NIST Special Publication 800-38C
-	 */
-
-	if (a < 65280) {
-		*(__be16 *)adata = cpu_to_be16(a);
-		len = 2;
-	} else {
-		*(__be16 *)adata = cpu_to_be16(0xfffe);
-		*(__be32 *)&adata[2] = cpu_to_be32(a);
-		len = 6;
-	}
-
-	return len;
-}
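As a worked example of the two-tier encoding above: a = 300 (below 0xff00) encodes as the two bytes 01 2c, while a = 65536 crosses the threshold and encodes as ff fe 00 01 00 00, i.e. the 0xff 0xfe prefix followed by the length as a big-endian 32-bit value.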
- */ - if (cryptlen + count <= sg->length) - pt_bytes = cryptlen; - else - pt_bytes = sg->length - count; - - sg_start = sg; - mapped_len = sg->length; - sg = sg_next(sg); - break; - } - count -= min_t(size_t, (size_t)sg->length, - (size_t)count); - sg = sg_next(sg); - } - - /* 3.2 Map the plain text buffer */ - if (pt_bytes) { - src_ll->addr = pt_addr; - src_ll->data_len = pt_bytes; - src_ll++; - } - - count = cryptlen - pt_bytes; - while (count) { - ret = dma_map_sg(se_dev->dev, sg, 1, DMA_TO_DEVICE); - if (!ret) { - dev_err(se_dev->dev, "dma_map_sg error\n"); - goto out; - } - src_ll->addr = sg_dma_address(sg); - src_ll->data_len = min_t(size_t, - (size_t)sg->length, (size_t)count); - count -= min_t(size_t, (size_t)sg->length, (size_t)count); - mapped_len += sg->length; - sg = sg_next(sg); - src_ll++; - } - total += cryptlen; - - /* 3.3 - Map padding data */ - pad_bytes_len = 16 - (cryptlen % 16); - if (pad_bytes_len) { - memset(ctx->buf[3], 0, pad_bytes_len); - src_ll->addr = ctx->buf_addr[3]; - src_ll->data_len = pad_bytes_len; - src_ll++; - } - total += pad_bytes_len; - - /* 4 - Compute CBC_MAC */ - cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, SZ_16K); - if (IS_ERR(cmdbuf)) { - dev_err(se_dev->dev, "cmdbuf allocation failed\n"); - ret = PTR_ERR(cmdbuf); - goto out; - } - - cpuvaddr = cmdbuf->addr; - iova = cmdbuf->iova; - se_dev->dst_ll = se_dev->src_ll; /* dummy */ - - req_ctx->op_mode = SE_AES_OP_MODE_CBC_MAC; - req_ctx->config = tegra_se_get_config(se_dev, req_ctx->op_mode, - 0, ctx->keylen); - req_ctx->crypto_config = tegra_se_get_crypto_config( - se_dev, req_ctx->op_mode, 0, - ctx->slot->slot_num, 0, true); - - tegra_se_send_data(se_dev, req_ctx, NULL, total, se_dev->opcode_addr, - cpuvaddr); - ret = tegra_se_channel_submit_gather(se_dev, cmdbuf, - 0, se_dev->cmdbuf_cnt, NONE); - - /* 4.1 - Read result */ - ret = tegra_se_read_cmac_result(se_dev, ctx->mac, - TEGRA_SE_AES_CBC_MAC_DIGEST_SIZE, false); - if (ret) { - dev_err(se_dev->dev, "failed to read cmac result\n"); - goto out; - } - - ret = tegra_se_clear_cmac_result(se_dev, - TEGRA_SE_AES_CBC_MAC_DIGEST_SIZE); - if (ret) { - dev_err(se_dev->dev, "failed to clear cmac result\n"); - goto out; - } - - /* 5 - Clean up */ - tegra_unmap_sg(se_dev->dev, sg_start, DMA_TO_DEVICE, mapped_len); -out: - if (assoclen) - dma_free_coherent(se_dev->dev, assoclen, adata, adata_addr); - - return ret; -} - -static int ccm_ctr_extract_encrypted_mac(struct aead_request *req) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct tegra_se_aes_ccm_ctx *ctx = crypto_aead_ctx(tfm); - u32 num_sgs; - - /* Extract cipher mac from buf */ - memset(ctx->enc_mac, 0, 16); /* Padding zeros will be taken care */ - - num_sgs = tegra_se_count_sgs(req->src, req->cryptlen + req->assoclen); - sg_pcopy_to_buffer(req->src, num_sgs, ctx->enc_mac, - ctx->authsize, - req->cryptlen + req->assoclen - ctx->authsize); - - return 0; -} - -static int tegra_se_ccm_ctr(struct aead_request *req, bool encrypt) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct tegra_se_aes_ccm_ctx *ctx = crypto_aead_ctx(tfm); - struct tegra_se_req_context *req_ctx = aead_request_ctx(req); - struct tegra_se_ll *src_ll, *dst_ll; - struct tegra_se_cmdbuf *cmdbuf; - struct scatterlist *src_sg, *dst_sg, *src_sg_start; - unsigned int assoclen, cryptlen, total, pad_bytes_len; - unsigned int mapped_cryptlen, mapped_len; - int ret = 0, count, index = 0, num_sgs; - struct tegra_se_dev *se_dev; - u8 *dst_buf; - dma_addr_t dst_buf_dma_addr; - - se_dev = se_devices[SE_AEAD]; - - 
cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, - SZ_16K * SE_MAX_SUBMIT_CHAIN_SZ); - if (IS_ERR(cmdbuf)) { - dev_err(se_dev->dev, "cmdbuf allocation failed\n"); - return PTR_ERR(cmdbuf); - } - - /* 0. Setup */ - assoclen = req->assoclen; - cryptlen = req->cryptlen; - se_dev->src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf); - se_dev->dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf); - src_ll = se_dev->src_ll; - dst_ll = se_dev->dst_ll; - total = 0; mapped_cryptlen = 0; mapped_len = 0; - if (encrypt) - cryptlen = req->cryptlen; - else - cryptlen = req->cryptlen - ctx->authsize; - - /* If destination buffer is scattered over multiple sg, destination - * buffer splits need not be same as src_sg so create - * separate buffer for destination. Onebyte extra to handle zero - * cryptlen case. - */ - dst_buf = dma_alloc_coherent(se_dev->dev, cryptlen+1, &dst_buf_dma_addr, - (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC); - if (!dst_buf) - return -ENOMEM; - - - /* 1. Add mac to src */ - if (encrypt) { - /* 1.1 Add mac to src_ll list */ - src_ll->addr = ctx->mac_addr; - src_ll->data_len = 16; - src_ll++; - total += 16; - - /* 1.2 Add corresponding entry to dst_ll list */ - dst_ll->addr = ctx->enc_mac_addr; - dst_ll->data_len = 16; - dst_ll++; - } else { - /* 1.0 Extract encrypted mac to ctx->enc_mac */ - ccm_ctr_extract_encrypted_mac(req); - - /* 1.1 Add encrypted mac to src_ll list */ - src_ll->addr = ctx->enc_mac_addr; - src_ll->data_len = 16; - src_ll++; - total += 16; - - /* 1.2 Add corresponding entry to dst_ll list */ - memset(ctx->dec_mac, 0, 16); - dst_ll->addr = ctx->dec_mac_addr; - dst_ll->data_len = 16; - dst_ll++; - } - - /* 2. Add plain text to src */ - src_sg = req->src; - src_sg_start = src_sg; - dst_sg = req->dst; - /* 2.1 Iterative over assoclen, to skip it for encryption */ - count = assoclen; - while (count) { - /* If a SG has both assoc data and PT/CT data */ - if (count < src_sg->length) { - ret = dma_map_sg(se_dev->dev, src_sg, 1, - DMA_TO_DEVICE); - if (!ret) { - pr_err("dma_map_sg error\n"); - goto out; - } - - /* Fill PT/CT src list details from this sg */ - src_ll->addr = sg_dma_address(src_sg) + count; - /* SG length can be more than cryptlen and associated - * data, in this case encrypted length should be - * cryptlen. - */ - if (cryptlen + count <= src_sg->length) - src_ll->data_len = cryptlen; - else - src_ll->data_len = src_sg->length - count; - mapped_cryptlen = src_ll->data_len; - mapped_len = src_sg->length; - - /* Fill dst list details */ - dst_ll->addr = dst_buf_dma_addr; - dst_ll->data_len = src_ll->data_len; - index += src_ll->data_len; - dst_ll++; - src_ll++; - - /* Store from where src_sg is mapped, so it can be - * unmapped. 
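-			 * tegra_unmap_sg() in the cleanup path walks the
-			 * list again from src_sg_start and unmaps
-			 * mapped_len bytes in one go.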
- */ - src_sg_start = src_sg; - src_sg = sg_next(src_sg); - break; - } - - /* If whole SG is associated data iterate over it */ - count -= min_t(size_t, (size_t)src_sg->length, (size_t)count); - src_sg = sg_next(src_sg); - } - - /* 2.2 Add plain text to src_ll and dst_ll list */ - count = cryptlen - mapped_cryptlen; - while (count) { - ret = dma_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); - if (!ret) { - dev_err(se_dev->dev, "dma_map_sg error\n"); - goto out; - } - src_ll->addr = sg_dma_address(src_sg); - src_ll->data_len = min_t(size_t, - (size_t)src_sg->length, (size_t)count); - dst_ll->addr = dst_buf_dma_addr + index; - dst_ll->data_len = src_ll->data_len; - index += dst_ll->data_len; - count -= min_t(size_t, (size_t)src_sg->length, (size_t)count); - mapped_len += src_sg->length; - src_sg = sg_next(src_sg); - src_ll++; - dst_ll++; - } - total += cryptlen; - - /* 3. Pad necessary zeros */ - memset(ctx->buf[0], 0, 16); - pad_bytes_len = 16 - (cryptlen % 16); - if (pad_bytes_len) { - src_ll->addr = ctx->buf_addr[0]; - src_ll->data_len = pad_bytes_len; - src_ll++; - dst_ll->addr = ctx->buf_addr[0]; - dst_ll->data_len = pad_bytes_len; - dst_ll++; - } - total += pad_bytes_len; - - /* 4. Encrypt/Decrypt using CTR */ - req_ctx->op_mode = SE_AES_OP_MODE_CTR; - req_ctx->config = tegra_se_get_config(se_dev, req_ctx->op_mode, - 0, ctx->keylen); - req_ctx->crypto_config = tegra_se_get_crypto_config( - se_dev, req_ctx->op_mode, 0, - ctx->slot->slot_num, 0, true); - - tegra_se_send_ctr_seed(se_dev, (u32 *)req->iv, se_dev->opcode_addr, - cmdbuf->addr); - - tegra_se_send_data(se_dev, req_ctx, NULL, total, se_dev->opcode_addr, - cmdbuf->addr); - ret = tegra_se_channel_submit_gather(se_dev, - cmdbuf, - 0, se_dev->cmdbuf_cnt, NONE); - - num_sgs = tegra_se_count_sgs(req->dst, assoclen + cryptlen); - sg_pcopy_from_buffer(req->dst, num_sgs, dst_buf, cryptlen, - assoclen); -out: - /* 5 Clean up */ - tegra_unmap_sg(se_dev->dev, src_sg_start, DMA_TO_DEVICE, mapped_len); - dma_free_coherent(se_dev->dev, cryptlen+1, dst_buf, dst_buf_dma_addr); - - return ret; -} - -static void ccm_ctr_add_encrypted_mac_to_dest(struct aead_request *req) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct tegra_se_aes_ccm_ctx *ctx = crypto_aead_ctx(tfm); - struct scatterlist *dst_sg; - u32 num_sgs; - - dst_sg = req->dst; - num_sgs = tegra_se_count_sgs(dst_sg, - req->assoclen + req->cryptlen + ctx->authsize); - sg_pcopy_from_buffer(dst_sg, num_sgs, ctx->enc_mac, - ctx->authsize, req->assoclen + req->cryptlen); - -} - -/* - * CCM Generation-Encryption operation. - * - * Input: - * valid nonce N of length Nlen bits or n octet length. - * valid payload P of length Plen bits. - * valid associated data A of length Alen bits or a octet length. - * - * Output: - * ciphertext C. - * authentication tag T of length Tlen bits or t octet length. - * - * Steps: - * 1. Apply the formatting function to (N, A, P) to produce the blocks - * B0, B1, B2, ..., Br. Each block is 16bytes and 128bit length. - * 2. Set Y0 = CIPH_K(B0). - * 3. For i = 1 to r, do Yi = CIPH_K(Bi ⊕ Yi-1). - * 4. Set T = MSBTlen(Yr). - * 5. Apply the counter generation function to generate the counter - * blocks Ctr0, Ctr1,..., Ctrm, where m = ⎡Plen/128⎤. - * 6. For j = 0 to m, do Sj = CIPH_K(Ctrj). - * 7. Set S = S1 || S2 || ...|| Sm. - * 8. Return C = (P ⊕ MSBPlen(S)) || (T ⊕ MSBTlen(S0)). - * - * req->iv has the formatted ctr0 (i.e. Flags || N || 0) as per - * NIST 800-38C and rfc 3610 notation. 
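- *
- * In this driver, steps 1-4 are implemented by
- * tegra_se_ccm_compute_auth() (the CBC-MAC pass) and steps 5-8 by
- * tegra_se_ccm_ctr() (the CTR pass). The CTR pass submits one
- * contiguous stream, MAC block || payload || zero padding, seeded
- * with Ctr0, so the first keystream block S0 masks the MAC
- * (T ^ MSB_Tlen(S0)) while the payload consumes S1..Sm, matching
- * step 8 above.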
- */ -static int tegra_se_aes_ccm_encrypt(struct aead_request *req) -{ - struct tegra_se_dev *se_dev; - int err; - - se_dev = se_devices[SE_AEAD]; - - mutex_lock(&se_dev->mtx); - err = tegra_se_ccm_init_crypt(req); - if (err) - goto out; - - err = tegra_se_ccm_compute_auth(req, ENCRYPT); - if (err) - goto out; - - err = tegra_se_ccm_ctr(req, ENCRYPT); - if (err) - goto out; - - ccm_ctr_add_encrypted_mac_to_dest(req); -out: - mutex_unlock(&se_dev->mtx); - - return err; -} - -/* - * CCM Decryption-Verification operation. - * - * Input: - * valid nonce N of length Nlen bits or n octet length. - * valid associated data A of length Alen bits or a octet length. - * purported ciphertext C (contains MAC of length Tlen) of length Clen bits. - * - * Output: - * either the payload P or INVALID. - * - * Steps: - * 1. If Clen ≤ Tlen, then return INVALID. - * 2. Apply the counter generation function to generate the counter - * blocks Ctr0, Ctr1,..., Ctrm, where m = ⎡(Clen-Tlen)/128⎤. - * 3. For j = 0 to m, do Sj = CIPH_K(Ctrj). - * 4. Set S = S1 || S2 || ...|| Sm. - * 5. Set P = MSB(Clen-Tlen)(C) ⊕ MSB(Clen-Tlen)(S). - * 6. Set T = LSB(Tlen)(C) ⊕ MSB(Tlen)(S0). - * 7. If N, A, or P is not valid, then return INVALID, else apply the - * formatting function to (N, A, P) to produce the blocks B0, B1,...,Br. - * 8. Set Y0 = CIPH_K(B0). - * 9. For i = 1 to r, do Yi = CIPH_K(Bi ⊕ Yi-1). - * 10. If T ≠ MSBTlen(Yr), then return INVALID, else return P. - * - * req->iv has the formatted ctr0 (i.e. Flags || N || 0) as per - * NIST 800-38C and rfc 3610 notation. - */ -static int tegra_se_aes_ccm_decrypt(struct aead_request *req) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct tegra_se_aes_ccm_ctx *ctx = crypto_aead_ctx(tfm); - struct tegra_se_dev *se_dev; - int err; - - se_dev = se_devices[SE_AEAD]; - - mutex_lock(&se_dev->mtx); - err = tegra_se_ccm_init_crypt(req); - if (err) - goto out; - - err = tegra_se_ccm_ctr(req, DECRYPT); - if (err) - goto out; - - err = tegra_se_ccm_compute_auth(req, DECRYPT); - if (err) - goto out; - - if (crypto_memneq(ctx->mac, ctx->dec_mac, ctx->authsize)) - err = -EBADMSG; -out: - mutex_unlock(&se_dev->mtx); - return err; -} - -static int tegra_se_aes_ccm_init(struct crypto_aead *tfm) -{ - struct tegra_se_aes_ccm_ctx *ctx = crypto_aead_ctx(tfm); - struct tegra_se_dev *se_dev = se_devices[SE_AEAD]; - int i = 0, err = 0; - - crypto_aead_set_reqsize(tfm, sizeof(struct tegra_se_req_context)); - - mutex_lock(&se_dev->mtx); - - for (i = 0; i < 4; i++) { - ctx->buf[i] = dma_alloc_coherent( - se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE, - &ctx->buf_addr[i], GFP_KERNEL); - if (!ctx->buf[i]) { - while (i > 0) { - i--; - dma_free_coherent( - se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE, - ctx->buf[i], ctx->buf_addr[i]); - ctx->buf[i] = NULL; - } - dev_err(se_dev->dev, "Cannot allocate memory to buf[0]\n"); - mutex_unlock(&se_dev->mtx); - return -ENOMEM; - } - } - - ctx->mac = dma_alloc_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE, - &ctx->mac_addr, GFP_KERNEL); - if (!ctx->mac) { - err = -ENOMEM; - goto out; - } - - ctx->enc_mac = dma_alloc_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE, - &ctx->enc_mac_addr, GFP_KERNEL); - if (!ctx->enc_mac) { - err = -ENOMEM; - goto out; - } - - ctx->dec_mac = dma_alloc_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE, - &ctx->dec_mac_addr, GFP_KERNEL); - if (!ctx->dec_mac) { - err = -ENOMEM; - goto out; - } -out: - mutex_unlock(&se_dev->mtx); - return err; -} - -static void tegra_se_aes_ccm_exit(struct crypto_aead *tfm) -{ - struct tegra_se_aes_ccm_ctx *ctx = 
crypto_aead_ctx(tfm); - struct tegra_se_dev *se_dev = se_devices[SE_AES]; - int i = 0; - - mutex_lock(&se_dev->mtx); - tegra_se_free_key_slot(ctx->slot); - ctx->slot = NULL; - for (i = 0; i < 4; i++) { - dma_free_coherent( - se_dev->dev, (TEGRA_SE_AES_BLOCK_SIZE), - ctx->buf[i], ctx->buf_addr[i]); - } - - dma_free_coherent(se_dev->dev, (TEGRA_SE_AES_BLOCK_SIZE), - ctx->mac, ctx->mac_addr); - - dma_free_coherent(se_dev->dev, (TEGRA_SE_AES_BLOCK_SIZE), - ctx->enc_mac, ctx->enc_mac_addr); - - dma_free_coherent(se_dev->dev, (TEGRA_SE_AES_BLOCK_SIZE), - ctx->dec_mac, ctx->dec_mac_addr); - - mutex_unlock(&se_dev->mtx); -} - -static int tegra_se_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, - unsigned int keylen) -{ - struct tegra_se_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); - struct tegra_se_dev *se_dev; - struct tegra_se_slot *pslot; - struct tegra_se_cmdbuf *cmdbuf; - u32 *cpuvaddr = NULL; - dma_addr_t iova = 0; - int ret = 0; - - se_dev = se_devices[SE_AEAD]; - - mutex_lock(&se_dev->mtx); - - /* Check for valid arguments */ - if (!ctx || !key) { - dev_err(se_dev->dev, "invalid context or key"); - ret = -EINVAL; - goto free_ctx; - } - - if ((keylen != TEGRA_SE_KEY_128_SIZE) && - (keylen != TEGRA_SE_KEY_192_SIZE) && - (keylen != TEGRA_SE_KEY_256_SIZE)) { - dev_err(se_dev->dev, "invalid key size"); - ret = -EINVAL; - goto free_ctx; - } - - /* Get free key slot */ - pslot = tegra_se_alloc_key_slot(); - if (!pslot) { - dev_err(se_dev->dev, "no free key slot\n"); - ret = -ENOMEM; - goto free_ctx; - } - ctx->slot = pslot; - ctx->keylen = keylen; - - cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, SZ_16K); - if (IS_ERR(cmdbuf)) { - dev_err(se_dev->dev, "cmdbuf allocation failed\n"); - ret = PTR_ERR(cmdbuf); - goto free_ctx; - } - - cpuvaddr = cmdbuf->addr; - iova = cmdbuf->iova; - - ret = tegra_se_send_key_data( - se_dev, (u8 *)key, keylen, ctx->slot->slot_num, - SE_KEY_TABLE_TYPE_GCM, se_dev->opcode_addr, - cmdbuf, NONE); - - tegra_se_free_key_slot(ctx->slot); - -free_ctx: - mutex_unlock(&se_dev->mtx); - - return ret; -} - -static int tegra_se_aes_gcm_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) -{ - struct tegra_se_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); - - switch (authsize) { - case 4: - case 8: - case 10: - case 12: - case 14: - case 16: - ctx->authsize = authsize; - break; - default: - return -EINVAL; - } - - return 0; -} - -static int tegra_se_gcm_gmac(struct aead_request *req, bool encrypt) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct tegra_se_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); - struct tegra_se_req_context *req_ctx = aead_request_ctx(req); - unsigned int assoclen; - struct tegra_se_dev *se_dev; - struct tegra_se_ll *src_ll; - struct scatterlist *sg; - struct tegra_se_cmdbuf *cmdbuf; - int ret; - u32 *cpuvaddr = NULL; - dma_addr_t iova = 0; - - /* 0 - Setup */ - se_dev = ctx->se_dev; - assoclen = req->assoclen; - sg = req->src; - - /* 1 - Map associated data */ - se_dev->src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf); - src_ll = se_dev->src_ll; - - ret = tegra_map_sg(se_dev->dev, sg, 1, DMA_TO_DEVICE, - se_dev->src_ll, assoclen); - if (!ret) - return ret; - - /* 2 - Prepare command buffer and submit request to hardware */ - cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, SZ_16K); - if (IS_ERR(cmdbuf)) { - dev_err(se_dev->dev, "cmdbuf allocation failed\n"); - ret = PTR_ERR(cmdbuf); - goto out; - } - - cpuvaddr = cmdbuf->addr; - iova = cmdbuf->iova; - se_dev->dst_ll = se_dev->src_ll; /* dummy */ - - /* 2.2 Prepare and submit 
request */ - req_ctx->config = tegra_se_get_config(se_dev, req_ctx->op_mode, - encrypt ? ENCRYPT : DECRYPT, SE_AES_GMAC); - req_ctx->crypto_config = tegra_se_get_crypto_config( - se_dev, req_ctx->op_mode, 0, - ctx->slot->slot_num, 0, true); - - tegra_se_send_gcm_data(se_dev, req_ctx, assoclen, se_dev->opcode_addr, - cpuvaddr, SE_AES_GMAC); - ret = tegra_se_channel_submit_gather(se_dev, cmdbuf, - 0, se_dev->cmdbuf_cnt, NONE); - -out: - /* 3 - Clean up */ - tegra_unmap_sg(se_dev->dev, sg, DMA_TO_DEVICE, assoclen); - - return ret; -} - -static int tegra_se_gcm_op(struct aead_request *req, bool encrypt) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct tegra_se_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); - struct tegra_se_req_context *req_ctx = aead_request_ctx(req); - unsigned int assoclen, cryptlen, mapped_cryptlen, mapped_len; - struct tegra_se_ll *src_ll, *dst_ll; - struct scatterlist *src_sg, *src_sg_start; - struct tegra_se_dev *se_dev; - struct tegra_se_cmdbuf *cmdbuf; - u32 *cpuvaddr = NULL, num_sgs; - dma_addr_t iova = 0; - int ret, index, count; - u32 iv[4]; - u8 *dst_buf; - dma_addr_t dst_buf_dma_addr; - - /* 0 - Setup */ - se_dev = ctx->se_dev; - se_dev->src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf); - se_dev->dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf); - src_ll = se_dev->src_ll; - dst_ll = se_dev->dst_ll; - mapped_cryptlen = 0; - mapped_len = 0; - ret = 0; index = 0; - assoclen = req->assoclen; - if (encrypt) - cryptlen = req->cryptlen; - else - cryptlen = req->cryptlen - ctx->authsize; - - /* If destination buffer is scattered over multiple sg, destination - * buffer splits need not be same as src_sg so create - * separate buffer for destination. Onebyte extra to handle zero - * cryptlen case. - */ - dst_buf = dma_alloc_coherent(se_dev->dev, cryptlen+1, &dst_buf_dma_addr, - (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? - GFP_KERNEL : GFP_ATOMIC); - if (!dst_buf) - return -ENOMEM; - - /* 1. Add plain text to src */ - src_sg = req->src; - src_sg_start = src_sg; - /* 1.1 Iterative over assoclen, to skip it for encryption */ - count = assoclen; - while (count) { - /* If a SG has both assoc data and PT/CT data */ - if (count < src_sg->length) { - ret = dma_map_sg(se_dev->dev, src_sg, 1, - DMA_TO_DEVICE); - if (!ret) { - pr_err("%s:%d dma_map_sg error\n", - __func__, __LINE__); - goto free_dst_buf; - } - - /* Fill PT/CT src list details from this sg */ - src_ll->addr = sg_dma_address(src_sg) + count; - /* SG length can be more than cryptlen and associated - * data, in this case encrypted length should be - * cryptlen - */ - if (cryptlen + count <= src_sg->length) - src_ll->data_len = cryptlen; - else - src_ll->data_len = src_sg->length - count; - mapped_cryptlen = src_ll->data_len; - mapped_len = src_sg->length; - - /* Fill dst list details */ - dst_ll->addr = dst_buf_dma_addr; - dst_ll->data_len = src_ll->data_len; - index += src_ll->data_len; - dst_ll++; - src_ll++; - /* Store from where src_sg is mapped, so it can be - * unmapped. 
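-			 * The dst side needs no such bookkeeping: the
-			 * output lands in the contiguous dst_buf and is
-			 * copied back into req->dst past assoclen once
-			 * the job completes.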
- */ - src_sg_start = src_sg; - src_sg = sg_next(src_sg); - break; - } - - /* If whole SG is associated data iterate over it */ - count -= min_t(size_t, (size_t)src_sg->length, (size_t)count); - src_sg = sg_next(src_sg); - } - - /* 1.2 Add plain text to src_ll list */ - ret = tegra_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE, - src_ll, cryptlen - mapped_cryptlen); - if (ret < 0) - goto free_dst_buf; - - mapped_len += (cryptlen - mapped_cryptlen); - - /* 1.3 Fill dst_ll list */ - while (src_sg && cryptlen) { - dst_ll->addr = dst_buf_dma_addr + index; - dst_ll->data_len = src_ll->data_len; - index += dst_ll->data_len; - dst_ll++; - src_ll++; - src_sg = sg_next(src_sg); - } - - /* 2. Encrypt/Decrypt using GCTR and perform GHASH on plain text */ - cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, SZ_16K); - if (IS_ERR(cmdbuf)) { - dev_err(se_dev->dev, "cmdbuf allocation failed\n"); - ret = PTR_ERR(cmdbuf); - goto free_dst_buf; - } - - cpuvaddr = cmdbuf->addr; - iova = cmdbuf->iova; - - /* 2.2 Prepare and program J0 (i.e. iv || 0^31 || 1) using IV */ - memset(iv, 0, 16); - memcpy(iv, req->iv, 12); - iv[3] = (1 << 24); - tegra_se_send_ctr_seed(se_dev, iv, se_dev->opcode_addr, cpuvaddr); - - /* 2.3 Submit request */ - req_ctx->config = tegra_se_get_config(se_dev, req_ctx->op_mode, - 0, encrypt ? SE_AES_GCM_ENC : SE_AES_GCM_DEC); - req_ctx->crypto_config = tegra_se_get_crypto_config( - se_dev, req_ctx->op_mode, 0, - ctx->slot->slot_num, 0, true); - tegra_se_send_gcm_data(se_dev, req_ctx, cryptlen, - se_dev->opcode_addr, cpuvaddr, - encrypt ? SE_AES_GCM_ENC : SE_AES_GCM_DEC); - - ret = tegra_se_channel_submit_gather(se_dev, cmdbuf, - 0, se_dev->cmdbuf_cnt, NONE); - - /* 3 clean up */ - tegra_unmap_sg(se_dev->dev, src_sg_start, DMA_TO_DEVICE, mapped_len); - num_sgs = tegra_se_count_sgs(req->dst, assoclen + cryptlen); - sg_pcopy_from_buffer(req->dst, num_sgs, dst_buf, cryptlen, - assoclen); -free_dst_buf: - dma_free_coherent(se_dev->dev, cryptlen+1, dst_buf, dst_buf_dma_addr); - - return ret; -} - -static void gcm_program_aad_msg_len(struct tegra_se_dev *se_dev, - unsigned int alen, unsigned int clen, - unsigned int opcode_addr, u32 *cpuvaddr) -{ - u32 i = 0; - - i = se_dev->cmdbuf_cnt; - - cpuvaddr[i++] = __nvhost_opcode_incr(opcode_addr + - SE_AES_CRYPTO_AAD_LENGTH_0_OFFSET, 2); - cpuvaddr[i++] = alen*8; - cpuvaddr[i++] = 0; - - cpuvaddr[i++] = __nvhost_opcode_incr(opcode_addr + - SE_AES_CRYPTO_MSG_LENGTH_0_OFFSET, 2); - cpuvaddr[i++] = clen*8; - cpuvaddr[i++] = 0; - - se_dev->cmdbuf_cnt = i; -} - -static void gcm_add_encrypted_mac_to_dest(struct aead_request *req) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct tegra_se_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); - struct scatterlist *dst_sg; - u32 num_sgs; - - dst_sg = req->dst; - num_sgs = tegra_se_count_sgs(dst_sg, - req->assoclen + req->cryptlen + ctx->authsize); - sg_pcopy_from_buffer(dst_sg, num_sgs, ctx->mac, - ctx->authsize, req->assoclen + req->cryptlen); - -} - -static int tegra_se_gcm_final(struct aead_request *req, bool encrypt) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct tegra_se_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); - struct tegra_se_req_context *req_ctx = aead_request_ctx(req); - struct tegra_se_cmdbuf *cmdbuf; - u32 *cpuvaddr = NULL; - unsigned int cryptlen; - dma_addr_t iova = 0; - int ret, i; - struct tegra_se_dev *se_dev; - u32 iv[4], val; - - /* 1 Get free command buffer */ - se_dev = ctx->se_dev; - - cmdbuf = tegra_se_alloc_cmdbuf(se_dev->dev->parent, SZ_16K); - if (IS_ERR(cmdbuf)) { - 
dev_err(se_dev->dev, "cmdbuf allocation failed\n"); - return PTR_ERR(cmdbuf); - } - - cpuvaddr = cmdbuf->addr; - iova = cmdbuf->iova; - - /* 2. Configure AAD and MSG length */ - if (encrypt) - cryptlen = req->cryptlen; - else - cryptlen = req->cryptlen - ctx->authsize; - gcm_program_aad_msg_len(se_dev, req->assoclen, cryptlen, - se_dev->opcode_addr, cpuvaddr); - - /* 3. Prepare and program J0 (i.e. iv || 0^31 || 1 ) using IV */ - memset(iv, 0, 16); - memcpy(iv, req->iv, 12); - iv[3] = (1 << 24); - tegra_se_send_ctr_seed(se_dev, iv, se_dev->opcode_addr, cpuvaddr); - - /* 4. Program command buffer */ - req_ctx->op_mode = SE_AES_OP_MODE_GCM; - req_ctx->config = tegra_se_get_config(se_dev, req_ctx->op_mode, - encrypt ? ENCRYPT : DECRYPT, SE_AES_GCM_FINAL); - req_ctx->crypto_config = tegra_se_get_crypto_config( - se_dev, req_ctx->op_mode, 0, - ctx->slot->slot_num, 0, true); - i = se_dev->cmdbuf_cnt; - - /* 4.1 Config register */ - cpuvaddr[i++] = __nvhost_opcode_incr(se_dev->opcode_addr, 2); - cpuvaddr[i++] = req_ctx->config; - cpuvaddr[i++] = req_ctx->crypto_config; - - /* 4.2 Destination address */ - cpuvaddr[i++] = __nvhost_opcode_incr( - se_dev->opcode_addr + SE_AES_OUT_ADDR_OFFSET, 2); - cpuvaddr[i++] = (u32)(ctx->mac_addr); - cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(MSB(ctx->mac_addr)) | - SE_ADDR_HI_SZ(SE_AES_GCM_GMAC_SIZE)); - - /* 4.3 Handle zero size */ - cpuvaddr[i++] = __nvhost_opcode_nonincr( - se_dev->opcode_addr + SE_AES_OPERATION_OFFSET, 1); - val = SE_OPERATION_WRSTALL(WRSTALL_TRUE); - /* If GMAC or GCM_ENC/GCM_DEC is not operated before then - * set SE_OPERATION.INIT to true. - */ - if (req_ctx->init != true) - val |= SE_OPERATION_INIT(INIT_TRUE); - val |= SE_OPERATION_FINAL(FINAL_TRUE); - val |= SE_OPERATION_OP(OP_START); - val |= SE_OPERATION_LASTBUF(LASTBUF_TRUE); - - cpuvaddr[i++] = val; - se_dev->cmdbuf_cnt = i; - - /* 5 Submit request */ - ret = tegra_se_channel_submit_gather(se_dev, cmdbuf, - 0, se_dev->cmdbuf_cnt, NONE); - - return ret; -} - -/* - * GCM encrypt operation. - - * Input: - * initialization vector IV (whose length is supported). - * plaintext P (whose length is supported). - * additional authenticated data A (whose length is supported). - - * Output: - * ciphertext C. - * authentication tag T. - - * Steps: - * 1. Let H = CIPH_K(0^128). - * 2. Define a block, J0, as follows: - * If len(IV) = 96, then let J0 = IV || 0^31 || 1. - * If len(IV) ≠ 96, then let s = 128 * ⎡len(IV)/128⎤-len(IV), - * and let J0 = GHASH_H(IV || 0^(s+64) || [len(IV)]_64). - * 3. Let C = GCTR_K(inc_32(J0), P). - * 4. Let u = 128 * ⎡len(C)/128⎤-len(C) and - v = 128 * ⎡len(A)/128⎤-len(A) - * 5. Define a block, S, as follows: - * S = GHASH_H (A || 0^v || C || 0^u || [len(A)]_64 || [len(C)]_64). - * 6. Let T = MSB_t(GCTR_K(J0, S)) - * 7. Return (C, T). - - * SE hardware provide below 3 operations for above steps. - * Op1: GMAC - step 1 and partial of 5 (i.e. GHASH_H (A || 0^v)). - * Op2: GCM_ENC - step 1 to 4 and parital of 5 (i.e. GHASH_H (C || 0^u)). - * Op3: GCM_FINAL - step partial of 5 - * (i.e. GHASH_H ([len(A)]_64 || [len(C)]_64) and 6. 
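- *
- * For the 96-bit IV case handled here, the driver builds J0
- * directly: memcpy(iv, req->iv, 12) and iv[3] = 1 << 24 yield the
- * block IV || 0^31 || 1 (the last u32 reads as big-endian 1 on this
- * little-endian CPU), which is then programmed as the counter seed
- * via tegra_se_send_ctr_seed().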
- */
-
-static int tegra_se_aes_gcm_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct tegra_se_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
-	struct tegra_se_req_context *req_ctx = aead_request_ctx(req);
-	int ret;
-
-	ctx->se_dev = se_devices[SE_AEAD];
-	req_ctx->init = false;
-	req_ctx->op_mode = SE_AES_OP_MODE_GCM;
-
-	mutex_lock(&ctx->se_dev->mtx);
-
-	/* If there is associated data, perform the GMAC operation */
-	if (req->assoclen) {
-		ret = tegra_se_gcm_gmac(req, ENCRYPT);
-		if (ret)
-			goto out;
-	}
-
-	/* GCM_ENC operation */
-	if (req->cryptlen) {
-		ret = tegra_se_gcm_op(req, ENCRYPT);
-		if (ret)
-			goto out;
-	}
-
-	/* GCM_FINAL operation */
-	ret = tegra_se_gcm_final(req, ENCRYPT);
-	if (ret)
-		goto out;
-
-	/* Copy the MAC to the request destination */
-	gcm_add_encrypted_mac_to_dest(req);
-out:
-	mutex_unlock(&ctx->se_dev->mtx);
-
-	return ret;
-}
-
-/*
- * GCM decrypt operation.
- *
- * Input:
- *   initialization vector IV (whose length is supported).
- *   ciphertext C (whose length is supported).
- *   additional authenticated data A (whose length is supported).
- *   authentication tag T.
- *
- * Output:
- *   either the plaintext P or FAIL.
- *
- * Steps:
- * 1. If the bit lengths of IV, A or C are not supported, or if
- *    len(T) ≠ t, then return FAIL.
- * 2. Let H = CIPH_K(0^128).
- * 3. Define a block, J0, as follows:
- *    If len(IV) = 96, then J0 = IV || 0^31 || 1.
- *    If len(IV) ≠ 96, then let s = 128 * ⎡len(IV)/128⎤ - len(IV),
- *    and let J0 = GHASH_H(IV || 0^(s+64) || [len(IV)]_64).
- * 4. Let P = GCTR_K(inc_32(J0), C).
- * 5. Let u = 128 * ⎡len(C)/128⎤ - len(C) and
- *    v = 128 * ⎡len(A)/128⎤ - len(A).
- * 6. Define a block, S, as follows:
- *    S = GHASH_H (A || 0^v || C || 0^u || [len(A)]_64 || [len(C)]_64).
- * 7. Let T′ = MSB_t(GCTR_K(J0, S)).
- * 8. If T = T′, then return P; else return FAIL.
- *
- * SE hardware provides the below 3 operations for the above steps.
- * Op1: GMAC      - step 2 and partial of 6 (i.e. GHASH_H (A || 0^v)).
- * Op2: GCM_DEC   - steps 2 to 4 and partial of 6 (i.e. GHASH_H (C || 0^u)).
- * Op3: GCM_FINAL - partial of step 6
- *      (i.e. GHASH_H ([len(A)]_64 || [len(C)]_64)) and step 7.
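- *
- * Note that the hardware GCM_FINAL op only produces T′; the
- * comparison against the received tag (step 8) is done in software
- * with crypto_memneq() in tegra_se_aes_gcm_decrypt() below.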
- */ -static int tegra_se_aes_gcm_decrypt(struct aead_request *req) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct tegra_se_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); - struct tegra_se_req_context *req_ctx = aead_request_ctx(req); - u8 mac[16]; - struct scatterlist *sg; - u32 num_sgs; - int ret; - - ctx->se_dev = se_devices[SE_AEAD]; - req_ctx->init = false; - req_ctx->op_mode = SE_AES_OP_MODE_GCM; - - mutex_lock(&ctx->se_dev->mtx); - if (req->assoclen) { - ret = tegra_se_gcm_gmac(req, DECRYPT); - if (ret) - goto out; - } - - if (req->cryptlen - req->assoclen - ctx->authsize) { - ret = tegra_se_gcm_op(req, DECRYPT); - if (ret) - goto out; - } - - ret = tegra_se_gcm_final(req, DECRYPT); - if (ret) - goto out; - - /* Verify the mac */ - sg = req->src; - num_sgs = tegra_se_count_sgs(sg, req->assoclen + req->cryptlen); - sg_pcopy_to_buffer(sg, num_sgs, mac, ctx->authsize, - req->assoclen + req->cryptlen - ctx->authsize); - if (crypto_memneq(ctx->mac, mac, ctx->authsize)) - ret = -EBADMSG; -out: - mutex_unlock(&ctx->se_dev->mtx); - - return ret; -} - -static int tegra_se_aes_gcm_init(struct crypto_aead *tfm) -{ - struct tegra_se_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); - struct tegra_se_dev *se_dev = se_devices[SE_AEAD]; - int ret = 0; - - crypto_aead_set_reqsize(tfm, sizeof(struct tegra_se_req_context)); - - mutex_lock(&se_dev->mtx); - ctx->mac = dma_alloc_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE, - &ctx->mac_addr, GFP_KERNEL); - if (!ctx->mac) - ret = -ENOMEM; - - mutex_unlock(&se_dev->mtx); - - return ret; -} - -static void tegra_se_aes_gcm_exit(struct crypto_aead *tfm) -{ - struct tegra_se_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); - struct tegra_se_dev *se_dev = se_devices[SE_AEAD]; - - mutex_lock(&se_dev->mtx); - tegra_se_free_key_slot(ctx->slot); - ctx->slot = NULL; - - dma_free_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE, - ctx->mac, ctx->mac_addr); - - mutex_unlock(&se_dev->mtx); -} - -static struct aead_alg aead_algs[] = { - { - .setkey = tegra_se_aes_ccm_setkey, - .setauthsize = tegra_se_aes_ccm_setauthsize, - .encrypt = tegra_se_aes_ccm_encrypt, - .decrypt = tegra_se_aes_ccm_decrypt, - .init = tegra_se_aes_ccm_init, - .exit = tegra_se_aes_ccm_exit, - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - .chunksize = AES_BLOCK_SIZE, - .base = { - .cra_name = "ccm(aes)", - .cra_driver_name = "ccm-aes-tegra", - .cra_priority = 1000, - .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_aes_ccm_ctx), - .cra_module = THIS_MODULE, - } - }, { - .setkey = tegra_se_aes_gcm_setkey, - .setauthsize = tegra_se_aes_gcm_setauthsize, - .encrypt = tegra_se_aes_gcm_encrypt, - .decrypt = tegra_se_aes_gcm_decrypt, - .init = tegra_se_aes_gcm_init, - .exit = tegra_se_aes_gcm_exit, - .ivsize = GCM_IV_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - .chunksize = AES_BLOCK_SIZE, - .base = { - .cra_name = "gcm(aes)", - .cra_driver_name = "gcm-aes-tegra", - .cra_priority = 1000, - .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_aes_gcm_ctx), - .cra_module = THIS_MODULE, - } - } -}; - -static struct kpp_alg dh_algs[] = { - { - .set_secret = tegra_se_dh_set_secret, - .generate_public_key = tegra_se_dh_compute_value, - .compute_shared_secret = tegra_se_dh_compute_value, - .max_size = tegra_se_dh_max_size, - .exit = tegra_se_dh_exit_tfm, - .base = { - .cra_name = "dh", - .cra_driver_name = "tegra-se-dh", - .cra_priority = 300, - .cra_module = THIS_MODULE, - .cra_ctxsize = sizeof(struct tegra_se_dh_context), - } - } -}; - -static 
struct rng_alg rng_algs[] = { - { - .generate = tegra_se_rng_drbg_get_random, - .seed = tegra_se_rng_drbg_reset, - .seedsize = TEGRA_SE_RNG_SEED_SIZE, - .base = { - .cra_name = "rng_drbg", - .cra_driver_name = "rng_drbg-aes-tegra", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_TYPE_RNG, - .cra_ctxsize = sizeof(struct tegra_se_rng_context), - .cra_module = THIS_MODULE, - .cra_init = tegra_se_rng_drbg_init, - .cra_exit = tegra_se_rng_drbg_exit, - } - } -}; - -static struct skcipher_alg aes_algs[] = { - { - .base.cra_name = "xts(aes)", - .base.cra_driver_name = "xts-aes-tegra", - .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC, - .base.cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct tegra_se_aes_context), - .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, - .init = tegra_se_aes_cra_init, - .exit = tegra_se_aes_cra_exit, - .setkey = tegra_se_aes_setkey, - .encrypt = tegra_se_aes_xts_encrypt, - .decrypt = tegra_se_aes_xts_decrypt, - .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE, - .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE, - .ivsize = TEGRA_SE_AES_IV_SIZE, - }, - { - .base.cra_name = "cbc(aes)", - .base.cra_driver_name = "cbc-aes-tegra", - .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC, - .base.cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct tegra_se_aes_context), - .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, - .init = tegra_se_aes_cra_init, - .exit = tegra_se_aes_cra_exit, - .setkey = tegra_se_aes_setkey, - .encrypt = tegra_se_aes_cbc_encrypt, - .decrypt = tegra_se_aes_cbc_decrypt, - .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE, - .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE, - .ivsize = TEGRA_SE_AES_IV_SIZE, - }, - { - .base.cra_name = "ecb(aes)", - .base.cra_driver_name = "ecb-aes-tegra", - .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC, - .base.cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct tegra_se_aes_context), - .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, - .init = tegra_se_aes_cra_init, - .exit = tegra_se_aes_cra_exit, - .setkey = tegra_se_aes_setkey, - .encrypt = tegra_se_aes_ecb_encrypt, - .decrypt = tegra_se_aes_ecb_decrypt, - .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE, - .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE, - .ivsize = TEGRA_SE_AES_IV_SIZE, - }, - { - .base.cra_name = "ctr(aes)", - .base.cra_driver_name = "ctr-aes-tegra", - .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC, - .base.cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct tegra_se_aes_context), - .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, - .init = tegra_se_aes_cra_init, - .exit = tegra_se_aes_cra_exit, - .setkey = tegra_se_aes_setkey, - .encrypt = tegra_se_aes_ctr_encrypt, - .decrypt = tegra_se_aes_ctr_decrypt, - .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE, - .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE, - .ivsize = TEGRA_SE_AES_IV_SIZE, - }, - { - .base.cra_name = "ofb(aes)", - .base.cra_driver_name = "ofb-aes-tegra", - .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC, - .base.cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct tegra_se_aes_context), - .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, - .init = tegra_se_aes_cra_init, - .exit = tegra_se_aes_cra_exit, - .setkey = tegra_se_aes_setkey, - .encrypt = 
tegra_se_aes_ofb_encrypt, - .decrypt = tegra_se_aes_ofb_decrypt, - .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE, - .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE, - .ivsize = TEGRA_SE_AES_IV_SIZE, - }, -}; - -static struct ahash_alg hash_algs[] = { - { - .init = tegra_se_aes_cmac_init, - .update = tegra_se_aes_cmac_update, - .final = tegra_se_aes_cmac_final, - .finup = tegra_se_aes_cmac_finup, - .digest = tegra_se_aes_cmac_digest, - .setkey = tegra_se_aes_cmac_setkey, - .export = tegra_se_aes_cmac_export, - .import = tegra_se_aes_cmac_import, - .halg.digestsize = TEGRA_SE_AES_CMAC_DIGEST_SIZE, - .halg.statesize = TEGRA_SE_AES_CMAC_STATE_SIZE, - .halg.base = { - .cra_name = "cmac(aes)", - .cra_driver_name = "tegra-se-cmac(aes)", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_aes_cmac_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_aes_cmac_cra_init, - .cra_exit = tegra_se_aes_cmac_cra_exit, - } - }, { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .halg.digestsize = SHA1_DIGEST_SIZE, - .halg.statesize = SHA1_STATE_SIZE, - .halg.base = { - .cra_name = "sha1", - .cra_driver_name = "tegra-se-sha1", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = tegra_se_sha_cra_exit, - } - }, { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .halg.digestsize = SHA224_DIGEST_SIZE, - .halg.statesize = SHA224_STATE_SIZE, - .halg.base = { - .cra_name = "sha224", - .cra_driver_name = "tegra-se-sha224", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = tegra_se_sha_cra_exit, - } - }, { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .halg.digestsize = SHA256_DIGEST_SIZE, - .halg.statesize = SHA256_STATE_SIZE, - .halg.base = { - .cra_name = "sha256", - .cra_driver_name = "tegra-se-sha256", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = tegra_se_sha_cra_exit, - } - }, { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .halg.digestsize = SHA384_DIGEST_SIZE, - .halg.statesize = SHA384_STATE_SIZE, - .halg.base = { - .cra_name = "sha384", - .cra_driver_name = "tegra-se-sha384", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_ctxsize = 
sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = tegra_se_sha_cra_exit, - } - }, { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .halg.digestsize = SHA512_DIGEST_SIZE, - .halg.statesize = SHA512_STATE_SIZE, - .halg.base = { - .cra_name = "sha512", - .cra_driver_name = "tegra-se-sha512", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = tegra_se_sha_cra_exit, - } - }, { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .halg.digestsize = SHA3_224_DIGEST_SIZE, - .halg.statesize = SHA3_224_STATE_SIZE, - .halg.base = { - .cra_name = "sha3-224", - .cra_driver_name = "tegra-se-sha3-224", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA3_224_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = tegra_se_sha_cra_exit, - } - }, { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .halg.digestsize = SHA3_256_DIGEST_SIZE, - .halg.statesize = SHA3_256_STATE_SIZE, - .halg.base = { - .cra_name = "sha3-256", - .cra_driver_name = "tegra-se-sha3-256", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA3_256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = tegra_se_sha_cra_exit, - } - }, { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .halg.digestsize = SHA3_384_DIGEST_SIZE, - .halg.statesize = SHA3_384_STATE_SIZE, - .halg.base = { - .cra_name = "sha3-384", - .cra_driver_name = "tegra-se-sha3-384", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA3_384_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = tegra_se_sha_cra_exit, - } - }, { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .halg.digestsize = SHA3_512_DIGEST_SIZE, - .halg.statesize = SHA3_512_STATE_SIZE, - .halg.base = { - .cra_name = "sha3-512", - .cra_driver_name = "tegra-se-sha3-512", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA3_512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = 
tegra_se_sha_cra_exit, - } - }, - { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .halg.digestsize = SHA3_512_DIGEST_SIZE, - .halg.statesize = SHA3_512_STATE_SIZE, - .halg.base = { - .cra_name = "shake128", - .cra_driver_name = "tegra-se-shake128", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA3_512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = tegra_se_sha_cra_exit, - } - }, - - { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .halg.digestsize = SHA3_512_DIGEST_SIZE, - .halg.statesize = SHA3_512_STATE_SIZE, - .halg.base = { - .cra_name = "shake256", - .cra_driver_name = "tegra-se-shake256", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA3_512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = tegra_se_sha_cra_exit, - } - }, - { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .setkey = tegra_se_sha_hmac_setkey, - .halg.digestsize = SHA224_DIGEST_SIZE, - .halg.statesize = SHA224_STATE_SIZE, - .halg.base = { - .cra_name = "hmac(sha224)", - .cra_driver_name = "tegra-se-hmac-sha224", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = tegra_se_sha_cra_exit, - } - }, { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .setkey = tegra_se_sha_hmac_setkey, - .halg.digestsize = SHA256_DIGEST_SIZE, - .halg.statesize = SHA256_STATE_SIZE, - .halg.base = { - .cra_name = "hmac(sha256)", - .cra_driver_name = "tegra-se-hmac-sha256", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = tegra_se_sha_cra_exit, - } - }, { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .setkey = tegra_se_sha_hmac_setkey, - .halg.digestsize = SHA384_DIGEST_SIZE, - .halg.statesize = SHA384_STATE_SIZE, - .halg.base = { - .cra_name = "hmac(sha384)", - .cra_driver_name = "tegra-se-hmac-sha384", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = 
tegra_se_sha_cra_exit, - } - }, { - .init = tegra_se_sha_init, - .update = tegra_se_sha_update, - .final = tegra_se_sha_final, - .finup = tegra_se_sha_finup, - .digest = tegra_se_sha_digest, - .export = tegra_se_sha_export, - .import = tegra_se_sha_import, - .setkey = tegra_se_sha_hmac_setkey, - .halg.digestsize = SHA512_DIGEST_SIZE, - .halg.statesize = SHA512_STATE_SIZE, - .halg.base = { - .cra_name = "hmac(sha512)", - .cra_driver_name = "tegra-se-hmac-sha512", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_se_sha_context), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = tegra_se_sha_cra_init, - .cra_exit = tegra_se_sha_cra_exit, - } - } -}; - -static struct akcipher_alg rsa_alg = { - .encrypt = tegra_se_rsa_op, - .decrypt = tegra_se_rsa_op, - .sign = tegra_se_rsa_op, - .verify = tegra_se_rsa_op, - .set_priv_key = tegra_se_rsa_setkey, - .set_pub_key = tegra_se_rsa_setkey, - .max_size = tegra_se_rsa_max_size, - .exit = tegra_se_rsa_exit, - .base = { - .cra_name = "rsa-pka0", - .cra_driver_name = "tegra-se-pka0-rsa", - .cra_priority = 300, - .cra_ctxsize = sizeof(struct tegra_se_aes_rsa_context), - .cra_module = THIS_MODULE, - } -}; - -static int tegra_se_nvhost_prepare_poweroff(struct platform_device *pdev) -{ - - /* Runtime PM not supported by Host1x */ - return 0; -} - -static struct tegra_se_chipdata tegra18_se_chipdata = { - .aes_freq = 600000000, - .cpu_freq_mhz = 2400, - .kac_type = SE_KAC_T18X, -}; - -static struct nvhost_device_data nvhost_se1_info = { - .clocks = {{"se", 600000000}, - {"emc", UINT_MAX, - NVHOST_MODULE_ID_EXTERNAL_MEMORY_CONTROLLER, - 0, NULL /*TEGRA_BWMGR_SET_EMC_FLOOR*/}, {} }, - .can_powergate = true, - .autosuspend_delay = 500, - .class = NV_SE1_CLASS_ID, - .private_data = &tegra18_se_chipdata, - .serialize = 1, - .push_work_done = 1, - .vm_regs = {{SE_STREAMID_REG_OFFSET, true} }, - .kernel_only = true, -// .bwmgr_client_id = TEGRA_BWMGR_CLIENT_SE1, - .prepare_poweroff = tegra_se_nvhost_prepare_poweroff, -}; - -static struct nvhost_device_data nvhost_se2_info = { - .clocks = {{"se", 600000000}, - {"emc", UINT_MAX, - NVHOST_MODULE_ID_EXTERNAL_MEMORY_CONTROLLER, - 0, NULL /*TEGRA_BWMGR_SET_EMC_FLOOR*/}, {} }, - .can_powergate = true, - .autosuspend_delay = 500, - .class = NV_SE2_CLASS_ID, - .private_data = &tegra18_se_chipdata, - .serialize = 1, - .push_work_done = 1, - .vm_regs = {{SE_STREAMID_REG_OFFSET, true} }, - .kernel_only = true, -// .bwmgr_client_id = TEGRA_BWMGR_CLIENT_SE2, - .prepare_poweroff = tegra_se_nvhost_prepare_poweroff, -}; - -static struct nvhost_device_data nvhost_se3_info = { - .clocks = {{"se", 600000000}, - {"emc", UINT_MAX, - NVHOST_MODULE_ID_EXTERNAL_MEMORY_CONTROLLER, - 0, NULL /*TEGRA_BWMGR_SET_EMC_FLOOR*/}, {} }, - .can_powergate = true, - .autosuspend_delay = 500, - .class = NV_SE3_CLASS_ID, - .private_data = &tegra18_se_chipdata, - .serialize = 1, - .push_work_done = 1, - .vm_regs = {{SE_STREAMID_REG_OFFSET, true} }, - .kernel_only = true, -// .bwmgr_client_id = TEGRA_BWMGR_CLIENT_SE3, - .prepare_poweroff = tegra_se_nvhost_prepare_poweroff, -}; - -static struct nvhost_device_data nvhost_se4_info = { - .clocks = {{"se", 600000000}, - {"emc", UINT_MAX, - NVHOST_MODULE_ID_EXTERNAL_MEMORY_CONTROLLER, - 0, NULL /*TEGRA_BWMGR_SET_EMC_FLOOR*/}, {} }, - .can_powergate = true, - .autosuspend_delay = 500, - .class = NV_SE4_CLASS_ID, - .private_data = &tegra18_se_chipdata, - .serialize = 1, - .push_work_done = 1, - .vm_regs = 
{{SE_STREAMID_REG_OFFSET, true} }, - .kernel_only = true, -// .bwmgr_client_id = TEGRA_BWMGR_CLIENT_SE4, - .prepare_poweroff = tegra_se_nvhost_prepare_poweroff, -}; - -static struct tegra_se_chipdata tegra23_se_chipdata = { - .aes_freq = 600000000, - .cpu_freq_mhz = 2400, - .kac_type = SE_KAC_T23X, -}; - -static struct nvhost_device_data nvhost_t234_se1_info = { - .clocks = {{"se", 600000000}, - {"emc", UINT_MAX, - NVHOST_MODULE_ID_EXTERNAL_MEMORY_CONTROLLER, - TEGRA_SET_EMC_FLOOR}, {} }, - .can_powergate = true, - .autosuspend_delay = 500, - .class = NV_SE1_CLASS_ID, - .private_data = &tegra23_se_chipdata, - .serialize = 1, - .push_work_done = 1, - .vm_regs = {{SE_STREAMID_REG_OFFSET, true} }, - .kernel_only = true, - .icc_id = TEGRA_ICC_SE, - .prepare_poweroff = tegra_se_nvhost_prepare_poweroff, -}; - -static struct nvhost_device_data nvhost_t234_se2_info = { - .clocks = {{"se", 600000000}, - {"emc", UINT_MAX, - NVHOST_MODULE_ID_EXTERNAL_MEMORY_CONTROLLER, - TEGRA_SET_EMC_FLOOR}, {} }, - .can_powergate = true, - .autosuspend_delay = 500, - .class = NV_SE2_CLASS_ID, - .private_data = &tegra23_se_chipdata, - .serialize = 1, - .push_work_done = 1, - .vm_regs = {{SE_STREAMID_REG_OFFSET, true} }, - .kernel_only = true, - .icc_id = TEGRA_ICC_SE, - .prepare_poweroff = tegra_se_nvhost_prepare_poweroff, -}; - -static struct nvhost_device_data nvhost_t234_se4_info = { - .clocks = {{"se", 600000000}, - {"emc", UINT_MAX, - NVHOST_MODULE_ID_EXTERNAL_MEMORY_CONTROLLER, - TEGRA_SET_EMC_FLOOR}, {} }, - .can_powergate = true, - .autosuspend_delay = 500, - .class = NV_SE4_CLASS_ID, - .private_data = &tegra23_se_chipdata, - .serialize = 1, - .push_work_done = 1, - .vm_regs = {{SE_STREAMID_REG_OFFSET, true} }, - .kernel_only = true, - .icc_id = TEGRA_ICC_SE, - .prepare_poweroff = tegra_se_nvhost_prepare_poweroff, -}; - -static const struct of_device_id tegra_se_of_match[] = { - { - .compatible = "nvidia,tegra186-se1-nvhost", - .data = &nvhost_se1_info, - }, { - .compatible = "nvidia,tegra186-se2-nvhost", - .data = &nvhost_se2_info, - }, { - .compatible = "nvidia,tegra186-se3-nvhost", - .data = &nvhost_se3_info, - }, { - .compatible = "nvidia,tegra186-se4-nvhost", - .data = &nvhost_se4_info, - }, { - .compatible = "nvidia,tegra234-se1-nvhost", - .data = &nvhost_t234_se1_info, - }, { - .compatible = "nvidia,tegra234-se2-nvhost", - .data = &nvhost_t234_se2_info, - }, { - .compatible = "nvidia,tegra234-se4-nvhost", - .data = &nvhost_t234_se4_info, - }, {} -}; -MODULE_DEVICE_TABLE(of, tegra_se_of_match); - -static bool is_algo_supported(struct device_node *node, char *algo) -{ - if (of_property_match_string(node, "supported-algos", algo) >= 0) - return true; - else - return false; -} - -static void tegra_se_fill_se_dev_info(struct tegra_se_dev *se_dev) -{ - struct device_node *node = of_node_get(se_dev->dev->of_node); - - if (is_algo_supported(node, "aes")) - se_devices[SE_AES] = se_dev; - if (is_algo_supported(node, "drbg")) - se_devices[SE_DRBG] = se_dev; - if (is_algo_supported(node, "sha")) - se_devices[SE_SHA] = se_dev; - if (is_algo_supported(node, "rsa")) - se_devices[SE_RSA] = se_dev; - if (is_algo_supported(node, "cmac")) - se_devices[SE_CMAC] = se_dev; - if (is_algo_supported(node, "aead")) - se_devices[SE_AEAD] = se_dev; -} - - -static int tegra_se_crypto_register(struct tegra_se_dev *se_dev) -{ - struct device_node *node = of_node_get(se_dev->dev->of_node); - int i, err; - - if (is_algo_supported(node, "drbg")) { - INIT_LIST_HEAD(&rng_algs[0].base.cra_list); - err = crypto_register_rng(&rng_algs[0]); 
- if (err) { - dev_err(se_dev->dev, "crypto_register_rng failed\n"); - goto reg_fail; - } - } - - if (is_algo_supported(node, "xts")) { - INIT_LIST_HEAD(&aes_algs[0].base.cra_list); - err = crypto_register_skcipher(&aes_algs[0]); - if (err) { - dev_err(se_dev->dev, - "crypto_register_alg xts failed\n"); - goto reg_fail; - } - } - - if (is_algo_supported(node, "aes")) { - for (i = 1; i < ARRAY_SIZE(aes_algs); i++) { - INIT_LIST_HEAD(&aes_algs[i].base.cra_list); - err = crypto_register_skcipher(&aes_algs[i]); - if (err) { - dev_err(se_dev->dev, - "crypto_register_alg %s failed\n", - aes_algs[i].base.cra_name); - goto reg_fail; - } - } - } - - if (is_algo_supported(node, "cmac")) { - err = crypto_register_ahash(&hash_algs[0]); - if (err) { - dev_err(se_dev->dev, - "crypto_register_ahash cmac failed\n"); - goto reg_fail; - } - } - - if (is_algo_supported(node, "sha")) { - for (i = 1; i < 6; i++) { - err = crypto_register_ahash(&hash_algs[i]); - if (err) { - dev_err(se_dev->dev, - "crypto_register_ahash %s failed\n", - hash_algs[i].halg.base.cra_name); - goto reg_fail; - } - } - } - - if (is_algo_supported(node, "sha3")) { - for (i = 6; i < 12; i++) { - err = crypto_register_ahash(&hash_algs[i]); - if (err) { - dev_err(se_dev->dev, - "crypto_register_ahash %s failed\n", - hash_algs[i].halg.base.cra_name); - goto reg_fail; - } - } - } - - if (is_algo_supported(node, "hmac")) { - for (i = 12; i < 16; i++) { - err = crypto_register_ahash(&hash_algs[i]); - if (err) { - dev_err(se_dev->dev, - "crypto_register_ahash %s failed\n", - hash_algs[i].halg.base.cra_name); - goto reg_fail; - } - } - } - - if (is_algo_supported(node, "aead")) { - for (i = 0; i < 2; i++) { - err = crypto_register_aead(&aead_algs[i]); - if (err) { - dev_err(se_dev->dev, - "crypto_register_aead %s failed\n", - aead_algs[i].base.cra_name); - goto reg_fail; - } - } - } - - if (is_algo_supported(node, "rsa")) { - err = crypto_register_akcipher(&rsa_alg); - if (err) { - dev_err(se_dev->dev, "crypto_register_akcipher fail"); - goto reg_fail; - } - err = crypto_register_kpp(&dh_algs[0]); - if (err) { - dev_err(se_dev->dev, "crypto_register_kpp fail"); - goto reg_fail; - } - - se_dev->dh_buf1 = (u32 *)devm_kzalloc( - se_dev->dev, TEGRA_SE_RSA2048_INPUT_SIZE, - GFP_KERNEL); - se_dev->dh_buf2 = (u32 *)devm_kzalloc( - se_dev->dev, TEGRA_SE_RSA2048_INPUT_SIZE, - GFP_KERNEL); - if (!se_dev->dh_buf1 || !se_dev->dh_buf2) - goto reg_fail; - } - -reg_fail: - return 0; - -} - -static int tegra_se_clk_init(struct platform_device *pdev) -{ - struct nvhost_device_data *pdata = platform_get_drvdata(pdev); - unsigned int i; - int err; - - err = devm_clk_bulk_get_all(&pdev->dev, &pdata->clks); - if (err < 0) { - dev_err(&pdev->dev, "failed to get clocks %d\n", err); - return err; - } - - pdata->num_clks = err; - - for (i = 0; i < pdata->num_clks; i++) { - err = clk_set_rate(pdata->clks[i].clk, ULONG_MAX); - if (err < 0) { - dev_err(&pdev->dev, "failed to set clock rate!\n"); - return err; - } - } - - err = clk_bulk_prepare_enable(pdata->num_clks, pdata->clks); - if (err < 0) { - dev_err(&pdev->dev, "failed to enabled clocks: %d\n", err); - return err; - } - - return 0; -} - -static int tegra_se_client_init(struct host1x_client *client) -{ - return 0; -} - -static const struct host1x_client_ops tegra_se_client_ops = { - .init = tegra_se_client_init, -}; - -static int host1x_se_probe(struct host1x_device *dev) -{ - return host1x_device_init(dev); -} - -static int host1x_se_remove(struct host1x_device *dev) -{ - host1x_device_exit(dev); - - return 0; -} 
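-
-/*
- * Probe flow, in summary: match the OF table to pick the per-SoC
- * nvhost_device_data, register a host1x client and request a
- * channel, read opcode_addr, the stream ID and "supported-algos"
- * from the DT node, set up the key slots, work queue and linked-list
- * DMA buffers, and finally register the crypto algorithms this
- * instance advertises.
- */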
-
-static int tegra_se_probe(struct platform_device *pdev)
-{
-	struct tegra_se_dev *se_dev = NULL;
-	struct nvhost_device_data *pdata = NULL;
-	const struct of_device_id *match;
-	int err = 0;
-	unsigned int val;
-	struct device_node *node = NULL;
-	const char *rsa_name;
-	struct iommu_fwspec *spec;
-	int ret;
-
-	se_dev = devm_kzalloc(&pdev->dev, sizeof(struct tegra_se_dev),
-			      GFP_KERNEL);
-	if (!se_dev)
-		return -ENOMEM;
-
-	if (pdev->dev.of_node) {
-		match = of_match_device(of_match_ptr(tegra_se_of_match),
-					&pdev->dev);
-		if (!match) {
-			dev_err(&pdev->dev, "Error: No device match found\n");
-			return -ENODEV;
-		}
-		pdata = (struct nvhost_device_data *)match->data;
-	} else {
-		pdata =
-		    (struct nvhost_device_data *)pdev->id_entry->driver_data;
-	}
-
-	mutex_init(&se_dev->lock);
-	crypto_init_queue(&se_dev->queue, TEGRA_SE_CRYPTO_QUEUE_LENGTH);
-
-	se_dev->dev = &pdev->dev;
-	se_dev->pdev = pdev;
-
-	dma_set_mask_and_coherent(se_dev->dev, DMA_BIT_MASK(39));
-
-	mutex_init(&pdata->lock);
-	pdata->pdev = pdev;
-
-	/* store chipdata inside se_dev and store se_dev into private_data */
-	se_dev->chipdata = pdata->private_data;
-	pdata->private_data = se_dev;
-
-	/* store the pdata into drvdata */
-	platform_set_drvdata(pdev, pdata);
-	INIT_LIST_HEAD(&se_dev->client.list);
-	se_dev->client.dev = &pdev->dev;
-	se_dev->client.ops = &tegra_se_client_ops;
-	se_dev->client.class = pdata->class;
-	ret = host1x_client_register(&se_dev->client);
-
-
-	err = nvhost_client_device_get_resources(pdev);
-	if (err) {
-		dev_err(se_dev->dev,
-			"nvhost_client_device_get_resources failed for SE(%s)\n",
-			pdev->name);
-		return err;
-	}
-
-	err = tegra_se_clk_init(pdev);
-	if (err) {
-		dev_err(se_dev->dev,
-			"Enabling clocks failed for SE(%s)\n",
-			pdev->name);
-		return err;
-	}
-
-#if 0
-	err = nvhost_client_device_init(pdev);
-	if (err) {
-		dev_err(se_dev->dev,
-			"nvhost_client_device_init failed for SE(%s)\n",
-			pdev->name);
-		return err;
-	}
-#endif
-
-	se_dev->channel = host1x_channel_request(&se_dev->client);
-	if (!se_dev->channel) {
-		dev_err(se_dev->dev, "Nvhost Channel map failed\n");
-		return err;
-	}
-
-	se_dev->io_regs = pdata->aperture[0];
-
-	node = of_node_get(se_dev->dev->of_node);
-
-	spec = dev_iommu_fwspec_get(se_dev->dev);
-	if (spec) {
-		se_dev->stream_id = spec->ids[0] & 0xffff;
-		se_writel(se_dev, se_dev->stream_id, SE_STREAMID_REG_OFFSET);
-	}
-
-	se_dev->ioc = of_property_read_bool(node, "nvidia,io-coherent");
-
-	err = of_property_read_u32(node, "opcode_addr", &se_dev->opcode_addr);
-	if (err) {
-		dev_err(se_dev->dev, "Missing opcode_addr property\n");
-		return err;
-	}
-
-	if (!of_property_count_strings(node, "supported-algos"))
-		return -ENOTSUPP;
-
-	tegra_se_fill_se_dev_info(se_dev);
-
-	if (is_algo_supported(node, "aes") || is_algo_supported(node, "drbg")
-	    || is_algo_supported(node, "sha")) {
-		err = tegra_init_key_slot(se_dev);
-		if (err) {
-			dev_err(se_dev->dev, "init_key_slot failed\n");
-			return err;
-		}
-	}
-
-	if (is_algo_supported(node, "rsa")) {
-		err = tegra_init_rsa_key_slot(se_dev);
-		if (err) {
-			dev_err(se_dev->dev, "init_rsa_key_slot failed\n");
-			return err;
-		}
-	}
-
-	mutex_init(&se_dev->mtx);
-	INIT_WORK(&se_dev->se_work, tegra_se_work_handler);
-	se_dev->se_work_q = alloc_workqueue("se_work_q",
-					    WQ_HIGHPRI | WQ_UNBOUND, 1);
-	if (!se_dev->se_work_q) {
-		dev_err(se_dev->dev, "alloc_workqueue failed\n");
-		return -ENOMEM;
-	}
-
-	err = tegra_se_alloc_ll_buf(se_dev, SE_MAX_SRC_SG_COUNT,
-				    SE_MAX_DST_SG_COUNT);
-	if (err) {
-		dev_err(se_dev->dev, "can not allocate ll dma buffer\n");
-		goto ll_alloc_fail;
-	}
-
-	node = of_node_get(se_dev->dev->of_node);
-
-	se_dev->syncpt_id = nvhost_get_syncpt_host_managed(se_dev->pdev,
-							   0, pdev->name);
-	if (!se_dev->syncpt_id) {
-		err = -EINVAL;
-		dev_err(se_dev->dev, "Cannot get syncpt_id for SE(%s)\n",
-			pdev->name);
-		goto reg_fail;
-	}
-
-	se_dev->aes_src_ll = devm_kzalloc(&pdev->dev, sizeof(struct tegra_se_ll),
-					  GFP_KERNEL);
-	se_dev->aes_dst_ll = devm_kzalloc(&pdev->dev, sizeof(struct tegra_se_ll),
-					  GFP_KERNEL);
-	if (!se_dev->aes_src_ll || !se_dev->aes_dst_ll) {
-		dev_err(se_dev->dev, "Linked list memory allocation failed\n");
-		goto ll_alloc_fail;
-	}
-
-	if (se_dev->ioc)
-		se_dev->total_aes_buf = dma_alloc_coherent(
-				se_dev->dev, SE_MAX_MEM_ALLOC,
-				&se_dev->total_aes_buf_addr,
-				GFP_KERNEL);
-	else
-		se_dev->total_aes_buf = kzalloc(SE_MAX_MEM_ALLOC, GFP_KERNEL);
-
-	if (!se_dev->total_aes_buf) {
-		err = -ENOMEM;
-		goto aes_buf_alloc_fail;
-	}
-
-	tegra_se_init_aesbuf(se_dev);
-
-	tegra_se_boost_cpu_init(se_dev);
-	err = of_property_read_u32(node, "pka0-rsa-priority", &val);
-	if (!err)
-		rsa_alg.base.cra_priority = val;
-
-	err = of_property_read_string(node, "pka0-rsa-name", &rsa_name);
-	if (!err)
-		strncpy(rsa_alg.base.cra_name, rsa_name,
-			sizeof(rsa_alg.base.cra_name) - 1);
-
-	if (is_algo_supported(node, "drbg")) {
-		se_writel(se_dev,
-			  SE_RNG_SRC_CONFIG_RO_ENT_SRC(DRBG_RO_ENT_SRC_ENABLE) |
-			  SE_RNG_SRC_CONFIG_RO_ENT_SRC_LOCK(
-				DRBG_RO_ENT_SRC_LOCK_ENABLE),
-			  SE_RNG_SRC_CONFIG_REG_OFFSET);
-	}
-
-	tegra_se_crypto_register(se_dev);
-
-	dev_info(se_dev->dev, "%s: complete", __func__);
-
-	return 0;
-aes_buf_alloc_fail:
-	nvhost_syncpt_put_ref_ext(se_dev->pdev, se_dev->syncpt_id);
-reg_fail:
-	tegra_se_free_ll_buf(se_dev);
-ll_alloc_fail:
-	if (se_dev->se_work_q)
-		destroy_workqueue(se_dev->se_work_q);
-
-	return err;
-}
-
-static int tegra_se_remove(struct platform_device *pdev)
-{
-	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
-	struct tegra_se_dev *se_dev = pdata->private_data;
-	struct device_node *node;
-	int i;
-
-	if (!se_dev) {
-		pr_err("Device is NULL\n");
-		return -ENODEV;
-	}
-
-	tegra_se_boost_cpu_deinit(se_dev);
-
-	node = of_node_get(se_dev->dev->of_node);
-	if (is_algo_supported(node, "drbg"))
-		crypto_unregister_rng(&rng_algs[0]);
-
-	if (is_algo_supported(node, "xts"))
-		crypto_unregister_skcipher(&aes_algs[0]);
-
-	if (is_algo_supported(node, "aes")) {
-		crypto_unregister_skcipher(&aes_algs[1]);
-		for (i = 2; i < ARRAY_SIZE(aes_algs); i++)
-			crypto_unregister_skcipher(&aes_algs[i]);
-	}
-
-	if (is_algo_supported(node, "cmac"))
-		crypto_unregister_ahash(&hash_algs[0]);
-
-	if (is_algo_supported(node, "sha")) {
-		for (i = 1; i < 6; i++)
-			crypto_unregister_ahash(&hash_algs[i]);
-	}
-
-	if (is_algo_supported(node, "rsa")) {
-		crypto_unregister_akcipher(&rsa_alg);
-		crypto_unregister_kpp(&dh_algs[0]);
-	}
-
-	if (is_algo_supported(node, "aead")) {
-		for (i = 0; i < 2; i++)
-			crypto_unregister_aead(&aead_algs[i]);
-	}
-
-	tegra_se_free_ll_buf(se_dev);
-	kfree(se_dev->total_aes_buf);
-
-	cancel_work_sync(&se_dev->se_work);
-	if (se_dev->se_work_q)
-		destroy_workqueue(se_dev->se_work_q);
-
-	mutex_destroy(&se_dev->mtx);
-	nvhost_client_device_release(pdev);
-	mutex_destroy(&pdata->lock);
-
-	return 0;
-}
-
-static struct host1x_driver host1x_se_driver = {
-	.driver = {
-		.name = "tegra-se-host1x",
-	},
-	.probe = host1x_se_probe,
-	.remove = host1x_se_remove,
-	.subdevs = tegra_se_of_match,
-};
-
-static struct platform_driver tegra_se_driver = {
-	.probe = tegra_se_probe,
-	.remove = tegra_se_remove,
-	.driver = {
-		.name = "tegra-se-nvhost",
-		.owner = THIS_MODULE,
-		.of_match_table = of_match_ptr(tegra_se_of_match),
-		.suppress_bind_attrs = true,
-	},
-};
-
-static int __init tegra_se_module_init(void)
-{
-	int err;
-
-	err = host1x_driver_register(&host1x_se_driver);
-	if (err < 0)
-		return err;
-
-	err = platform_driver_register(&tegra_se_driver);
-	if (err < 0) {
-		host1x_driver_unregister(&host1x_se_driver);
-		return err;
-	}
-
-	return 0;
-}
-
-static void __exit tegra_se_module_exit(void)
-{
-	platform_driver_unregister(&tegra_se_driver);
-}
-
-late_initcall(tegra_se_module_init);
-module_exit(tegra_se_module_exit);
-
-MODULE_DESCRIPTION("Tegra Crypto algorithm support using Host1x Interface");
-MODULE_AUTHOR("NVIDIA Corporation");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("tegra-se-nvhost");
diff --git a/drivers/crypto/tegra-se-nvhost.h b/drivers/crypto/tegra-se-nvhost.h
deleted file mode 100644
index b21afc8c..00000000
--- a/drivers/crypto/tegra-se-nvhost.h
+++ /dev/null
@@ -1,510 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- *
- * Header file for Tegra Security Engine
- */
-
-#ifndef _CRYPTO_TEGRA_SE_H
-#define _CRYPTO_TEGRA_SE_H
-
-#include
-#include
-
-#define PFX	"tegra-se-nvhost: "
-
-#define ENCRYPT	1
-#define DECRYPT	0
-
-#define TEGRA_SE_CRA_PRIORITY	300
-#define TEGRA_SE_COMPOSITE_PRIORITY	400
-#define TEGRA_SE_CRYPTO_QUEUE_LENGTH	100
-#define SE_MAX_SRC_SG_COUNT	50
-#define SE_MAX_DST_SG_COUNT	50
-
-#define TEGRA_SE_KEYSLOT_COUNT	16
-#define SE_MAX_LAST_BLOCK_SIZE	0xFFFFF
-
-/* SE register definitions */
-#define SE1_AES0_CONFIG_REG_OFFSET	0x204
-#define SE2_AES1_CONFIG_REG_OFFSET	0x404
-
-#define SE_AES_CRYPTO_CONFIG_OFFSET	0x4
-#define SE_AES_IN_ADDR_OFFSET	0x8
-#define SE_AES_IN_ADDR_HI_OFFSET	0xC
-#define SE_AES_OUT_ADDR_OFFSET	0x10
-#define SE_AES_OUT_ADDR_HI_OFFSET	0x14
-#define SE_AES_CRYPTO_LINEAR_CTR	0x18
-#define SE_AES_CRYPTO_LAST_BLOCK_OFFSET	0x28
-#define SE_AES_OPERATION_OFFSET	0x34
-#define SE_AES_CRYPTO_KEYTABLE_ADDR_OFFSET	0xB8
-#define SE_AES_CRYPTO_KEYTABLE_DATA_OFFSET	0xBC
-#define SE_AES_CRYPTO_CTR_SPARE	0xE0
-#define SE_AES_CTR_LITTLE_ENDIAN	1
-
-#define SE_CONFIG_ENC_ALG_SHIFT	12
-#define SE_CONFIG_DEC_ALG_SHIFT	8
-#define ALG_AES_ENC	1
-#define ALG_RNG	2
-#define ALG_SHA	3
-#define ALG_RSA	4
-#define ALG_NOP	0
-#define ALG_AES_DEC	1
-#define ALG_KEYFETCH	5
-#define ALG_HMAC	7
-#define ALG_KDF	8
-#define ALG_INS	13
-#define SE_CONFIG_ENC_ALG(x)	(x << SE_CONFIG_ENC_ALG_SHIFT)
-#define SE_CONFIG_DEC_ALG(x)	(x << SE_CONFIG_DEC_ALG_SHIFT)
-#define SE_CONFIG_DST_SHIFT	2
-#define DST_MEMORY	0
-#define DST_HASHREG	1
-#define DST_KEYTAB	2
-#define DST_SRK	3
-#define DST_RSAREG	4
-#define SE_CONFIG_DST(x)	(x << SE_CONFIG_DST_SHIFT)
-#define SE_CONFIG_ENC_MODE_SHIFT	24
-#define SE_CONFIG_DEC_MODE_SHIFT	16
-#define MODE_KEY128	0
-#define MODE_KEY192	1
-#define MODE_KEY256	2
-#define MODE_GMAC	3
-#define MODE_GCM	4
-#define MODE_GCM_FINAL	5
-#define MODE_CMAC	7
-
-#define MODE_SHA1	0
-#define MODE_SHA224	4
-#define MODE_SHA256	5
-#define MODE_SHA384	6
-#define MODE_SHA512	7
-#define MODE_SHA3_224	9
-#define MODE_SHA3_256	10
-#define MODE_SHA3_384	11
-#define MODE_SHA3_512	12
-#define MODE_SHAKE128	13
-#define MODE_SHAKE256	14
-#define MODE_HMAC_SHA256_1KEY	0
-#define MODE_HMAC_SHA256_2KEY	1
-#define SE_CONFIG_ENC_MODE(x)	(x << SE_CONFIG_ENC_MODE_SHIFT)
-#define SE_CONFIG_DEC_MODE(x)	(x << SE_CONFIG_DEC_MODE_SHIFT)
-
-#define SE_RNG_CONFIG_REG_OFFSET	0x234
-#define DRBG_MODE_SHIFT	0
-#define DRBG_MODE_NORMAL	0
-#define DRBG_MODE_FORCE_INSTANTION	1
-#define DRBG_MODE_FORCE_RESEED	2
-#define SE_RNG_CONFIG_MODE(x)	(x << DRBG_MODE_SHIFT)
-
-#define SE_RNG_SRC_CONFIG_REG_OFFSET	0x2d8
-#define DRBG_RO_ENT_SRC_SHIFT	1
-#define DRBG_RO_ENT_SRC_ENABLE	1
-#define DRBG_RO_ENT_SRC_DISABLE	0
-#define SE_RNG_SRC_CONFIG_RO_ENT_SRC(x)	(x << DRBG_RO_ENT_SRC_SHIFT)
-#define DRBG_RO_ENT_SRC_LOCK_SHIFT	0
-#define DRBG_RO_ENT_SRC_LOCK_ENABLE	1
-#define DRBG_RO_ENT_SRC_LOCK_DISABLE	0
-#define SE_RNG_SRC_CONFIG_RO_ENT_SRC_LOCK(x)	(x << DRBG_RO_ENT_SRC_LOCK_SHIFT)
-
-#define DRBG_SRC_SHIFT	2
-#define DRBG_SRC_NONE	0
-#define DRBG_SRC_ENTROPY	1
-#define DRBG_SRC_LFSR	2
-#define SE_RNG_CONFIG_SRC(x)	(x << DRBG_SRC_SHIFT)
-
-#define SE_RNG_RESEED_INTERVAL_REG_OFFSET	0x2dc
-
-#define SE_KEYTABLE_REG_OFFSET	0x31c
-#define SE_CRYPTO_KEYIV_PKT_SUBKEY_SEL_SHIFT	3
-#define SE_CRYPTO_KEYIV_PKT_SUBKEY_SEL(x) \
-		(x << SE_CRYPTO_KEYIV_PKT_SUBKEY_SEL_SHIFT)
-#define SUBKEY_SEL_KEY1	0
-#define SUBKEY_SEL_KEY2	1
-#define SE_KEYTABLE_SLOT_SHIFT	4
-#define SE_KEYTABLE_SLOT(x)	(x << SE_KEYTABLE_SLOT_SHIFT)
-#define SE_KEYTABLE_QUAD_SHIFT	2
-#define QUAD_KEYS_128	0
-#define QUAD_KEYS_192	1
-#define QUAD_KEYS_256	1
-#define QUAD_ORG_IV	2
-#define QUAD_UPDTD_IV	3
-#define SE_KEYTABLE_QUAD(x)	(x << SE_KEYTABLE_QUAD_SHIFT)
-#define SE_KEYTABLE_OP_TYPE_SHIFT	9
-#define OP_READ	0
-#define OP_WRITE	1
-#define SE_KEYTABLE_OP_TYPE(x)	(x << SE_KEYTABLE_OP_TYPE_SHIFT)
-#define SE_KEYTABLE_TABLE_SEL_SHIFT	8
-#define TABLE_KEYIV	0
-#define TABLE_SCHEDULE	1
-#define SE_KEYTABLE_TABLE_SEL(x)	(x << SE_KEYTABLE_TABLE_SEL_SHIFT)
-#define SE_KEYTABLE_PKT_SHIFT	0
-#define SE_KEYTABLE_PKT(x)	(x << SE_KEYTABLE_PKT_SHIFT)
-
-#define SE_OP_DONE_SHIFT	4
-#define OP_DONE	1
-#define SE_OP_DONE(x, y)	((x) && (y << SE_OP_DONE_SHIFT))
-
-#define SE_CRYPTO_HASH_SHIFT	0
-#define HASH_DISABLE	0
-#define HASH_ENABLE	1
-#define SE_CRYPTO_HASH(x)	(x << SE_CRYPTO_HASH_SHIFT)
-
-#define SE4_SHA_IN_ADDR_OFFSET	0x8
-#define SE4_SHA_TASK_CONFIG	0x108
-#define HW_INIT_HASH_DISABLE	0
-#define HW_INIT_HASH_ENABLE	1
-#define SE4_HW_INIT_HASH_SHIFT	0
-#define SE4_HW_INIT_HASH(x)	(x << SE4_HW_INIT_HASH_SHIFT)
-
-#define SE_CRYPTO_XOR_POS_SHIFT	1
-#define XOR_BYPASS	0
-#define XOR_BOTH	1
-#define XOR_TOP	2
-#define XOR_BOTTOM	3
-#define SE_CRYPTO_XOR_POS(x)	(x << SE_CRYPTO_XOR_POS_SHIFT)
-#define SE_CRYPTO_INPUT_SEL_SHIFT	3
-#define INPUT_MEMORY	0
-#define INPUT_RANDOM	1
-#define INPUT_AESOUT	2
-#define INPUT_LNR_CTR	3
-#define SE_CRYPTO_INPUT_SEL(x)	(x << SE_CRYPTO_INPUT_SEL_SHIFT)
-#define SE_CRYPTO_VCTRAM_SEL_SHIFT	5
-#define VCTRAM_MEMORY	0
-#define VCTRAM_TWEAK	1
-#define VCTRAM_AESOUT	2
-#define VCTRAM_PREVAHB	3
-#define SE_CRYPTO_VCTRAM_SEL(x)	(x << SE_CRYPTO_VCTRAM_SEL_SHIFT)
-#define SE_CRYPTO_IV_SEL_SHIFT	7
-#define IV_ORIGINAL	0
-#define IV_UPDATED	1
-#define IV_REG	2
-#define SE_CRYPTO_IV_SEL(x)	(x << SE_CRYPTO_IV_SEL_SHIFT)
-#define SE_CRYPTO_CORE_SEL_SHIFT	9
-#define CORE_DECRYPT	0
-#define CORE_ENCRYPT	1
-#define SE_CRYPTO_CORE_SEL(x)	(x << SE_CRYPTO_CORE_SEL_SHIFT)
-#define SE_CRYPTO_KEY2_INDEX_SHIFT	28
-#define SE_CRYPTO_KEY2_INDEX(x)	(x << SE_CRYPTO_KEY2_INDEX_SHIFT)
-#define SE_CRYPTO_KEY_INDEX_SHIFT	24
-#define SE_CRYPTO_KEY_INDEX(x)	(x << SE_CRYPTO_KEY_INDEX_SHIFT)
-#define SE_CRYPTO_CTR_CNTN_SHIFT	11
-#define SE_CRYPTO_CTR_CNTN(x)	(x << SE_CRYPTO_CTR_CNTN_SHIFT)
-
-#define SE_CRYPTO_CTR_REG_COUNT	4
-
-#define OP_START	1
-#define OP_RESTART_OUT	2
-#define OP_CTX_SAVE	3
-#define OP_RESTART_IN	4
-#define OP_RESTART_INOUT	5
-#define OP_DUMMY	6
-#define SE_OPERATION_OP_SHIFT	0
-#define SE_OPERATION_OP(x)	(x << SE_OPERATION_OP_SHIFT)
-
-#define SE_OPERATION_LASTBUF_SHIFT	16
-#define SE_OPERATION_LASTBUF(x)	(x << SE_OPERATION_LASTBUF_SHIFT)
-#define LASTBUF_TRUE	1
-#define LASTBUF_FALSE	0
-
-#define SE_OPERATION_WRSTALL_SHIFT	15
-#define SE_OPERATION_WRSTALL(x)	(x << SE_OPERATION_WRSTALL_SHIFT)
-#define WRSTALL_TRUE	1
-#define WRSTALL_FALSE	0
-
-#define SE_OPERATION_FINAL_SHIFT	5
-#define SE_OPERATION_FINAL(x)	(x << SE_OPERATION_FINAL_SHIFT)
-#define FINAL_TRUE	1
-#define FINAL_FALSE	0
-
-#define SE_OPERATION_INIT_SHIFT	4
-#define SE_OPERATION_INIT(x)	(x << SE_OPERATION_INIT_SHIFT)
-#define INIT_TRUE	1
-#define INIT_FALSE	0
-
-#define SE_ADDR_HI_MSB_SHIFT	24
-#define SE_ADDR_HI_SZ_SHIFT	0
-#define SE_ADDR_HI_MSB(x)	(x << SE_ADDR_HI_MSB_SHIFT)
-#define MSB(x)	((x & 0xFF00000000) >> 32)
-#define SE_ADDR_HI_SZ(x)	(x << SE_ADDR_HI_SZ_SHIFT)
-
-#define SE_LAST_BLOCK_RESIDUAL_BITS_SHIFT	20
-#define SE_LAST_BLOCK_RESIDUAL_BITS(x)	(x << SE_LAST_BLOCK_RESIDUAL_BITS_SHIFT)
-
-#define SE_BUFF_SIZE_MASK	0xFF000000
-
-#define SE_MAX_TASKS_PER_SUBMIT	64
-#define SE_MAX_SUBMIT_CHAIN_SZ	10
-#define SE_WORD_SIZE_BYTES	4
-
-#define SE_MAX_MEM_ALLOC	4194304
-#define SE_MAX_GATHER_BUF_SZ	32768
-#define SE_MAX_AESBUF_ALLOC	(SE_MAX_MEM_ALLOC / SE_MAX_GATHER_BUF_SZ)
-#define SE_MAX_AESBUF_TIMEOUT	(20 * SE_MAX_AESBUF_ALLOC)
-
-/* FIXME: The below 2 macros should fine tuned
- * based on discussions with CPU team
- */
-#define SE_MAX_CMDBUF_TIMEOUT	(200 * SE_MAX_SUBMIT_CHAIN_SZ)
-#define SE_WAIT_UDELAY	500 /* micro seconds */
-
-#define SE_KEYSLOT_TIMEOUT	100
-#define SE_KEYSLOT_MDELAY	1000
-
-#define SE_INT_ENABLE_REG_OFFSET	0x88
-#define SE1_INT_ENABLE_SHIFT	1
-#define SE1_INT_ENABLE(x)	(x << SE1_INT_ENABLE_SHIFT)
-#define SE2_INT_ENABLE_SHIFT	0
-#define SE2_INT_ENABLE(x)	(x << SE2_INT_ENABLE_SHIFT)
-#define SE3_INT_ENABLE_SHIFT	2
-#define SE3_INT_ENABLE(x)	(x << SE3_INT_ENABLE_SHIFT)
-#define SE4_INT_ENABLE_SHIFT	3
-#define SE4_INT_ENABLE(x)	(x << SE4_INT_ENABLE_SHIFT)
-
-#define INT_DISABLE	0
-#define INT_ENABLE	1
-
-#define SE1_AES0_INT_ENABLE_OFFSET	0x2EC
-#define SE2_AES1_INT_ENABLE_OFFSET	0x4EC
-#define SE3_RSA_INT_ENABLE_OFFSET	0x754
-#define SE4_SHA_INT_ENABLE_OFFSET	0x180
-
-#define SE1_AES0_INT_STATUS_REG_OFFSET	0x2F0
-#define SE2_AES1_INT_STATUS_REG_OFFSET	0x4F0
-#define SE3_RSA_INT_STATUS_REG_OFFSET	0x758
-#define SE4_SHA_INT_STATUS_REG_OFFSET	0x184
-
-#define SE_CRYPTO_KEYTABLE_DST_REG_OFFSET	0X330
-#define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT	0
-#define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD(x) \
-		(x << SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT)
-
-#define SE_KEYTABLE_QUAD_SIZE_BYTES	16
-
-#define SE_SPARE_0_REG_OFFSET	0x80c
-
-#define TEGRA_SE_SHA_MAX_BLOCK_SIZE	128
-
-#define SE4_SHA_CONFIG_REG_OFFSET	0x104
-#define SE_SHA_MSG_LENGTH_OFFSET	0x18
-#define SE_SHA_OPERATION_OFFSET	0x78
-#define SE_SHA_HASH_LENGTH	0xa8
-
-#define SHA_DISABLE	0
-#define SHA_ENABLE	1
-
-#define SE_HASH_RESULT_REG_OFFSET	0x13c
-#define SE_CMAC_RESULT_REG_OFFSET	0x4c4
-#define T234_SE_CMAC_RESULT_REG_OFFSET	0x0c0
-
-#define SE_STATIC_MEM_ALLOC_BUFSZ	512
-
-#define TEGRA_SE_KEY_256_SIZE	32
-#define TEGRA_SE_KEY_512_SIZE	64
-#define TEGRA_SE_KEY_192_SIZE	24
-#define TEGRA_SE_KEY_128_SIZE	16
-#define TEGRA_SE_AES_BLOCK_SIZE	16
-#define TEGRA_SE_AES_MIN_KEY_SIZE	16
-#define TEGRA_SE_AES_MAX_KEY_SIZE	64
-#define TEGRA_SE_AES_IV_SIZE	16
-#define TEGRA_SE_RNG_IV_SIZE	16
-#define TEGRA_SE_RNG_DT_SIZE	16
-#define TEGRA_SE_RNG_KEY_SIZE	16
-#define TEGRA_SE_RNG_SEED_SIZE	(TEGRA_SE_RNG_IV_SIZE + \
-				 TEGRA_SE_RNG_KEY_SIZE + \
-				 TEGRA_SE_RNG_DT_SIZE)
-#define TEGRA_SE_AES_CMAC_DIGEST_SIZE	16
-#define TEGRA_SE_AES_CBC_MAC_DIGEST_SIZE	16
-#define TEGRA_SE_RSA512_INPUT_SIZE	64
-#define TEGRA_SE_RSA1024_INPUT_SIZE	128
-#define TEGRA_SE_RSA1536_INPUT_SIZE	192
-#define TEGRA_SE_RSA2048_INPUT_SIZE	256
-
-#define TEGRA_SE_AES_CMAC_STATE_SIZE	16
-#define SHA1_STATE_SIZE	20
-#define SHA224_STATE_SIZE	32
-#define SHA256_STATE_SIZE	32
-#define SHA384_STATE_SIZE	64
-#define SHA512_STATE_SIZE	64
-#define SHA3_224_STATE_SIZE	200
-#define SHA3_256_STATE_SIZE	200
-#define SHA3_384_STATE_SIZE	200
-#define SHA3_512_STATE_SIZE	200
-
-#define TEGRA_SE_RSA_KEYSLOT_COUNT	4
-#define SE_RSA_OUTPUT	0x628
-
-#define RSA_KEY_SLOT_ONE	0
-#define RSA_KEY_SLOT_TW0	1
-#define RSA_KEY_SLOT_THREE	2
-#define RSA_KEY_SLOT_FOUR	3
-#define RSA_KEY_NUM_SHIFT	7
-#define RSA_KEY_NUM(x)	(x << RSA_KEY_NUM_SHIFT)
-
-#define RSA_KEY_TYPE_EXP	0
-#define RSA_KEY_TYPE_MOD	1
-#define RSA_KEY_TYPE_SHIFT	6
-#define RSA_KEY_TYPE(x)	(x << RSA_KEY_TYPE_SHIFT)
-
-#define RSA_KEY_SLOT_SHIFT	23
-#define RSA_KEY_SLOT(x)	(x << RSA_KEY_SLOT_SHIFT)
-
-#define SE3_RSA_CONFIG_REG_OFFSET	0x604
-#define SE_RSA_OPERATION_OFFSET	0x20
-#define SE_RSA_KEYTABLE_ADDR_OFFSET	0x148
-#define SE_RSA_KEYTABLE_DATA_OFFSET	0x14C
-
-#define RSA_KEY_PKT_WORD_ADDR_SHIFT	0
-#define RSA_KEY_PKT_WORD_ADDR(x)	(x << RSA_KEY_PKT_WORD_ADDR_SHIFT)
-
-#define SE_RSA_KEYTABLE_PKT_SHIFT	0
-#define SE_RSA_KEYTABLE_PKT(x)	(x << SE_RSA_KEYTABLE_PKT_SHIFT)
-
-#define SE_MAGIC_PATTERN	0x4E56
-#define SE_STORE_KEY_IN_MEM	0x0001
-#define SE_SLOT_NUM_MASK	0xF000
-#define SE_SLOT_POSITION	12
-#define SE_KEY_LEN_MASK	0x3FF
-#define SE_MAGIC_PATTERN_OFFSET	16
-#define SE_STREAMID_REG_OFFSET	0x90
-
-#define SE_AES_CRYPTO_AAD_LENGTH_0_OFFSET	0x128
-#define SE_AES_CRYPTO_MSG_LENGTH_0_OFFSET	0x130
-
-#define SE_AES_GCM_GMAC_SIZE	16
-
-/* Key manifest */
-#define SE_KEYMANIFEST_ORIGIN(x)	(x << 0)
-
-#define SE_KEYMANIFEST_USER(x)	(x << 4)
-#define NS	3
-
-#define SE_KEYMANIFEST_PURPOSE(x)	(x << 8)
-#define ENC	0
-#define CMAC	1
-#define HMAC	2
-#define KW	3
-#define KUW	4
-#define KWUW	5
-#define KDK	6
-#define KDD	7
-#define KDD_KUW	8
-#define XTS	9
-#define GCM	10
-
-#define SE_KEYMANIFEST_SIZE(x)	(x << 14)
-#define KEY128	0
-#define KEY192	1
-#define KEY256	2
-
-#define SE_KEYMANIFEST_EX(x)	(x << 12)
-
-#define SE_AES_CRYPTO_KEYTABLE_KEYMANIFEST_OFFSET	0x110
-
-#define SE_AES_CRYPTO_KEYTABLE_DST_OFFSET	0x2c
-
-#define SE_AES_KEY_INDEX(x)	(x << 8)
-
-#define SE_SHA_CRYPTO_KEYTABLE_KEYMANIFEST_OFFSET	0x98
-#define SE_SHA_CRYPTO_KEYTABLE_DST_OFFSET	0xa4
-#define SE_SHA_CRYPTO_KEYTABLE_ADDR_OFFSET	0x90
-#define SE_SHA_CRYPTO_KEYTABLE_DATA_OFFSET	0x94
-
-/* cdma opcodes */
-#if 0
-static inline u32 nvhost_opcode_setclass(
-	unsigned class_id, unsigned offset, unsigned mask)
-{
-	return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
-}
-
-static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count)
-{
-	return (2 << 28) | (offset << 16) | count;
-}
-
-static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask)
-{
-	return (3 << 28) | (offset << 16) | mask;
-}
-
-static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value)
-{
-	return (4 << 28) | (offset << 16) | value;
-}
-
-static inline u32 nvhost_opcode_restart(unsigned address)
-{
-	return (5 << 28) | (address >> 4);
-}
-
-static inline u32 nvhost_opcode_gather_nonincr(unsigned offset, unsigned count)
-{
-	return (6 << 28) | (offset << 16) | BIT(15) | count;
-}
-
-static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count)
-{
-	return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
-}
-
-static inline u32 nvhost_opcode_gather_insert(unsigned offset, unsigned incr,
-	unsigned count)
-{
-	return (6 << 28) | (offset << 16) | BIT(15) | (incr << 14) | count;
-}
-
-static inline u32 nvhost_opcode_setstreamid(unsigned streamid)
-{
-	return (7 << 28) | streamid;
-}
-
-static inline u32 nvhost_opcode_setpayload(unsigned payload)
-{
-	return (9 << 28) | payload;
-}
-
-static inline u32 nvhost_opcode_incr_w(unsigned int offset)
-{
-	/* 20-bit offset supported */
-	return (10 << 28) | offset;
-}
-
-static inline u32 nvhost_opcode_nonincr_w(unsigned int offset)
-{
-	/* 20-bit offset supported */
-	return (11 << 28) | offset;
-}
-
-static inline u32 nvhost_opcode_acquire_mlock(unsigned id)
-{
-	return (14 << 28) | id;
-}
-
-static inline u32 nvhost_opcode_release_mlock(unsigned id)
-{
-	return (14 << 28) | (1 << 24) | id;
-}
-
-static inline u32 nvhost_class_host_incr_syncpt_base(
-	unsigned base_indx, unsigned offset)
-{
-	return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
-		| host1x_uclass_incr_syncpt_base_offset_f(offset);
-}
-
-static inline u32 nvhost_class_host_incr_syncpt(
-	unsigned cond, unsigned indx)
-{
-	return host1x_uclass_incr_syncpt_cond_f(cond)
-		| host1x_uclass_incr_syncpt_indx_f(indx);
-}
-
-#define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0)
-
-static inline u32 nvhost_mask2(unsigned x, unsigned y)
-{
-	return 1 | (1 << (y - x));
-}
-#endif
-
-#endif /* _CRYPTO_TEGRA_SE_H */