The Crypto Engine is a hardware cryptographic offloader present
on all recent Allwinner SoCs (H3, R40, A64, H5, H6).
This driver also supports the Security System present on the A80 and A83T.
This driver supports the AES cipher in CTR/CBC/ECB/CTS modes.
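
For reference, the algorithms registered here are consumed through the
generic kernel skcipher API; below is a minimal sketch (hypothetical
helper name, error handling trimmed) of an in-kernel user encrypting a
buffer with cbc(aes), which this driver serves as cbc-aes-sun8i-ce when
it is the highest-priority provider:

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* The crypto core picks the best available "cbc(aes)" provider */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* len must be a multiple of AES_BLOCK_SIZE for CBC */
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* The request is asynchronous; wait for the hardware to finish */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}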
drivers/crypto/allwinner/Kconfig | 26 +
drivers/crypto/allwinner/Makefile | 1 +
drivers/crypto/allwinner/sun8i-ce/Makefile | 2 +
.../allwinner/sun8i-ce/sun8i-ce-cipher.c | 376 +++++++++
.../crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 783 ++++++++++++++++++
drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 333 ++++++++
6 files changed, 1521 insertions(+)
create mode 100644 drivers/crypto/allwinner/Makefile
create mode 100644 drivers/crypto/allwinner/sun8i-ce/Makefile
create mode 100644 drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
create mode 100644 drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
create mode 100644 drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
index ce194b26f43f..6e6ea65ca605 100644
--- a/drivers/crypto/allwinner/Kconfig
+++ b/drivers/crypto/allwinner/Kconfig
@@ -4,3 +4,29 @@ config CRYPTO_DEV_ALLWINNER
default y if ARCH_SUNXI
help
Say Y here to get to see options for Allwinner hardware crypto devices
+
+config CRYPTO_DEV_SUN8I_CE
+ tristate "Support for Allwinner Crypto Engine cryptographic accelerator"
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_ENGINE
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_AES
+ select CRYPTO_DES
+ depends on CRYPTO_DEV_ALLWINNER
+ help
+ Select y here to have support for the Crypto Engine available on
+ Allwinner SoCs A80, A83T, H2+, H3, H5, H6, R40 and A64.
+ The Crypto Engine handles AES/3DES ciphers in ECB/CBC/CTS/CTR modes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sun8i-ce.
+
+config CRYPTO_DEV_SUN8I_CE_DEBUG
+ bool "Enable sun8i-ce stats"
+ depends on CRYPTO_DEV_SUN8I_CE
+ depends on DEBUG_FS
+ help
+ Say y to enable sun8i-ce debug stats.
+ This will create /sys/kernel/debug/sun8i-ce/stats for displaying
+ the number of requests per flow and per algorithm.
diff --git a/drivers/crypto/allwinner/Makefile b/drivers/crypto/allwinner/Makefile
new file mode 100644
index 000000000000..11f02db9ee06
--- /dev/null
+++ b/drivers/crypto/allwinner/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN8I_CE) += sun8i-ce/
diff --git a/drivers/crypto/allwinner/sun8i-ce/Makefile b/drivers/crypto/allwinner/sun8i-ce/Makefile
new file mode 100644
index 000000000000..08b68c3c1ca9
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN8I_CE) += sun8i-ce.o
+sun8i-ce-y += sun8i-ce-core.o sun8i-ce-cipher.o
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
new file mode 100644
index 000000000000..a49075503949
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-cipher.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/A80/A83T SoC
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe....@gmail.com>
+ *
+ * This file adds support for the AES cipher with 128, 192 and 256-bit
+ * keysizes in CBC/ECB/CTR/CTS mode.
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi/README
+ */
+
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <crypto/internal/skcipher.h>
+#include "sun8i-ce.h"
+
+static int sun8i_ce_cipher(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ss = op->ss;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ss_alg_template *algt;
+ struct ce_task *cet;
+ struct scatterlist *in_sg = areq->src;
+ struct scatterlist *out_sg = areq->dst;
+ struct scatterlist *sg;
+ bool need_fallback = false;
+ unsigned int todo, len;
+ int flow, i;
+ int nr_sgs = 0;
+ int nr_sgd = 0;
+ int err = 0;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+
+ dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
+ crypto_tfm_alg_name(areq->base.tfm),
+ areq->cryptlen,
+ rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
+ op->keylen);
+
+ if (sg_nents(areq->src) > 8 || sg_nents(areq->dst) > 8)
+ need_fallback = true;
+
+ sg = areq->src;
+ while (sg && !need_fallback) {
+ if ((sg->length % 4) != 0)
+ need_fallback = true;
+ if ((sg_dma_len(sg) % 4) != 0)
+ need_fallback = true;
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+ need_fallback = true;
+ sg = sg_next(sg);
+ }
+ sg = areq->dst;
+ while (sg && !need_fallback) {
+ if ((sg->length % 4) != 0)
+ need_fallback = true;
+ if ((sg_dma_len(sg) % 4) != 0)
+ need_fallback = true;
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+ need_fallback = true;
+ sg = sg_next(sg);
+ }
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt->stat_req++;
+#endif
+
+ if (need_fallback) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt->stat_fb++;
+#endif
+ skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
+ skcipher_request_set_callback(subreq, areq->base.flags, NULL,
+ NULL);
+ skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->op_dir & CE_DECRYPTION)
+ err = crypto_skcipher_decrypt(subreq);
+ else
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+ return err;
+ }
+
+ flow = rctx->flow;
+
+ mutex_lock(&ss->chanlist[flow].lock);
+
+ cet = ss->chanlist[flow].tl;
+ memset(cet, 0, sizeof(struct ce_task));
+
+ cet->t_id = flow;
+ cet->t_common_ctl = ss->variant->alg_cipher[algt->ce_algo_id];
+ cet->t_common_ctl |= rctx->op_dir | CE_COMM_INT;
+ cet->t_dlen = areq->cryptlen / 4;
+ /* CTS and recent CE (H6) need the length in bytes, in words otherwise */
+ if (algt->ce_blockmode == CE_ID_OP_CTS || ss->variant->model == CE_v2)
+ cet->t_dlen = areq->cryptlen;
+
+ cet->t_sym_ctl = ss->variant->op_mode[algt->ce_blockmode];
+ switch (op->keylen) {
+ case 128 / 8:
+ cet->t_sym_ctl |= CE_AES_128BITS;
+ break;
+ case 192 / 8:
+ cet->t_sym_ctl |= CE_AES_192BITS;
+ break;
+ case 256 / 8:
+ cet->t_sym_ctl |= CE_AES_256BITS;
+ break;
+ }
+ if (algt->ce_blockmode == CE_ID_OP_CTR)
+ cet->t_sym_ctl |= CE_CTR_128;
+ if (algt->ce_blockmode == CE_ID_OP_CTS)
+ cet->t_sym_ctl |= CE_CTS;
+ cet->t_asym_ctl = 0;
+
+ ss->chanlist[flow].op_mode = ss->variant->op_mode[algt->ce_blockmode];
+ if (algt->ce_blockmode == CE_ID_OP_CTR)
+ ss->chanlist[flow].op_mode |= SS_CTR_128;
+ ss->chanlist[flow].op_dir = rctx->op_dir;
+ ss->chanlist[flow].method = ss->variant->alg_cipher[algt->ce_algo_id];
+ ss->chanlist[flow].keylen = op->keylen;
+
+ cet->t_key = dma_map_single(ss->dev, op->key, op->keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, cet->t_key)) {
+ dev_err(ss->dev, "Cannot DMA MAP KEY\n");
+ err = -EFAULT;
+ goto theend;
+ }
+
+ if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
+ ss->chanlist[flow].bounce_iv = NULL;
+ ss->chanlist[flow].next_iv = NULL;
+ ss->chanlist[flow].ivlen = crypto_skcipher_ivsize(tfm);
+ ss->chanlist[flow].bounce_iv = kzalloc(ss->chanlist[flow].ivlen,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->chanlist[flow].bounce_iv) {
+ err = -ENOMEM;
+ goto theend_key;
+ }
+ ss->chanlist[flow].next_iv = kzalloc(ss->chanlist[flow].ivlen,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->chanlist[flow].next_iv) {
+ err = -ENOMEM;
+ goto theend_iv;
+ }
+ memcpy(ss->chanlist[flow].bounce_iv, areq->iv,
+ crypto_skcipher_ivsize(tfm));
+ }
+
+ if (in_sg == out_sg) {
+ nr_sgs = dma_map_sg(ss->dev, in_sg, sg_nents(in_sg),
+ DMA_BIDIRECTIONAL);
+ if (nr_sgs <= 0 || nr_sgs > 8) {
+ dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ nr_sgd = nr_sgs;
+ } else {
+ nr_sgs = dma_map_sg(ss->dev, in_sg, sg_nents(in_sg),
+ DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 8) {
+ dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ nr_sgd = dma_map_sg(ss->dev, out_sg, sg_nents(out_sg),
+ DMA_FROM_DEVICE);
+ if (nr_sgd <= 0 || nr_sgd > 8) {
+ dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+ }
+
+ len = areq->cryptlen;
+ for_each_sg(in_sg, sg, nr_sgs, i) {
+ cet->t_src[i].addr = sg_dma_address(sg);
+ todo = min(len, sg_dma_len(sg));
+ cet->t_src[i].len = todo / 4;
+ dev_dbg(ss->dev, "%s total=%u SG(%d %u) todo=%u\n", __func__,
+ areq->cryptlen, i, cet->t_src[i].len, todo);
+ len -= todo;
+ }
+
+ len = areq->cryptlen;
+ for_each_sg(out_sg, sg, nr_sgd, i) {
+ cet->t_dst[i].addr = sg_dma_address(sg);
+ todo = min(len, sg_dma_len(sg));
+ cet->t_dst[i].len = todo / 4;
+ dev_dbg(ss->dev, "%s total=%u SG(%d %u) todo=%u\n", __func__,
+ areq->cryptlen, i, cet->t_dst[i].len, todo);
+ len -= todo;
+ }
+
+ err = sun8i_ce_run_task(ss, flow, "cipher");
+
+theend_sgs:
+ if (in_sg == out_sg) {
+ dma_unmap_sg(ss->dev, in_sg, nr_sgs, DMA_BIDIRECTIONAL);
+ } else {
+ if (nr_sgs > 0)
+ dma_unmap_sg(ss->dev, in_sg, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ss->dev, out_sg, nr_sgd, DMA_FROM_DEVICE);
+ }
+
+theend_iv:
+ if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
+ if (ss->chanlist[flow].next_iv) {
+ memcpy(areq->iv, ss->chanlist[flow].next_iv,
+ ss->chanlist[flow].ivlen);
+ memzero_explicit(ss->chanlist[flow].bounce_iv,
+ ss->chanlist[flow].ivlen);
+ memzero_explicit(ss->chanlist[flow].next_iv,
+ ss->chanlist[flow].ivlen);
+ }
+ kfree(ss->chanlist[flow].bounce_iv);
+ kfree(ss->chanlist[flow].next_iv);
+ ss->chanlist[flow].bounce_iv = NULL;
+ ss->chanlist[flow].next_iv = NULL;
+ }
+
+theend_key:
+ dma_unmap_single(ss->dev, cet->t_key, op->keylen, DMA_TO_DEVICE);
+
+theend:
+ mutex_unlock(&ss->chanlist[flow].lock);
+
+ return err;
+}
+
+static int handle_cipher_request(struct crypto_engine *engine, void *areq)
+{
+ int err;
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+
+ err = sun8i_ce_cipher(breq);
+ crypto_finalize_skcipher_request(engine, breq, err);
+
+ return 0;
+}
+
+int sun8i_ce_skdecrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ int e = get_engine_number(op->ss);
+ struct crypto_engine *engine = op->ss->chanlist[e].engine;
+
+ rctx->op_dir = CE_DECRYPTION;
+ rctx->flow = e;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sun8i_ce_skencrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ int e = get_engine_number(op->ss);
+ struct crypto_engine *engine = op->ss->chanlist[e].engine;
+
+ rctx->op_dir = CE_ENCRYPTION;
+ rctx->flow = e;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct sun8i_ss_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
+
+ memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+ op->ss = algt->ss;
+
+ sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);
+
+ op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ op->enginectx.op.do_one_request = handle_cipher_request;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ crypto_free_sync_skcipher(op->fallback_tfm);
+}
+
+int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ss = op->ss;
+
+ switch (keylen) {
+ case 128 / 8:
+ break;
+ case 192 / 8:
+ break;
+ case 256 / 8:
+ break;
+ default:
+ dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmalloc(keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+ memcpy(op->key, key, keylen);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
+
+int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ss = op->ss;
+
+ if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
+ dev_err(ss->dev, "Invalid keylen %u\n", keylen);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmalloc(keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+ memcpy(op->key, key, keylen);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
new file mode 100644
index 000000000000..35ef9cd7db02
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-core.c - hardware cryptographic accelerator for
+ * Allwinner H3/A64/H5/H2+/H6/A80/A83T/R40 SoC
+ *
+ * Copyright (C) 2015-2019 Corentin Labbe <clabbe....@gmail.com>
+ *
+ * Core file which registers crypto algorithms supported by the CryptoEngine.
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi/README
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <crypto/internal/skcipher.h>
+
+#include "sun8i-ce.h"
+
+static const struct ce_variant ce_h3_variant = {
+ .alg_cipher = { CE_ID_NOTSUPP, CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, },
+ .op_mode = { CE_ID_NOTSUPP, CE_OP_ECB, CE_OP_CBC, CE_OP_CTR,
+ CE_OP_CTS, CE_ID_NOTSUPP, CE_ID_NOTSUPP, CE_ID_NOTSUPP
+ },
+ .intreg = CE_ISR,
+ .maxflow = 4,
+};
+
+static const struct ce_variant ce_h5_variant = {
+ .alg_cipher = { CE_ID_NOTSUPP, CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, },
+ .op_mode = { CE_ID_NOTSUPP, CE_OP_ECB, CE_OP_CBC, CE_OP_CTR,
+ CE_OP_CTS, CE_ID_NOTSUPP, CE_ID_NOTSUPP, CE_ID_NOTSUPP
+ },
+ .intreg = CE_ISR,
+ .maxflow = 4,
+};
+
+static const struct ce_variant ce_h6_variant = {
+ .alg_cipher = { CE_ID_NOTSUPP, CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, },
+ .op_mode = { CE_ID_NOTSUPP, CE_OP_ECB, CE_OP_CBC, CE_OP_CTR,
+ CE_OP_CTS, CE_ID_NOTSUPP, CE_ID_NOTSUPP, CE_ID_NOTSUPP
+ },
+ .model = CE_v2,
+ .intreg = CE_ISR,
+ .maxflow = 4,
+};
+
+static const struct ce_variant ce_a64_variant = {
+ .alg_cipher = { CE_ID_NOTSUPP, CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, },
+ .op_mode = { CE_ID_NOTSUPP, CE_OP_ECB, CE_OP_CBC, CE_OP_CTR,
+ CE_OP_CTS, CE_ID_NOTSUPP, CE_ID_NOTSUPP, CE_ID_NOTSUPP
+ },
+ .intreg = CE_ISR,
+ .maxflow = 4,
+};
+
+static const struct ce_variant ce_r40_variant = {
+ .alg_cipher = { CE_ID_NOTSUPP, CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, },
+ .op_mode = { CE_ID_NOTSUPP, CE_OP_ECB, CE_OP_CBC, CE_OP_CTR,
+ CE_OP_CTS, CE_ID_NOTSUPP, CE_ID_NOTSUPP, CE_ID_NOTSUPP
+ },
+ .intreg = CE_ISR,
+ .maxflow = 4,
+};
+
+static const struct ce_variant ce_a83t_variant = {
+ .alg_cipher = { CE_ID_NOTSUPP, SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, },
+ .op_mode = { CE_ID_NOTSUPP, SS_OP_ECB, SS_OP_CBC, CE_ID_NOTSUPP,
+ CE_ID_NOTSUPP, CE_ID_NOTSUPP, CE_ID_NOTSUPP, CE_ID_NOTSUPP
+ },
+ .model = CE_SS,
+ .intreg = SS_INT_STA_REG,
+ .maxflow = 2,
+};
+
+int get_engine_number(struct sun8i_ce_dev *ss)
+{
+ return atomic_inc_return(&ss->flow) % ss->variant->maxflow;
+}
+
+static int sun8i_ss_run_task(struct sun8i_ce_dev *ss, int flow,
+ const char *name)
+{
+ int err = 0;
+ u32 v = 1;
+ struct ce_task *cet = ss->chanlist[flow].tl;
+ int i;
+ u32 *iv;
+
+ mutex_lock(&ss->mlock);
+ /* choose between stream0/stream1 */
+ if (flow)
+ v |= SS_FLOW1;
+ else
+ v |= SS_FLOW0;
+
+ v |= ss->chanlist[flow].op_mode;
+ v |= ss->chanlist[flow].method;
+
+ /* dir bit is different on SS */
+ if (ss->chanlist[flow].op_dir)
+ v |= SS_DECRYPTION;
+
+ switch (ss->chanlist[flow].keylen) {
+ case 128 / 8:
+ v |= CE_AES_128BITS << 7;
+ break;
+ case 192 / 8:
+ v |= CE_AES_192BITS << 7;
+ break;
+ case 256 / 8:
+ v |= CE_AES_256BITS << 7;
+ break;
+ }
+
+ /* enable INT for this flow */
+ writel(BIT(flow), ss->base + SS_INT_CTL_REG);
+
+ if (cet->t_key)
+ writel(cet->t_key, ss->base + SS_KEY_ADR_REG);
+
+ if (cet->t_iv)
+ writel(cet->t_iv, ss->base + SS_IV_ADR_REG);
+
+ for (i = 0; i < MAX_SG; i++) {
+ if (!cet->t_dst[i].addr)
+ break;
+ dev_dbg(ss->dev,
+ "Processing SG %d %s ctl=%x %d to %d method=%x opmode=%x opdir=%x srclen=%d\n",
+ i, name, v,
+ cet->t_src[i].len, cet->t_dst[i].len,
+ ss->chanlist[flow].method,
+ ss->chanlist[flow].op_mode,
+ ss->chanlist[flow].op_dir,
+ cet->t_src[i].len);
+
+ writel(cet->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
+ writel(cet->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
+ writel(cet->t_src[i].len, ss->base + SS_LEN_ADR_REG);
+
+ reinit_completion(&ss->chanlist[flow].complete);
+ ss->chanlist[flow].status = 0;
+ wmb();
+
+ writel(v, ss->base + SS_CTL_REG);
+ wait_for_completion_interruptible_timeout(&ss->chanlist[flow].complete,
+ msecs_to_jiffies(2000));
+ if (ss->chanlist[flow].status == 0) {
+ dev_err(ss->dev, "DMA timeout for %s\n", name);
+ err = -EINVAL;
+ goto theend;
+ }
+ }
+ /* copy next IV */
+ if (ss->chanlist[flow].next_iv) {
+ iv = ss->chanlist[flow].next_iv;
+ for (i = 0; i < 4; i++) {
+ if (flow)
+ *iv = readl(ss->base + SS_CTR_REG1 + i * 4);
+ else
+ *iv = readl(ss->base + SS_CTR_REG0 + i * 4);
+ iv++;
+ }
+ }
+theend:
+ mutex_unlock(&ss->mlock);
+
+ return err;
+}
+
+int sun8i_ce_run_task(struct sun8i_ce_dev *ss, int flow, const char *name)
+{
+ u32 v;
+ int err = 0;
+ struct ce_task *cet = ss->chanlist[flow].tl;
+
+ if (ss->chanlist[flow].bounce_iv) {
+ cet->t_iv = dma_map_single(ss->dev,
+ ss->chanlist[flow].bounce_iv,
+ ss->chanlist[flow].ivlen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, cet->t_iv)) {
+ dev_err(ss->dev, "Cannot DMA MAP IV\n");
+ return -EFAULT;
+ }
+ }
+
+ if (ss->chanlist[flow].next_iv) {
+ cet->t_ctr = dma_map_single(ss->dev,
+ ss->chanlist[flow].next_iv,
+ ss->chanlist[flow].ivlen,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ss->dev, cet->t_ctr)) {
+ dev_err(ss->dev, "Cannot DMA MAP IV\n");
+ err = -EFAULT;
+ goto err_next_iv;
+ }
+ }
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ ss->chanlist[flow].stat_req++;
+#endif
+
+ if (ss->variant->model == CE_SS) {
+ err = sun8i_ss_run_task(ss, flow, name);
+ } else {
+ mutex_lock(&ss->mlock);
+
+ v = readl(ss->base + CE_ICR);
+ v |= 1 << flow;
+ writel(v, ss->base + CE_ICR);
+
+ reinit_completion(&ss->chanlist[flow].complete);
+ writel(ss->chanlist[flow].t_phy, ss->base + CE_TDQ);
+
+ ss->chanlist[flow].status = 0;
+ /* Be sure all data is written before enabling the task */
+ wmb();
+
+ v = 1 | (ss->chanlist[flow].tl->t_common_ctl & 0x7F) << 8;
+ writel(v, ss->base + CE_TLR);
+ mutex_unlock(&ss->mlock);
+
+ wait_for_completion_interruptible_timeout(&ss->chanlist[flow].complete,
+ msecs_to_jiffies(5000));
+
+ if (ss->chanlist[flow].status == 0) {
+ dev_err(ss->dev, "DMA timeout for %s\n", name);
+ err = -EINVAL;
+ }
+ /* No need to lock for this read, the channel is locked so
+ * nothing could modify the error value for this channel
+ */
+ v = readl(ss->base + CE_ESR);
+ if (v) {
+ v >>= (flow * 4);
+ v &= 0xFF;
+ if (v) {
+ dev_err(ss->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ err = -EFAULT;
+ }
+ if (v & CE_ERR_ALGO_NOTSUP)
+ dev_err(ss->dev, "CE ERROR: algorithm not supported\n");
+ if (v & CE_ERR_DATALEN)
+ dev_err(ss->dev, "CE ERROR: data length error\n");
+ if (v & CE_ERR_KEYSRAM)
+ dev_err(ss->dev, "CE ERROR: keysram access error for AES\n");
+ if (v & CE_ERR_ADDR_INVALID)
+ dev_err(ss->dev, "CE ERROR: address invalid\n");
+ }
+ }
+
+ if (ss->chanlist[flow].next_iv) {
+ dma_unmap_single(ss->dev, cet->t_ctr,
+ ss->chanlist[flow].ivlen,
+ DMA_FROM_DEVICE);
+ }
+err_next_iv:
+ if (ss->chanlist[flow].bounce_iv) {
+ dma_unmap_single(ss->dev, cet->t_iv,
+ ss->chanlist[flow].ivlen,
+ DMA_TO_DEVICE);
+ }
+
+ return err;
+}
+
+static irqreturn_t ce_irq_handler(int irq, void *data)
+{
+ struct sun8i_ce_dev *ss = (struct sun8i_ce_dev *)data;
+ int flow = 0;
+ u32 p;
+
+ p = readl(ss->base + ss->variant->intreg);
+ for (flow = 0; flow < ss->variant->maxflow; flow++) {
+ if (p & (BIT(flow))) {
+ writel(BIT(flow), ss->base + ss->variant->intreg);
+ ss->chanlist[flow].status = 1;
+ complete(&ss->chanlist[flow].complete);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct sun8i_ss_alg_template ce_algs[] = {
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_AES,
+ .ce_blockmode = CE_ID_OP_CTR,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sun8i_ce_aes_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_AES,
+ .ce_blockmode = CE_ID_OP_CTS,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cts(cbc(aes))",
+ .cra_driver_name = "cts(cbc-aes-sun8i-ce)",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sun8i_ce_aes_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_AES,
+ .ce_blockmode = CE_ID_OP_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sun8i_ce_aes_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_AES,
+ .ce_blockmode = CE_ID_OP_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sun8i_ce_aes_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_DES3,
+ .ce_blockmode = CE_ID_OP_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = sun8i_ce_des3_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_DES3,
+ .ce_blockmode = CE_ID_OP_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = sun8i_ce_des3_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+};
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+static int sun8i_ce_dbgfs_read(struct seq_file *seq, void *v)
+{
+ struct sun8i_ce_dev *ss = seq->private;
+ int i;
+
+ for (i = 0; i < ss->variant->maxflow; i++)
+ seq_printf(seq, "Channel %d: req %lu\n", i, ss->chanlist[i].stat_req);
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s %lu %lu\n",
+ ce_algs[i].alg.skcipher.base.cra_driver_name,
+ ce_algs[i].alg.skcipher.base.cra_name,
+ ce_algs[i].stat_req, ce_algs[i].stat_fb);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int sun8i_ce_dbgfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sun8i_ce_dbgfs_read, inode->i_private);
+}
+
+static const struct file_operations sun8i_ce_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = sun8i_ce_dbgfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static int sun8i_ce_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ u32 v;
+ int err, i, ce_method, id, irq;
+ struct sun8i_ce_dev *ss;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
+ if (!ss)
+ return -ENOMEM;
+
+ ss->variant = of_device_get_match_data(&pdev->dev);
+ if (!ss->variant) {
+ dev_err(&pdev->dev, "Missing Crypto Engine variant\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ss->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ss->base)) {
+ err = PTR_ERR(ss->base);
+ dev_err(&pdev->dev, "Cannot request MMIO err=%d\n", err);
+ return err;
+ }
+
+ ss->busclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(ss->busclk)) {
+ err = PTR_ERR(ss->busclk);
+ dev_err(&pdev->dev, "Cannot get AHB CE clock err=%d\n", err);
+ return err;
+ }
+
+ ss->ssclk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(ss->ssclk)) {
+ err = PTR_ERR(ss->ssclk);
+ dev_err(&pdev->dev, "Cannot get CE clock err=%d\n", err);
+ return err;
+ }
+
+ if (ss->variant->model == CE_v2) {
+ ss->mbusclk = devm_clk_get(&pdev->dev, "mbus");
+ if (IS_ERR(ss->mbusclk)) {
+ err = PTR_ERR(ss->mbusclk);
+ dev_err(&pdev->dev, "Cannot get MBUS CE clock err=%d\n", err);
+ return err;
+ }
+ }
+
+ /* Get Non Secure IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Cannot get NS IRQ\n");
+ return irq;
+ }
+
+ err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0,
+ "sun8i-ce-ns", ss);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Cannot request NS IRQ\n");
+ return err;
+ }
+
+ ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
+ if (IS_ERR(ss->reset)) {
+ if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
+ return PTR_ERR(ss->reset);
+ dev_info(&pdev->dev, "No reset control found\n");
+ ss->reset = NULL;
+ }
+
+ err = clk_prepare_enable(ss->busclk);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(ss->ssclk);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n");
+ goto error_clk;
+ }
+
+ err = clk_prepare_enable(ss->mbusclk);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Cannot prepare_enable MBUS clk\n");
+ goto error_clk;
+ }
+
+ err = reset_control_deassert(ss->reset);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot deassert reset control\n");
+ goto error_ssclk;
+ }
+
+ v = readl(ss->base + CE_CTR);
+ v >>= 16;
+ v &= 0x07;
+ dev_info(&pdev->dev, "CE_NS Die ID %x\n", v);
+
+ ss->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ss);
+
+ mutex_init(&ss->mlock);
+
+ ss->chanlist = devm_kcalloc(ss->dev, ss->variant->maxflow,
+ sizeof(struct sun8i_ce_flow), GFP_KERNEL);
+ if (!ss->chanlist) {
+ err = -ENOMEM;
+ goto error_flow;
+ }
+
+ for (i = 0; i < ss->variant->maxflow; i++) {
+ init_completion(&ss->chanlist[i].complete);
+ mutex_init(&ss->chanlist[i].lock);
+
+ ss->chanlist[i].engine = crypto_engine_alloc_init(ss->dev, true);
+ if (!ss->chanlist[i].engine) {
+ dev_err(ss->dev, "Cannot allocate engine\n");
+ i--;
+ goto error_engine;
+ }
+ err = crypto_engine_start(ss->chanlist[i].engine);
+ if (err) {
+ dev_err(ss->dev, "Cannot start engine\n");
+ goto error_engine;
+ }
+ ss->chanlist[i].tl = dma_alloc_coherent(ss->dev,
+ sizeof(struct ce_task),
+ &ss->chanlist[i].t_phy,
+ GFP_KERNEL);
+ if (!ss->chanlist[i].tl) {
+ dev_err(ss->dev, "Cannot get DMA memory for task %d\n",
+ i);
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ }
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ ss->dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL);
+ if (IS_ERR_OR_NULL(ss->dbgfs_dir)) {
+ dev_err(ss->dev, "Failed to create debugfs dir\n");
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ ss->dbgfs_stats = debugfs_create_file("stats", 0444,
+ ss->dbgfs_dir, ss,
+ &sun8i_ce_debugfs_fops);
+ if (IS_ERR_OR_NULL(ss->dbgfs_stats)) {
+ dev_err(ss->dev, "Failed to create debugfs stats\n");
+ err = -ENOMEM;
+ goto error_debugfs;
+ }
+#endif
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ ce_algs[i].ss = ss;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ id = ce_algs[i].ce_algo_id;
+ ce_method = ss->variant->alg_cipher[id];
+ if (ce_method == CE_ID_NOTSUPP) {
+ dev_info(ss->dev,
+ "DEBUG: Algo of %s not supported\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].ss = NULL;
+ break;
+ }
+ id = ce_algs[i].ce_blockmode;
+ ce_method = ss->variant->op_mode[id];
+ if (ce_method == CE_ID_NOTSUPP) {
+ dev_info(ss->dev, "DEBUG: Blockmode of %s not supported\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].ss = NULL;
+ break;
+ }
+ dev_info(ss->dev, "DEBUG: Register %s\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
+ if (err) {
+ dev_err(ss->dev, "Failed to register %s\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].ss = NULL;
+ goto error_alg;
+ }
+ break;
+ }
+ }
+
+ return 0;
+error_alg:
+ i--;
+ for (; i >= 0; i--) {
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ if (ce_algs[i].ss)
+ crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
+ break;
+ }
+ }
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+error_debugfs:
+ debugfs_remove_recursive(ss->dbgfs_dir);
+#endif
+ i = ss->variant->maxflow - 1;
+error_engine:
+ while (i >= 0) {
+ crypto_engine_exit(ss->chanlist[i].engine);
+ if (ss->chanlist[i].tl)
+ dma_free_coherent(ss->dev, sizeof(struct ce_task),
+ ss->chanlist[i].tl,
+ ss->chanlist[i].t_phy);
+ i--;
+ }
+error_flow:
+ reset_control_assert(ss->reset);
+error_ssclk:
+ clk_disable_unprepare(ss->mbusclk);
+ clk_disable_unprepare(ss->ssclk);
+error_clk:
+ clk_disable_unprepare(ss->busclk);
+ return err;
+}
+
+static int sun8i_ce_remove(struct platform_device *pdev)
+{
+ int i, timeout;
+ struct sun8i_ce_dev *ss = platform_get_drvdata(pdev);
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ if (ce_algs[i].ss)
+ crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
+ break;
+ }
+ }
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ debugfs_remove_recursive(ss->dbgfs_dir);
+#endif
+
+ for (i = 0; i < ss->variant->maxflow; i++) {
+ crypto_engine_exit(ss->chanlist[i].engine);
+ timeout = 0;
+ while (mutex_is_locked(&ss->chanlist[i].lock) && timeout < 10) {
+ dev_info(ss->dev, "Wait for %d %d\n", i, timeout);
+ timeout++;
+ msleep(20);
+ }
+ dma_free_coherent(ss->dev, sizeof(struct ce_task),
+ ss->chanlist[i].tl,
+ ss->chanlist[i].t_phy);
+ }
+
+ reset_control_assert(ss->reset);
+ clk_disable_unprepare(ss->busclk);
+ clk_disable_unprepare(ss->ssclk);
+ clk_disable_unprepare(ss->mbusclk);
+ return 0;
+}
+
+static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
+ { .compatible = "allwinner,sun8i-h3-crypto",
+ .data = &ce_h3_variant },
+ { .compatible = "allwinner,sun50i-h5-crypto",
+ .data = &ce_h5_variant },
+ { .compatible = "allwinner,sun50i-h6-crypto",
+ .data = &ce_h6_variant },
+ { .compatible = "allwinner,sun50i-a64-crypto",
+ .data = &ce_a64_variant },
+ { .compatible = "allwinner,sun8i-r40-crypto",
+ .data = &ce_r40_variant },
+ { .compatible = "allwinner,sun8i-a83t-crypto",
+ .data = &ce_a83t_variant },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table);
+
+static struct platform_driver sun8i_ce_driver = {
+ .probe = sun8i_ce_probe,
+ .remove = sun8i_ce_remove,
+ .driver = {
+ .name = "sun8i-ce",
+ .of_match_table = sun8i_ce_crypto_of_match_table,
+ },
+};
+
+module_platform_driver(sun8i_ce_driver);
+
+MODULE_DESCRIPTION("Allwinner Crypto Engine cryptographic accelerator");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin Labbe <clabbe....@gmail.com>");
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
new file mode 100644
index 000000000000..53ca27ec60a8
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sun8i-ce.h - hardware cryptographic accelerator for
+ * Allwinner H3/A64/H5/H2+/H6/A80/A83T SoC
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe....@gmail.com>
+ */
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/skcipher.h>
+#include <linux/atomic.h>
+#include <linux/debugfs.h>
+#include <linux/crypto.h>
+
+/* CE Registers */
+#define CE_TDQ 0x00
+#define CE_CTR 0x04
+#define CE_ICR 0x08
+#define CE_ISR 0x0C
+#define CE_TLR 0x10
+#define CE_TSR 0x14
+#define CE_ESR 0x18
+#define CE_CSSGR 0x1C
+#define CE_CDSGR 0x20
+#define CE_CSAR 0x24
+#define CE_CDAR 0x28
+#define CE_TPR 0x2C
+
+/* Operation direction */
+#define SS_ENCRYPTION 0
+#define SS_DECRYPTION BIT(6)
+#define CE_ENCRYPTION 0
+#define CE_DECRYPTION BIT(8)
+
+/* CE Method H3/A64 */
+#define CE_ALG_AES 0
+#define CE_ALG_DES 1
+#define CE_ALG_3DES 2
+#define CE_ALG_MD5 16
+#define CE_ALG_SHA1 17
+#define CE_ALG_SHA224 18
+#define CE_ALG_SHA256 19
+#define CE_ALG_SHA384 20
+#define CE_ALG_SHA512 21
+#define CE_ALG_RSA 32
+#define CE_ALG_TRNG 48
+#define CE_ALG_PRNG 49
+#define CE_ALG_PRNGv2 0x1D
+
+#define CE_COMM_INT BIT(31)
+
+/* SS Method A83T */
+#define SS_ALG_AES 0
+#define SS_ALG_DES (1 << 2)
+#define SS_ALG_3DES (2 << 2)
+#define SS_ALG_MD5 (3 << 2)
+#define SS_ALG_PRNG (4 << 2)
+#define SS_ALG_TRNG (5 << 2)
+#define SS_ALG_SHA1 (6 << 2)
+#define SS_ALG_SHA224 (7 << 2)
+#define SS_ALG_SHA256 (8 << 2)
+#define SS_ALG_RSA (9 << 2)
+
+/* A80/A83T SS Registers */
+#define SS_CTL_REG 0x00
+#define SS_INT_CTL_REG 0x04
+#define SS_INT_STA_REG 0x08
+#define SS_KEY_ADR_REG 0x10
+#define SS_IV_ADR_REG 0x18
+#define SS_SRC_ADR_REG 0x20
+#define SS_DST_ADR_REG 0x28
+#define SS_LEN_ADR_REG 0x30
+#define SS_CTR_REG0 0x34
+#define SS_CTR_REG1 0x48
+
+#define CE_ID_NOTSUPP 0xFF
+
+#define CE_ID_CIPHER_AES 1
+#define CE_ID_CIPHER_DES 2
+#define CE_ID_CIPHER_DES3 3
+#define CE_ID_CIPHER_MAX 4
+
+#define CE_ID_OP_ECB 1
+#define CE_ID_OP_CBC 2
+#define CE_ID_OP_CTR 3
+#define CE_ID_OP_CTS 4
+#define CE_ID_OP_OFB 5
+#define CE_ID_OP_CFB 6
+#define CE_ID_OP_CBCMAC 7
+#define CE_ID_OP_MAX 8
+
+#define CE_AES_128BITS 0
+#define CE_AES_192BITS 1
+#define CE_AES_256BITS 2
+
+#define CE_OP_ECB 0
+#define CE_OP_CBC (1 << 8)
+#define CE_OP_CTR (2 << 8)
+#define CE_OP_CTS (3 << 8)
+
+#define SS_OP_ECB 0
+#define SS_OP_CBC (1 << 13)
+#define SS_OP_CTR (2 << 13)
+#define SS_OP_CTS (3 << 13)
+
+#define CE_CTR_128 (3 << 2)
+#define SS_CTR_128 (3 << 11)
+#define CE_CTS BIT(16)
+
+#define CE_ID_AKCIPHER_RSA 1
+#define CE_ID_AKCIPHER_MAX 2
+
+#define CE_ID_RSA_512 0
+#define CE_ID_RSA_1024 1
+#define CE_ID_RSA_2048 2
+#define CE_ID_RSA_3072 3
+#define CE_ID_RSA_4096 4
+#define CE_ID_RSA_MAX 5
+
+#define CE_OP_RSA_512 0
+#define CE_OP_RSA_1024 (1 << 28)
+#define CE_OP_RSA_2048 (2 << 28)
+#define CE_OP_RSA_3072 (3 << 28)
+#define CE_OP_RSA_4096 (4 << 28)
+
+#define SS_OP_RSA_512 0
+#define SS_OP_RSA_1024 (1 << 9)
+#define SS_OP_RSA_2048 (2 << 9)
+#define SS_OP_RSA_3072 (3 << 9)
+
+#define SS_FLOW0 BIT(30)
+#define SS_FLOW1 BIT(31)
+
+#define SS_RNG_CONTINUE BIT(18)
+
+#define TRNG_DATA_SIZE (256 / 8)
+#define PRNG_DATA_SIZE (160 / 8)
+#define PRNG_SEED_SIZE DIV_ROUND_UP(175, 8)
+
+#define CE_ARBIT_IV BIT(16)
+#define SS_ARBIT_IV BIT(17)
+
+#define CE_ERR_ALGO_NOTSUP BIT(0)
+#define CE_ERR_DATALEN BIT(1)
+#define CE_ERR_KEYSRAM BIT(2)
+#define CE_ERR_ADDR_INVALID BIT(5)
+#define CE_ERR_KEYLADDER BIT(6)
+
+#define MAXCHAN 4
+#define MAX_SG 8
+
+#define CE_STD 0
+#define CE_SS 1
+#define CE_v2 2
+
+/*
+ * struct ce_variant - Describes the CE capability of each hardware variant
+ * @alg_cipher: list of supported ciphers
+ * @op_mode: list of supported block modes
+ * @model: the minor variant CE_STD/CE_SS/CE_v2
+ * @intreg: register offset of the interrupt register
+ * @maxflow: number of flows for the current engine
+ */
+struct ce_variant {
+ char alg_cipher[CE_ID_CIPHER_MAX];
+ u32 op_mode[CE_ID_OP_MAX];
+ int model;
+ u32 intreg;
+ unsigned int maxflow;
+};
+
+struct sginfo {
+ u32 addr;
+ u32 len;
+} __packed;
+
+/*
+ * struct ce_task - CE Task descriptor
+ * The layout of this descriptor can be found in the datasheet
+ */
+struct ce_task {
+ u32 t_id;
+ u32 t_common_ctl;
+ u32 t_sym_ctl;
+ u32 t_asym_ctl;
+ u32 t_key;
+ u32 t_iv;
+ u32 t_ctr;
+ u32 t_dlen;
+ struct sginfo t_src[MAX_SG];
+ struct sginfo t_dst[MAX_SG];
+ u32 next;
+ u32 reserved[3];
+} __packed __aligned(8);
+
+/*
+ * struct sun8i_ce_flow - Information used by each flow
+ * @lock: lock protecting access to this sun8i_ce_flow
+ * @engine: ptr to the crypto_engine for this flow
+ * @bounce_iv: buffer which contains the IV
+ * @next_iv: buffer containing the next IV to use
+ * @ivlen: size of bounce_iv
+ * @keylen: keylen for this flow operation
+ * @complete: completion for the current task on this flow
+ * @status: set to 1 by interrupt if task is done
+ * @method: current method for flow
+ * @op_dir: direction (encrypt vs decrypt) of this flow
+ * @op_mode: op_mode for this flow
+ * @t_phy: Physical address of task
+ * @tl: pointer to the current ce_task for this flow
+ * @stat_req: number of requests done by this flow
+ */
+struct sun8i_ce_flow {
+ struct mutex lock;
+ struct crypto_engine *engine;
+ void *bounce_iv;
+ void *next_iv;
+ unsigned int ivlen;
+ unsigned int keylen;
+ struct completion complete;
+ int status;
+ u32 method;
+ u32 op_dir;
+ u32 op_mode;
+ dma_addr_t t_phy;
+ struct ce_task *tl;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ unsigned long stat_req;
+#endif
+};
+
+/*
+ * struct sun8i_ce_dev - main container for all this driver information
+ * @base: base address of SS/CE
+ * @busclk: bus clock for SS/CE
+ * @ssclk: clock for SS/CE
+ * @mbusclk: An optional MBUS clock for CE
+ * @reset: pointer to reset controller
+ * @dev: the platform device
+ * @mlock: Control access to device registers
+ * @chanlist: array of all flows
+ * @flow: flow to use in next request
+ * @variant: pointer to variant specific data
+ * @dbgfs_dir: Debugfs dentry for statistic directory
+ * @dbgfs_stats: Debugfs dentry for statistic counters
+ */
+struct sun8i_ce_dev {
+ void __iomem *base;
+ struct clk *busclk;
+ struct clk *ssclk;
+ struct clk *mbusclk;
+ struct reset_control *reset;
+ struct device *dev;
+ struct mutex mlock;
+ struct sun8i_ce_flow *chanlist;
+ atomic_t flow;
+ const struct ce_variant *variant;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_stats;
+#endif
+};
+
+/*
+ * struct sun8i_cipher_req_ctx - context for a skcipher request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ */
+struct sun8i_cipher_req_ctx {
+ u32 op_dir;
+ int flow;
+};
+
+/*
+ * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @key: pointer to key data
+ * @keylen: len of the key
+ * @ss: pointer to the private data of driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ */
+struct sun8i_cipher_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ u32 *key;
+ u32 keylen;
+ struct sun8i_ce_dev *ss;
+ struct crypto_sync_skcipher *fallback_tfm;
+};
+
+/*
+ * struct sun8i_ss_alg_template - crypto_alg template
+ * @type: the CRYPTO_ALG_TYPE for this template
+ * @ce_algo_id: the CE_ID for this template
+ * @ce_blockmode: the type of block operation CE_ID
+ * @ss: pointer to the sun8i_ce_dev structure associated with
+ * this template
+ * @alg: one of the sub structs must be used
+ * @stat_req: number of requests done on this template
+ * @stat_fb: number of requests which used the fallback
+ */
+struct sun8i_ss_alg_template {
+ u32 type;
+ u32 ce_algo_id;
+ u32 ce_blockmode;
+ struct sun8i_ce_dev *ss;
+ union {
+ struct skcipher_alg skcipher;
+ } alg;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ unsigned long stat_req;
+ unsigned long stat_fb;
+#endif
+};
+
+int sun8i_ce_enqueue(struct crypto_async_request *areq, u32 type);
+
+int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun8i_ce_cipher_init(struct crypto_tfm *tfm);
+void sun8i_ce_cipher_exit(struct crypto_tfm *tfm);
+int sun8i_ce_skdecrypt(struct skcipher_request *areq);
+int sun8i_ce_skencrypt(struct skcipher_request *areq);
+
+int get_engine_number(struct sun8i_ce_dev *ss);
+
+int sun8i_ce_run_task(struct sun8i_ce_dev *ss, int flow, const char *name);
--
2.19.2