Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig                |  15
-rw-r--r--  drivers/crypto/Makefile               |   1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c   |   3
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c  |  73
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h  |  25
-rw-r--r--  drivers/crypto/mv_cesa.c              | 606
-rw-r--r--  drivers/crypto/mv_cesa.h              | 119
-rw-r--r--  drivers/crypto/padlock-sha.c          | 329
-rw-r--r--  drivers/crypto/talitos.c              | 216
-rw-r--r--  drivers/crypto/talitos.h              |   1
10 files changed, 1065 insertions, 323 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5b27692372b..b08403d7d1c 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -13,7 +13,6 @@ if CRYPTO_HW
config CRYPTO_DEV_PADLOCK
tristate "Support for VIA PadLock ACE"
depends on X86 && !UML
- select CRYPTO_ALGAPI
help
Some VIA processors come with an integrated crypto engine
(so called VIA PadLock ACE, Advanced Cryptography Engine)
@@ -39,6 +38,7 @@ config CRYPTO_DEV_PADLOCK_AES
config CRYPTO_DEV_PADLOCK_SHA
tristate "PadLock driver for SHA1 and SHA256 algorithms"
depends on CRYPTO_DEV_PADLOCK
+ select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
help
@@ -157,6 +157,19 @@ config S390_PRNG
ANSI X9.17 standard. The PRNG is usable via the char device
/dev/prandom.
+config CRYPTO_DEV_MV_CESA
+ tristate "Marvell's Cryptographic Engine"
+ depends on PLAT_ORION
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER2
+ help
+	  This driver allows you to utilize the Cryptographic Engines and
+	  Security Accelerator (CESA) found on the Marvell Orion and Kirkwood
+	  SoCs, used in devices such as QNAP's TS-209.
+
+ Currently the driver supports AES in ECB and CBC mode without DMA.
+
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_DES
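
Usage note: the Kconfig entry above only makes the engine available through the kernel crypto API; callers still request the generic "cbc(aes)" transform and the core picks the highest-priority provider (mv-cbc-aes, PadLock, software, ...). A minimal, hypothetical caller sketch follows; function, key and buffer names are illustrative only, and the buffer is kmalloc'ed so the scatterlist maps real pages.

#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Illustrative only: encrypt one block via whichever "cbc(aes)" wins. */
static int example_cbc_aes_one_block(void)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = { 0 };
	u8 iv[AES_BLOCK_SIZE] = { 0 };
	u8 *buf;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ret = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_free_buf;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_buf;
	}

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	ablkcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/*
	 * Async providers such as mv_cesa may return -EINPROGRESS here;
	 * a real caller waits for the completion callback (see the wait
	 * helper sketched after mv_cesa.c below).
	 */
	ret = crypto_ablkcipher_encrypt(req);

	ablkcipher_request_free(req);
out_free_buf:
	kfree(buf);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}
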
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 9bf4a2bc884..6ffcb3f7f94 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 61b6e1bec8c..a33243c17b0 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -208,7 +208,8 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
}
}
- tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct crypto4xx_ctx));
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 4c0dfb2b872..46e899ac924 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -31,8 +31,6 @@
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
-#include <crypto/internal/hash.h>
-#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include "crypto4xx_reg_def.h"
@@ -998,10 +996,15 @@ static int crypto4xx_alg_init(struct crypto_tfm *tfm)
ctx->sa_out_dma_addr = 0;
ctx->sa_len = 0;
- if (alg->cra_type == &crypto_ablkcipher_type)
+ switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ default:
tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
- else if (alg->cra_type == &crypto_ahash_type)
- tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct crypto4xx_ctx));
+ break;
+ }
return 0;
}
@@ -1015,7 +1018,8 @@ static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
}
int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
- struct crypto_alg *crypto_alg, int array_size)
+ struct crypto4xx_alg_common *crypto_alg,
+ int array_size)
{
struct crypto4xx_alg *alg;
int i;
@@ -1027,13 +1031,18 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
return -ENOMEM;
alg->alg = crypto_alg[i];
- INIT_LIST_HEAD(&alg->alg.cra_list);
- if (alg->alg.cra_init == NULL)
- alg->alg.cra_init = crypto4xx_alg_init;
- if (alg->alg.cra_exit == NULL)
- alg->alg.cra_exit = crypto4xx_alg_exit;
alg->dev = sec_dev;
- rc = crypto_register_alg(&alg->alg);
+
+ switch (alg->alg.type) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ rc = crypto_register_ahash(&alg->alg.u.hash);
+ break;
+
+ default:
+ rc = crypto_register_alg(&alg->alg.u.cipher);
+ break;
+ }
+
if (rc) {
list_del(&alg->entry);
kfree(alg);
@@ -1051,7 +1060,14 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
list_del(&alg->entry);
- crypto_unregister_alg(&alg->alg);
+ switch (alg->alg.type) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_unregister_ahash(&alg->alg.u.hash);
+ break;
+
+ default:
+ crypto_unregister_alg(&alg->alg.u.cipher);
+ }
kfree(alg);
}
}
@@ -1104,17 +1120,18 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
/**
* Supported Crypto Algorithms
*/
-struct crypto_alg crypto4xx_alg[] = {
+struct crypto4xx_alg_common crypto4xx_alg[] = {
/* Crypto AES modes */
- {
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
.cra_module = THIS_MODULE,
.cra_u = {
.ablkcipher = {
@@ -1126,29 +1143,7 @@ struct crypto_alg crypto4xx_alg[] = {
.decrypt = crypto4xx_decrypt,
}
}
- },
- /* Hash SHA1 */
- {
- .cra_name = "sha1",
- .cra_driver_name = "sha1-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ahash_type,
- .cra_init = crypto4xx_sha1_alg_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ahash = {
- .digestsize = SHA1_DIGEST_SIZE,
- .init = crypto4xx_hash_init,
- .update = crypto4xx_hash_update,
- .final = crypto4xx_hash_final,
- .digest = crypto4xx_hash_digest,
- }
- }
- },
+ }},
};
/**
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 1ef10344936..da9cbe3b9fc 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -22,6 +22,8 @@
#ifndef __CRYPTO4XX_CORE_H__
#define __CRYPTO4XX_CORE_H__
+#include <crypto/internal/hash.h>
+
#define PPC460SX_SDR0_SRST 0x201
#define PPC405EX_SDR0_SRST 0x200
#define PPC460EX_SDR0_SRST 0x201
@@ -138,14 +140,31 @@ struct crypto4xx_req_ctx {
u16 sa_len;
};
+struct crypto4xx_alg_common {
+ u32 type;
+ union {
+ struct crypto_alg cipher;
+ struct ahash_alg hash;
+ } u;
+};
+
struct crypto4xx_alg {
struct list_head entry;
- struct crypto_alg alg;
+ struct crypto4xx_alg_common alg;
struct crypto4xx_device *dev;
};
-#define crypto_alg_to_crypto4xx_alg(x) \
- container_of(x, struct crypto4xx_alg, alg)
+static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
+ struct crypto_alg *x)
+{
+ switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ return container_of(__crypto_ahash_alg(x),
+ struct crypto4xx_alg, alg.u.hash);
+ }
+
+ return container_of(x, struct crypto4xx_alg, alg.u.cipher);
+}
extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
new file mode 100644
index 00000000000..b21ef635f35
--- /dev/null
+++ b/drivers/crypto/mv_cesa.c
@@ -0,0 +1,606 @@
+/*
+ * Support for Marvell's crypto engine which can be found on some Orion5X
+ * boards.
+ *
+ * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
+ * License: GPLv2
+ *
+ */
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include "mv_cesa.h"
+/*
+ * STM:
+ *   /---------------------------------------\
+ *   |                                       | request complete
+ *  \./                                      |
+ * IDLE -> new request -> BUSY -> done -> DEQUEUE
+ *                         /°\               |
+ *                          |                | more scatter entries
+ *                          \________________/
+ */
+enum engine_status {
+ ENGINE_IDLE,
+ ENGINE_BUSY,
+ ENGINE_W_DEQUEUE,
+};
+
+/**
+ * struct req_progress - used for every crypt request
+ * @src_sg_it: sg iterator for src
+ * @dst_sg_it: sg iterator for dst
+ * @sg_src_left: bytes left in src to process (scatter list)
+ * @src_start: offset to add to src start position (scatter list)
+ * @crypt_len: length of current crypt process
+ * @sg_dst_left: bytes left in dst to process in this scatter list
+ * @dst_start: offset to add to dst start position (scatter list)
+ * @total_req_bytes: total number of bytes processed (request).
+ *
+ * sg helpers are used to iterate over the scatterlist. Since the size of the
+ * SRAM may be less than the scatter size, this struct is used to keep
+ * track of progress within the current scatterlist.
+ */
+struct req_progress {
+ struct sg_mapping_iter src_sg_it;
+ struct sg_mapping_iter dst_sg_it;
+
+ /* src mostly */
+ int sg_src_left;
+ int src_start;
+ int crypt_len;
+ /* dst mostly */
+ int sg_dst_left;
+ int dst_start;
+ int total_req_bytes;
+};
+
+struct crypto_priv {
+ void __iomem *reg;
+ void __iomem *sram;
+ int irq;
+ struct task_struct *queue_th;
+
+ /* the lock protects queue and eng_st */
+ spinlock_t lock;
+ struct crypto_queue queue;
+ enum engine_status eng_st;
+ struct ablkcipher_request *cur_req;
+ struct req_progress p;
+ int max_req_size;
+ int sram_size;
+};
+
+static struct crypto_priv *cpg;
+
+struct mv_ctx {
+ u8 aes_enc_key[AES_KEY_LEN];
+ u32 aes_dec_key[8];
+ int key_len;
+ u32 need_calc_aes_dkey;
+};
+
+enum crypto_op {
+ COP_AES_ECB,
+ COP_AES_CBC,
+};
+
+struct mv_req_ctx {
+ enum crypto_op op;
+ int decrypt;
+};
+
+static void compute_aes_dec_key(struct mv_ctx *ctx)
+{
+ struct crypto_aes_ctx gen_aes_key;
+ int key_pos;
+
+ if (!ctx->need_calc_aes_dkey)
+ return;
+
+ crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
+
+ key_pos = ctx->key_len + 24;
+ memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_256:
+ key_pos -= 2;
+ /* fall */
+ case AES_KEYSIZE_192:
+ key_pos -= 2;
+ memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
+ 4 * 4);
+ break;
+ }
+ ctx->need_calc_aes_dkey = 0;
+}
+
+static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ switch (len) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->key_len = len;
+ ctx->need_calc_aes_dkey = 1;
+
+ memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
+ return 0;
+}
+
+static void setup_data_in(struct ablkcipher_request *req)
+{
+ int ret;
+ void *buf;
+
+ if (!cpg->p.sg_src_left) {
+ ret = sg_miter_next(&cpg->p.src_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_src_left = cpg->p.src_sg_it.length;
+ cpg->p.src_start = 0;
+ }
+
+ cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
+
+ buf = cpg->p.src_sg_it.addr;
+ buf += cpg->p.src_start;
+
+ memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+
+ cpg->p.sg_src_left -= cpg->p.crypt_len;
+ cpg->p.src_start += cpg->p.crypt_len;
+}
+
+static void mv_process_current_q(int first_block)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct sec_accel_config op;
+
+ switch (req_ctx->op) {
+ case COP_AES_ECB:
+ op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
+ break;
+ case COP_AES_CBC:
+ op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
+ op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
+ ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
+ if (first_block)
+ memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
+ break;
+ }
+ if (req_ctx->decrypt) {
+ op.config |= CFG_DIR_DEC;
+ memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
+ AES_KEY_LEN);
+ } else {
+ op.config |= CFG_DIR_ENC;
+ memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
+ AES_KEY_LEN);
+ }
+
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_128:
+ op.config |= CFG_AES_LEN_128;
+ break;
+ case AES_KEYSIZE_192:
+ op.config |= CFG_AES_LEN_192;
+ break;
+ case AES_KEYSIZE_256:
+ op.config |= CFG_AES_LEN_256;
+ break;
+ }
+ op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
+ ENC_P_DST(SRAM_DATA_OUT_START);
+ op.enc_key_p = SRAM_DATA_KEY_P;
+
+ setup_data_in(req);
+ op.enc_len = cpg->p.crypt_len;
+ memcpy(cpg->sram + SRAM_CONFIG, &op,
+ sizeof(struct sec_accel_config));
+
+ writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+ /* GO */
+ writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+ /*
+ * XXX: add timer if the interrupt does not occur for some mystery
+ * reason
+ */
+}
+
+static void mv_crypto_algo_completion(void)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ if (req_ctx->op != COP_AES_CBC)
+ return;
+
+ memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
+}
+
+static void dequeue_complete_req(void)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ void *buf;
+ int ret;
+
+ cpg->p.total_req_bytes += cpg->p.crypt_len;
+ do {
+ int dst_copy;
+
+ if (!cpg->p.sg_dst_left) {
+ ret = sg_miter_next(&cpg->p.dst_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+ cpg->p.dst_start = 0;
+ }
+
+ buf = cpg->p.dst_sg_it.addr;
+ buf += cpg->p.dst_start;
+
+ dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+
+ memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+
+ cpg->p.sg_dst_left -= dst_copy;
+ cpg->p.crypt_len -= dst_copy;
+ cpg->p.dst_start += dst_copy;
+ } while (cpg->p.crypt_len > 0);
+
+ BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+ if (cpg->p.total_req_bytes < req->nbytes) {
+ /* process next scatter list entry */
+ cpg->eng_st = ENGINE_BUSY;
+ mv_process_current_q(0);
+ } else {
+ sg_miter_stop(&cpg->p.src_sg_it);
+ sg_miter_stop(&cpg->p.dst_sg_it);
+ mv_crypto_algo_completion();
+ cpg->eng_st = ENGINE_IDLE;
+ req->base.complete(&req->base, 0);
+ }
+}
+
+static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
+{
+ int i = 0;
+
+ do {
+ total_bytes -= sl[i].length;
+ i++;
+
+ } while (total_bytes > 0);
+
+ return i;
+}
+
+static void mv_enqueue_new_req(struct ablkcipher_request *req)
+{
+ int num_sgs;
+
+ cpg->cur_req = req;
+ memset(&cpg->p, 0, sizeof(struct req_progress));
+
+ num_sgs = count_sgs(req->src, req->nbytes);
+ sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+ num_sgs = count_sgs(req->dst, req->nbytes);
+ sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+ mv_process_current_q(1);
+}
+
+static int queue_manag(void *data)
+{
+ cpg->eng_st = ENGINE_IDLE;
+ do {
+ struct ablkcipher_request *req;
+ struct crypto_async_request *async_req = NULL;
+ struct crypto_async_request *backlog = NULL;
+
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ if (cpg->eng_st == ENGINE_W_DEQUEUE)
+ dequeue_complete_req();
+
+ spin_lock_irq(&cpg->lock);
+ if (cpg->eng_st == ENGINE_IDLE) {
+ backlog = crypto_get_backlog(&cpg->queue);
+ async_req = crypto_dequeue_request(&cpg->queue);
+ if (async_req) {
+ BUG_ON(cpg->eng_st != ENGINE_IDLE);
+ cpg->eng_st = ENGINE_BUSY;
+ }
+ }
+ spin_unlock_irq(&cpg->lock);
+
+ if (backlog) {
+ backlog->complete(backlog, -EINPROGRESS);
+ backlog = NULL;
+ }
+
+ if (async_req) {
+ req = container_of(async_req,
+ struct ablkcipher_request, base);
+ mv_enqueue_new_req(req);
+ async_req = NULL;
+ }
+
+ schedule();
+
+ } while (!kthread_should_stop());
+ return 0;
+}
+
+static int mv_handle_req(struct ablkcipher_request *req)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cpg->lock, flags);
+ ret = ablkcipher_enqueue_request(&cpg->queue, req);
+ spin_unlock_irqrestore(&cpg->lock, flags);
+ wake_up_process(cpg->queue_th);
+ return ret;
+}
+
+static int mv_enc_aes_ecb(struct ablkcipher_request *req)
+{
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_ECB;
+ req_ctx->decrypt = 0;
+
+ return mv_handle_req(req);
+}
+
+static int mv_dec_aes_ecb(struct ablkcipher_request *req)
+{
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_ECB;
+ req_ctx->decrypt = 1;
+
+ compute_aes_dec_key(ctx);
+ return mv_handle_req(req);
+}
+
+static int mv_enc_aes_cbc(struct ablkcipher_request *req)
+{
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_CBC;
+ req_ctx->decrypt = 0;
+
+ return mv_handle_req(req);
+}
+
+static int mv_dec_aes_cbc(struct ablkcipher_request *req)
+{
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_CBC;
+ req_ctx->decrypt = 1;
+
+ compute_aes_dec_key(ctx);
+ return mv_handle_req(req);
+}
+
+static int mv_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
+ return 0;
+}
+
+irqreturn_t crypto_int(int irq, void *priv)
+{
+ u32 val;
+
+ val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
+ if (!(val & SEC_INT_ACCEL0_DONE))
+ return IRQ_NONE;
+
+ val &= ~SEC_INT_ACCEL0_DONE;
+ writel(val, cpg->reg + FPGA_INT_STATUS);
+ writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
+ BUG_ON(cpg->eng_st != ENGINE_BUSY);
+ cpg->eng_st = ENGINE_W_DEQUEUE;
+ wake_up_process(cpg->queue_th);
+ return IRQ_HANDLED;
+}
+
+struct crypto_alg mv_aes_alg_ecb = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "mv-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16,
+ .cra_ctxsize = sizeof(struct mv_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cra_init,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mv_setkey_aes,
+ .encrypt = mv_enc_aes_ecb,
+ .decrypt = mv_dec_aes_ecb,
+ },
+ },
+};
+
+struct crypto_alg mv_aes_alg_cbc = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "mv-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cra_init,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mv_setkey_aes,
+ .encrypt = mv_enc_aes_cbc,
+ .decrypt = mv_dec_aes_cbc,
+ },
+ },
+};
+
+static int mv_probe(struct platform_device *pdev)
+{
+ struct crypto_priv *cp;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ if (cpg) {
+ printk(KERN_ERR "Second crypto dev?\n");
+ return -EEXIST;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!res)
+ return -ENXIO;
+
+ cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+
+ spin_lock_init(&cp->lock);
+ crypto_init_queue(&cp->queue, 50);
+ cp->reg = ioremap(res->start, res->end - res->start + 1);
+ if (!cp->reg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ if (!res) {
+ ret = -ENXIO;
+ goto err_unmap_reg;
+ }
+ cp->sram_size = res->end - res->start + 1;
+ cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
+ cp->sram = ioremap(res->start, cp->sram_size);
+ if (!cp->sram) {
+ ret = -ENOMEM;
+ goto err_unmap_reg;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 || irq == NO_IRQ) {
+ ret = irq;
+ goto err_unmap_sram;
+ }
+ cp->irq = irq;
+
+ platform_set_drvdata(pdev, cp);
+ cpg = cp;
+
+ cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
+ if (IS_ERR(cp->queue_th)) {
+ ret = PTR_ERR(cp->queue_th);
+ goto err_unmap_sram;
+ }
+
+ ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
+ cp);
+ if (ret)
+ goto err_thread;
+
+ writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
+ writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
+
+ ret = crypto_register_alg(&mv_aes_alg_ecb);
+ if (ret)
+ goto err_irq;
+
+ ret = crypto_register_alg(&mv_aes_alg_cbc);
+ if (ret)
+ goto err_unreg_ecb;
+ return 0;
+err_unreg_ecb:
+ crypto_unregister_alg(&mv_aes_alg_ecb);
+err_irq:
+ free_irq(irq, cp);
+err_thread:
+ kthread_stop(cp->queue_th);
+err_unmap_sram:
+ iounmap(cp->sram);
+err_unmap_reg:
+ iounmap(cp->reg);
+err:
+ kfree(cp);
+ cpg = NULL;
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static int mv_remove(struct platform_device *pdev)
+{
+ struct crypto_priv *cp = platform_get_drvdata(pdev);
+
+ crypto_unregister_alg(&mv_aes_alg_ecb);
+ crypto_unregister_alg(&mv_aes_alg_cbc);
+ kthread_stop(cp->queue_th);
+ free_irq(cp->irq, cp);
+ memset(cp->sram, 0, cp->sram_size);
+ iounmap(cp->sram);
+ iounmap(cp->reg);
+ kfree(cp);
+ cpg = NULL;
+ return 0;
+}
+
+static struct platform_driver marvell_crypto = {
+ .probe = mv_probe,
+ .remove = mv_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mv_crypto",
+ },
+};
+MODULE_ALIAS("platform:mv_crypto");
+
+static int __init mv_crypto_init(void)
+{
+ return platform_driver_register(&marvell_crypto);
+}
+module_init(mv_crypto_init);
+
+static void __exit mv_crypto_exit(void)
+{
+ platform_driver_unregister(&marvell_crypto);
+}
+module_exit(mv_crypto_exit);
+
+MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
+MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
+MODULE_LICENSE("GPL");
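
Since the engine completes work from its queue thread (the IDLE -> BUSY -> W_DEQUEUE machine sketched in the STM comment), encrypt/decrypt calls return -EINPROGRESS or -EBUSY and the result is delivered through req->base.complete(). A minimal, hypothetical wait helper built only on the standard completion-callback mechanism might look as follows; every name here is illustrative, not part of the driver.

#include <linux/completion.h>
#include <linux/crypto.h>

struct example_wait {
	struct completion done;
	int err;
};

/* Callback the driver invokes when the request finishes. */
static void example_cb(struct crypto_async_request *req, int err)
{
	struct example_wait *w = req->data;

	if (err == -EINPROGRESS)
		return;		/* left the backlog, keep waiting */
	w->err = err;
	complete(&w->done);
}

/* Submit an ablkcipher request and wait for the asynchronous result. */
static int example_encrypt_sync(struct ablkcipher_request *req)
{
	struct example_wait w;
	int ret;

	init_completion(&w.done);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					example_cb, &w);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&w.done);
		ret = w.err;
	}
	return ret;
}
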
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
new file mode 100644
index 00000000000..c3e25d3bb17
--- /dev/null
+++ b/drivers/crypto/mv_cesa.h
@@ -0,0 +1,119 @@
+#ifndef __MV_CRYPTO_H__
+
+#define DIGEST_INITIAL_VAL_A 0xdd00
+#define DES_CMD_REG 0xdd58
+
+#define SEC_ACCEL_CMD 0xde00
+#define SEC_CMD_EN_SEC_ACCL0 (1 << 0)
+#define SEC_CMD_EN_SEC_ACCL1 (1 << 1)
+#define SEC_CMD_DISABLE_SEC (1 << 2)
+
+#define SEC_ACCEL_DESC_P0 0xde04
+#define SEC_DESC_P0_PTR(x) (x)
+
+#define SEC_ACCEL_DESC_P1 0xde14
+#define SEC_DESC_P1_PTR(x) (x)
+
+#define SEC_ACCEL_CFG 0xde08
+#define SEC_CFG_STOP_DIG_ERR (1 << 0)
+#define SEC_CFG_CH0_W_IDMA (1 << 7)
+#define SEC_CFG_CH1_W_IDMA (1 << 8)
+#define SEC_CFG_ACT_CH0_IDMA (1 << 9)
+#define SEC_CFG_ACT_CH1_IDMA (1 << 10)
+
+#define SEC_ACCEL_STATUS 0xde0c
+#define SEC_ST_ACT_0 (1 << 0)
+#define SEC_ST_ACT_1 (1 << 1)
+
+/*
+ * FPGA_INT_STATUS looks like an FPGA leftover and is documented only in
+ * Errata 4.12. It appears to have been part of an IRQ controller in the FPGA,
+ * and someone forgot to remove it while switching to the core and moving to
+ * SEC_ACCEL_INT_STATUS.
+ */
+#define FPGA_INT_STATUS 0xdd68
+#define SEC_ACCEL_INT_STATUS 0xde20
+#define SEC_INT_AUTH_DONE (1 << 0)
+#define SEC_INT_DES_E_DONE (1 << 1)
+#define SEC_INT_AES_E_DONE (1 << 2)
+#define SEC_INT_AES_D_DONE (1 << 3)
+#define SEC_INT_ENC_DONE (1 << 4)
+#define SEC_INT_ACCEL0_DONE (1 << 5)
+#define SEC_INT_ACCEL1_DONE (1 << 6)
+#define SEC_INT_ACC0_IDMA_DONE (1 << 7)
+#define SEC_INT_ACC1_IDMA_DONE (1 << 8)
+
+#define SEC_ACCEL_INT_MASK 0xde24
+
+#define AES_KEY_LEN (8 * 4)
+
+struct sec_accel_config {
+
+ u32 config;
+#define CFG_OP_MAC_ONLY 0
+#define CFG_OP_CRYPT_ONLY 1
+#define CFG_OP_MAC_CRYPT 2
+#define CFG_OP_CRYPT_MAC 3
+#define CFG_MACM_MD5 (4 << 4)
+#define CFG_MACM_SHA1 (5 << 4)
+#define CFG_MACM_HMAC_MD5 (6 << 4)
+#define CFG_MACM_HMAC_SHA1 (7 << 4)
+#define CFG_ENCM_DES (1 << 8)
+#define CFG_ENCM_3DES (2 << 8)
+#define CFG_ENCM_AES (3 << 8)
+#define CFG_DIR_ENC (0 << 12)
+#define CFG_DIR_DEC (1 << 12)
+#define CFG_ENC_MODE_ECB (0 << 16)
+#define CFG_ENC_MODE_CBC (1 << 16)
+#define CFG_3DES_EEE (0 << 20)
+#define CFG_3DES_EDE (1 << 20)
+#define CFG_AES_LEN_128 (0 << 24)
+#define CFG_AES_LEN_192 (1 << 24)
+#define CFG_AES_LEN_256 (2 << 24)
+
+ u32 enc_p;
+#define ENC_P_SRC(x) (x)
+#define ENC_P_DST(x) ((x) << 16)
+
+ u32 enc_len;
+#define ENC_LEN(x) (x)
+
+ u32 enc_key_p;
+#define ENC_KEY_P(x) (x)
+
+ u32 enc_iv;
+#define ENC_IV_POINT(x) ((x) << 0)
+#define ENC_IV_BUF_POINT(x) ((x) << 16)
+
+ u32 mac_src_p;
+#define MAC_SRC_DATA_P(x) (x)
+#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
+
+ u32 mac_digest;
+ u32 mac_iv;
+}__attribute__ ((packed));
+ /*
+ * /-----------\ 0
+ * | ACCEL CFG | 4 * 8
+ * |-----------| 0x20
+ * | CRYPT KEY | 8 * 4
+ * |-----------| 0x40
+ * | IV IN | 4 * 4
+ * |-----------| 0x40 (inplace)
+ * | IV BUF | 4 * 4
+ * |-----------| 0x50
+ * | DATA IN | 16 * x (max ->max_req_size)
+ * |-----------| 0x50 (inplace operation)
+ * | DATA OUT | 16 * x (max ->max_req_size)
+ * \-----------/ SRAM size
+ */
+#define SRAM_CONFIG 0x00
+#define SRAM_DATA_KEY_P 0x20
+#define SRAM_DATA_IV 0x40
+#define SRAM_DATA_IV_BUF 0x40
+#define SRAM_DATA_IN_START 0x50
+#define SRAM_DATA_OUT_START 0x50
+
+#define SRAM_CFG_SPACE 0x50
+
+#endif
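
One detail worth spelling out from the layout above: DATA IN and DATA OUT share offset 0x50 (the operation happens in place in SRAM), so the entire region past the 0x50-byte config/key/IV area is usable in a single pass. That is how mv_probe() derives max_req_size; a tiny sketch with a hypothetical SRAM window size:

#include "mv_cesa.h"

/*
 * Hypothetical example: with a 2 KiB CESA SRAM window, one pass carries
 * 2048 - 0x50 = 1968 payload bytes, because input and output overlap at
 * SRAM_DATA_IN_START/SRAM_DATA_OUT_START (in-place operation).
 */
static int example_max_req_size(int sram_size)
{
	return sram_size - SRAM_CFG_SPACE;
}
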
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index a2c8e8514b6..76cb6b345e7 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -12,81 +12,43 @@
*
*/
-#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
-#include <linux/cryptohash.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
#include "padlock.h"
-#define SHA1_DEFAULT_FALLBACK "sha1-generic"
-#define SHA256_DEFAULT_FALLBACK "sha256-generic"
+struct padlock_sha_desc {
+ struct shash_desc fallback;
+};
struct padlock_sha_ctx {
- char *data;
- size_t used;
- int bypass;
- void (*f_sha_padlock)(const char *in, char *out, int count);
- struct hash_desc fallback;
+ struct crypto_shash *fallback;
};
-static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
-{
- return crypto_tfm_ctx(tfm);
-}
-
-/* We'll need aligned address on the stack */
-#define NEAREST_ALIGNED(ptr) \
- ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
-
-static struct crypto_alg sha1_alg, sha256_alg;
-
-static void padlock_sha_bypass(struct crypto_tfm *tfm)
+static int padlock_sha_init(struct shash_desc *desc)
{
- if (ctx(tfm)->bypass)
- return;
+ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
- crypto_hash_init(&ctx(tfm)->fallback);
- if (ctx(tfm)->data && ctx(tfm)->used) {
- struct scatterlist sg;
-
- sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
- crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
- }
-
- ctx(tfm)->used = 0;
- ctx(tfm)->bypass = 1;
-}
-
-static void padlock_sha_init(struct crypto_tfm *tfm)
-{
- ctx(tfm)->used = 0;
- ctx(tfm)->bypass = 0;
+ dctx->fallback.tfm = ctx->fallback;
+ dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ return crypto_shash_init(&dctx->fallback);
}
-static void padlock_sha_update(struct crypto_tfm *tfm,
- const uint8_t *data, unsigned int length)
+static int padlock_sha_update(struct shash_desc *desc,
+ const u8 *data, unsigned int length)
{
- /* Our buffer is always one page. */
- if (unlikely(!ctx(tfm)->bypass &&
- (ctx(tfm)->used + length > PAGE_SIZE)))
- padlock_sha_bypass(tfm);
-
- if (unlikely(ctx(tfm)->bypass)) {
- struct scatterlist sg;
- sg_init_one(&sg, (uint8_t *)data, length);
- crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
- return;
- }
+ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
- ctx(tfm)->used += length;
+ dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ return crypto_shash_update(&dctx->fallback, data, length);
}
static inline void padlock_output_block(uint32_t *src,
@@ -96,165 +58,206 @@ static inline void padlock_output_block(uint32_t *src,
*dst++ = swab32(*src++);
}
-static void padlock_do_sha1(const char *in, char *out, int count)
+static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
+ unsigned int count, u8 *out)
{
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
- char buf[128+16];
- char *result = NEAREST_ALIGNED(buf);
+ char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+ struct sha1_state state;
+ unsigned int space;
+ unsigned int leftover;
int ts_state;
+ int err;
+
+ dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = crypto_shash_export(&dctx->fallback, &state);
+ if (err)
+ goto out;
+
+ if (state.count + count > ULONG_MAX)
+ return crypto_shash_finup(&dctx->fallback, in, count, out);
+
+ leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
+ space = SHA1_BLOCK_SIZE - leftover;
+ if (space) {
+ if (count > space) {
+ err = crypto_shash_update(&dctx->fallback, in, space) ?:
+ crypto_shash_export(&dctx->fallback, &state);
+ if (err)
+ goto out;
+ count -= space;
+ in += space;
+ } else {
+ memcpy(state.buffer + leftover, in, count);
+ in = state.buffer;
+ count += leftover;
+ state.count &= ~(SHA1_BLOCK_SIZE - 1);
+ }
+ }
+
+ memcpy(result, &state.state, SHA1_DIGEST_SIZE);
- ((uint32_t *)result)[0] = SHA1_H0;
- ((uint32_t *)result)[1] = SHA1_H1;
- ((uint32_t *)result)[2] = SHA1_H2;
- ((uint32_t *)result)[3] = SHA1_H3;
- ((uint32_t *)result)[4] = SHA1_H4;
-
/* prevent taking the spurious DNA fault with padlock. */
ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
- : "+S"(in), "+D"(result)
- : "c"(count), "a"(0));
+ : \
+ : "c"((unsigned long)state.count + count), \
+ "a"((unsigned long)state.count), \
+ "S"(in), "D"(result));
irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
+
+out:
+ return err;
}
-static void padlock_do_sha256(const char *in, char *out, int count)
+static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
+{
+ u8 buf[4];
+
+ return padlock_sha1_finup(desc, buf, 0, out);
+}
+
+static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
+ unsigned int count, u8 *out)
{
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
- char buf[128+16];
- char *result = NEAREST_ALIGNED(buf);
+ char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+ struct sha256_state state;
+ unsigned int space;
+ unsigned int leftover;
int ts_state;
+ int err;
+
+ dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = crypto_shash_export(&dctx->fallback, &state);
+ if (err)
+ goto out;
+
+ if (state.count + count > ULONG_MAX)
+ return crypto_shash_finup(&dctx->fallback, in, count, out);
+
+ leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
+ space = SHA256_BLOCK_SIZE - leftover;
+ if (space) {
+ if (count > space) {
+ err = crypto_shash_update(&dctx->fallback, in, space) ?:
+ crypto_shash_export(&dctx->fallback, &state);
+ if (err)
+ goto out;
+ count -= space;
+ in += space;
+ } else {
+ memcpy(state.buf + leftover, in, count);
+ in = state.buf;
+ count += leftover;
+ state.count &= ~(SHA256_BLOCK_SIZE - 1);
+ }
+ }
- ((uint32_t *)result)[0] = SHA256_H0;
- ((uint32_t *)result)[1] = SHA256_H1;
- ((uint32_t *)result)[2] = SHA256_H2;
- ((uint32_t *)result)[3] = SHA256_H3;
- ((uint32_t *)result)[4] = SHA256_H4;
- ((uint32_t *)result)[5] = SHA256_H5;
- ((uint32_t *)result)[6] = SHA256_H6;
- ((uint32_t *)result)[7] = SHA256_H7;
+ memcpy(result, &state.state, SHA256_DIGEST_SIZE);
/* prevent taking the spurious DNA fault with padlock. */
ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
- : "+S"(in), "+D"(result)
- : "c"(count), "a"(0));
+ : \
+ : "c"((unsigned long)state.count + count), \
+ "a"((unsigned long)state.count), \
+ "S"(in), "D"(result));
irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
+
+out:
+ return err;
}
-static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
+static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
- if (unlikely(ctx(tfm)->bypass)) {
- crypto_hash_final(&ctx(tfm)->fallback, out);
- ctx(tfm)->bypass = 0;
- return;
- }
+ u8 buf[4];
- /* Pass the input buffer to PadLock microcode... */
- ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);
-
- ctx(tfm)->used = 0;
+ return padlock_sha256_finup(desc, buf, 0, out);
}
static int padlock_cra_init(struct crypto_tfm *tfm)
{
+ struct crypto_shash *hash = __crypto_shash_cast(tfm);
const char *fallback_driver_name = tfm->__crt_alg->cra_name;
- struct crypto_hash *fallback_tfm;
-
- /* For now we'll allocate one page. This
- * could eventually be configurable one day. */
- ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
- if (!ctx(tfm)->data)
- return -ENOMEM;
+ struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_shash *fallback_tfm;
+ int err = -ENOMEM;
/* Allocate a fallback and abort if it failed. */
- fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name);
- free_page((unsigned long)(ctx(tfm)->data));
- return PTR_ERR(fallback_tfm);
+ err = PTR_ERR(fallback_tfm);
+ goto out;
}
- ctx(tfm)->fallback.tfm = fallback_tfm;
+ ctx->fallback = fallback_tfm;
+ hash->descsize += crypto_shash_descsize(fallback_tfm);
return 0;
-}
-
-static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
-{
- ctx(tfm)->f_sha_padlock = padlock_do_sha1;
- return padlock_cra_init(tfm);
-}
-
-static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
-{
- ctx(tfm)->f_sha_padlock = padlock_do_sha256;
-
- return padlock_cra_init(tfm);
+out:
+ return err;
}
static void padlock_cra_exit(struct crypto_tfm *tfm)
{
- if (ctx(tfm)->data) {
- free_page((unsigned long)(ctx(tfm)->data));
- ctx(tfm)->data = NULL;
- }
+ struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_hash(ctx(tfm)->fallback.tfm);
- ctx(tfm)->fallback.tfm = NULL;
+ crypto_free_shash(ctx->fallback);
}
-static struct crypto_alg sha1_alg = {
- .cra_name = "sha1",
- .cra_driver_name = "sha1-padlock",
- .cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_DIGEST |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct padlock_sha_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list),
- .cra_init = padlock_sha1_cra_init,
- .cra_exit = padlock_cra_exit,
- .cra_u = {
- .digest = {
- .dia_digestsize = SHA1_DIGEST_SIZE,
- .dia_init = padlock_sha_init,
- .dia_update = padlock_sha_update,
- .dia_final = padlock_sha_final,
- }
+static struct shash_alg sha1_alg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .init = padlock_sha_init,
+ .update = padlock_sha_update,
+ .finup = padlock_sha1_finup,
+ .final = padlock_sha1_final,
+ .descsize = sizeof(struct padlock_sha_desc),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-padlock",
+ .cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct padlock_sha_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = padlock_cra_init,
+ .cra_exit = padlock_cra_exit,
}
};
-static struct crypto_alg sha256_alg = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-padlock",
- .cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_DIGEST |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct padlock_sha_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list),
- .cra_init = padlock_sha256_cra_init,
- .cra_exit = padlock_cra_exit,
- .cra_u = {
- .digest = {
- .dia_digestsize = SHA256_DIGEST_SIZE,
- .dia_init = padlock_sha_init,
- .dia_update = padlock_sha_update,
- .dia_final = padlock_sha_final,
- }
+static struct shash_alg sha256_alg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = padlock_sha_init,
+ .update = padlock_sha_update,
+ .finup = padlock_sha256_finup,
+ .final = padlock_sha256_final,
+ .descsize = sizeof(struct padlock_sha_desc),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-padlock",
+ .cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct padlock_sha_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = padlock_cra_init,
+ .cra_exit = padlock_cra_exit,
}
};
@@ -272,11 +275,11 @@ static int __init padlock_init(void)
return -ENODEV;
}
- rc = crypto_register_alg(&sha1_alg);
+ rc = crypto_register_shash(&sha1_alg);
if (rc)
goto out;
- rc = crypto_register_alg(&sha256_alg);
+ rc = crypto_register_shash(&sha256_alg);
if (rc)
goto out_unreg1;
@@ -285,7 +288,7 @@ static int __init padlock_init(void)
return 0;
out_unreg1:
- crypto_unregister_alg(&sha1_alg);
+ crypto_unregister_shash(&sha1_alg);
out:
printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
return rc;
@@ -293,8 +296,8 @@ out:
static void __exit padlock_fini(void)
{
- crypto_unregister_alg(&sha1_alg);
- crypto_unregister_alg(&sha256_alg);
+ crypto_unregister_shash(&sha1_alg);
+ crypto_unregister_shash(&sha256_alg);
}
module_init(padlock_init);
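
The conversion above keeps a software shash as fallback and only hands PadLock the tail of the message at finup/final time, recovering the running byte count and any buffered partial block via crypto_shash_export(). For reference, a minimal, hypothetical caller-side sketch of that same export pattern using only standard shash calls; the function and variable names are illustrative.

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Illustrative: stream data into "sha1" and snapshot the partial state. */
static int example_sha1_export(const u8 *data, unsigned int len,
			       struct sha1_state *state)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, data, len) ?:
	      crypto_shash_export(desc, state);	/* count + buffered block */

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return ret;
}
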
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index c70775fd3ce..c47ffe8a73e 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -86,6 +86,25 @@ struct talitos_request {
void *context;
};
+/* per-channel fifo management */
+struct talitos_channel {
+ /* request fifo */
+ struct talitos_request *fifo;
+
+ /* number of requests pending in channel h/w fifo */
+ atomic_t submit_count ____cacheline_aligned;
+
+ /* request submission (head) lock */
+ spinlock_t head_lock ____cacheline_aligned;
+ /* index to next free descriptor request */
+ int head;
+
+ /* request release (tail) lock */
+ spinlock_t tail_lock ____cacheline_aligned;
+ /* index to next in-progress/done descriptor request */
+ int tail;
+};
+
struct talitos_private {
struct device *dev;
struct of_device *ofdev;
@@ -101,15 +120,6 @@ struct talitos_private {
/* SEC Compatibility info */
unsigned long features;
- /* next channel to be assigned next incoming descriptor */
- atomic_t last_chan;
-
- /* per-channel number of requests pending in channel h/w fifo */
- atomic_t *submit_count;
-
- /* per-channel request fifo */
- struct talitos_request **fifo;
-
/*
* length of the request fifo
* fifo_len is chfifo_len rounded up to next power of 2
@@ -117,15 +127,10 @@ struct talitos_private {
*/
unsigned int fifo_len;
- /* per-channel index to next free descriptor request */
- int *head;
-
- /* per-channel index to next in-progress/done descriptor request */
- int *tail;
+ struct talitos_channel *chan;
- /* per-channel request submission (head) and release (tail) locks */
- spinlock_t *head_lock;
- spinlock_t *tail_lock;
+ /* next channel to be assigned next incoming descriptor */
+ atomic_t last_chan ____cacheline_aligned;
/* request callback tasklet */
struct tasklet_struct done_task;
@@ -141,6 +146,12 @@ struct talitos_private {
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
+static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
+{
+ talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
+ talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr));
+}
+
/*
* map virtual single (contiguous) pointer to h/w descriptor pointer
*/
@@ -150,8 +161,10 @@ static void map_single_talitos_ptr(struct device *dev,
unsigned char extent,
enum dma_data_direction dir)
{
+ dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
+
talitos_ptr->len = cpu_to_be16(len);
- talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
+ to_talitos_ptr(talitos_ptr, dma_addr);
talitos_ptr->j_extent = extent;
}
@@ -182,9 +195,9 @@ static int reset_channel(struct device *dev, int ch)
return -EIO;
}
- /* set done writeback and IRQ */
- setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
- TALITOS_CCCR_LO_CDIE);
+ /* set 36-bit addressing, done writeback enable and done IRQ enable */
+ setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
+ TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
/* and ICCR writeback, if available */
if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
@@ -282,16 +295,16 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
/* emulate SEC's round-robin channel fifo polling scheme */
ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
- spin_lock_irqsave(&priv->head_lock[ch], flags);
+ spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
- if (!atomic_inc_not_zero(&priv->submit_count[ch])) {
+ if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
/* h/w fifo is full */
- spin_unlock_irqrestore(&priv->head_lock[ch], flags);
+ spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
return -EAGAIN;
}
- head = priv->head[ch];
- request = &priv->fifo[ch][head];
+ head = priv->chan[ch].head;
+ request = &priv->chan[ch].fifo[head];
/* map descriptor and save caller data */
request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
@@ -300,16 +313,19 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
request->context = context;
/* increment fifo head */
- priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1);
+ priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
smp_wmb();
request->desc = desc;
/* GO! */
wmb();
- out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);
+ out_be32(priv->reg + TALITOS_FF(ch),
+ cpu_to_be32(upper_32_bits(request->dma_desc)));
+ out_be32(priv->reg + TALITOS_FF_LO(ch),
+ cpu_to_be32(lower_32_bits(request->dma_desc)));
- spin_unlock_irqrestore(&priv->head_lock[ch], flags);
+ spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
return -EINPROGRESS;
}
@@ -324,11 +340,11 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
unsigned long flags;
int tail, status;
- spin_lock_irqsave(&priv->tail_lock[ch], flags);
+ spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
- tail = priv->tail[ch];
- while (priv->fifo[ch][tail].desc) {
- request = &priv->fifo[ch][tail];
+ tail = priv->chan[ch].tail;
+ while (priv->chan[ch].fifo[tail].desc) {
+ request = &priv->chan[ch].fifo[tail];
/* descriptors with their done bits set don't get the error */
rmb();
@@ -354,22 +370,22 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
request->desc = NULL;
/* increment fifo tail */
- priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
+ priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
- spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
+ spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
- atomic_dec(&priv->submit_count[ch]);
+ atomic_dec(&priv->chan[ch].submit_count);
saved_req.callback(dev, saved_req.desc, saved_req.context,
status);
/* channel may resume processing in single desc error case */
if (error && !reset_ch && status == error)
return;
- spin_lock_irqsave(&priv->tail_lock[ch], flags);
- tail = priv->tail[ch];
+ spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
+ tail = priv->chan[ch].tail;
}
- spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
+ spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
/*
@@ -397,20 +413,20 @@ static void talitos_done(unsigned long data)
static struct talitos_desc *current_desc(struct device *dev, int ch)
{
struct talitos_private *priv = dev_get_drvdata(dev);
- int tail = priv->tail[ch];
+ int tail = priv->chan[ch].tail;
dma_addr_t cur_desc;
cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
- while (priv->fifo[ch][tail].dma_desc != cur_desc) {
+ while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
tail = (tail + 1) & (priv->fifo_len - 1);
- if (tail == priv->tail[ch]) {
+ if (tail == priv->chan[ch].tail) {
dev_err(dev, "couldn't locate current descriptor\n");
return NULL;
}
}
- return priv->fifo[ch][tail].desc;
+ return priv->chan[ch].fifo[tail].desc;
}
/*
@@ -929,7 +945,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
int n_sg = sg_count;
while (n_sg--) {
- link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
+ to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
link_tbl_ptr->j_extent = 0;
link_tbl_ptr++;
@@ -970,7 +986,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
struct talitos_desc *desc = &edesc->desc;
unsigned int cryptlen = areq->cryptlen;
unsigned int authsize = ctx->authsize;
- unsigned int ivsize;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
int sg_count, ret;
int sg_link_tbl_len;
@@ -978,11 +994,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
0, DMA_TO_DEVICE);
/* hmac data */
- map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) -
- sg_virt(areq->assoc), sg_virt(areq->assoc), 0,
- DMA_TO_DEVICE);
+ map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
+ sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
/* cipher iv */
- ivsize = crypto_aead_ivsize(aead);
map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
DMA_TO_DEVICE);
@@ -1006,7 +1020,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->src_is_chained);
if (sg_count == 1) {
- desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
+ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
} else {
sg_link_tbl_len = cryptlen;
@@ -1017,14 +1031,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
&edesc->link_tbl[0]);
if (sg_count > 1) {
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
- desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
+ to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len,
DMA_BIDIRECTIONAL);
} else {
/* Only one segment now, so no link tbl needed */
- desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->
- src));
+ to_talitos_ptr(&desc->ptr[4],
+ sg_dma_address(areq->src));
}
}
@@ -1039,14 +1053,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->dst_is_chained);
if (sg_count == 1) {
- desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
} else {
struct talitos_ptr *link_tbl_ptr =
&edesc->link_tbl[edesc->src_nents + 1];
- desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
- edesc->dma_link_tbl +
- edesc->src_nents + 1);
+ to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
+ (edesc->src_nents + 1) *
+ sizeof(struct talitos_ptr));
sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
link_tbl_ptr);
@@ -1059,11 +1073,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
link_tbl_ptr->len = cpu_to_be16(authsize);
/* icv data follows link tables */
- link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
- edesc->dma_link_tbl +
- edesc->src_nents +
- edesc->dst_nents + 2);
-
+ to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
+ (edesc->src_nents + edesc->dst_nents + 2) *
+ sizeof(struct talitos_ptr));
desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
@@ -1338,7 +1350,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/* first DWORD empty */
desc->ptr[0].len = 0;
- desc->ptr[0].ptr = 0;
+ to_talitos_ptr(&desc->ptr[0], 0);
desc->ptr[0].j_extent = 0;
/* cipher iv */
@@ -1362,20 +1374,20 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
edesc->src_is_chained);
if (sg_count == 1) {
- desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
+ to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
} else {
sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
&edesc->link_tbl[0]);
if (sg_count > 1) {
+ to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
- desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len,
DMA_BIDIRECTIONAL);
} else {
/* Only one segment now, so no link tbl needed */
- desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->
- src));
+ to_talitos_ptr(&desc->ptr[3],
+ sg_dma_address(areq->src));
}
}
@@ -1390,15 +1402,15 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
edesc->dst_is_chained);
if (sg_count == 1) {
- desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
} else {
struct talitos_ptr *link_tbl_ptr =
&edesc->link_tbl[edesc->src_nents + 1];
+ to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+ (edesc->src_nents + 1) *
+ sizeof(struct talitos_ptr));
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
- desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
- edesc->dma_link_tbl +
- edesc->src_nents + 1);
sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
link_tbl_ptr);
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1411,7 +1423,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/* last DWORD empty */
desc->ptr[6].len = 0;
- desc->ptr[6].ptr = 0;
+ to_talitos_ptr(&desc->ptr[6], 0);
desc->ptr[6].j_extent = 0;
ret = talitos_submit(dev, desc, callback, areq);
@@ -1742,17 +1754,11 @@ static int talitos_remove(struct of_device *ofdev)
if (hw_supports(dev, DESC_HDR_SEL0_RNG))
talitos_unregister_rng(dev);
- kfree(priv->submit_count);
- kfree(priv->tail);
- kfree(priv->head);
-
- if (priv->fifo)
- for (i = 0; i < priv->num_channels; i++)
- kfree(priv->fifo[i]);
+ for (i = 0; i < priv->num_channels; i++)
+ if (priv->chan[i].fifo)
+ kfree(priv->chan[i].fifo);
- kfree(priv->fifo);
- kfree(priv->head_lock);
- kfree(priv->tail_lock);
+ kfree(priv->chan);
if (priv->irq != NO_IRQ) {
free_irq(priv->irq, dev);
@@ -1872,58 +1878,36 @@ static int talitos_probe(struct of_device *ofdev,
if (of_device_is_compatible(np, "fsl,sec2.1"))
priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
- priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
- GFP_KERNEL);
- priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
- GFP_KERNEL);
- if (!priv->head_lock || !priv->tail_lock) {
- dev_err(dev, "failed to allocate fifo locks\n");
+ priv->chan = kzalloc(sizeof(struct talitos_channel) *
+ priv->num_channels, GFP_KERNEL);
+ if (!priv->chan) {
+ dev_err(dev, "failed to allocate channel management space\n");
err = -ENOMEM;
goto err_out;
}
for (i = 0; i < priv->num_channels; i++) {
- spin_lock_init(&priv->head_lock[i]);
- spin_lock_init(&priv->tail_lock[i]);
- }
-
- priv->fifo = kmalloc(sizeof(struct talitos_request *) *
- priv->num_channels, GFP_KERNEL);
- if (!priv->fifo) {
- dev_err(dev, "failed to allocate request fifo\n");
- err = -ENOMEM;
- goto err_out;
+ spin_lock_init(&priv->chan[i].head_lock);
+ spin_lock_init(&priv->chan[i].tail_lock);
}
priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
for (i = 0; i < priv->num_channels; i++) {
- priv->fifo[i] = kzalloc(sizeof(struct talitos_request) *
- priv->fifo_len, GFP_KERNEL);
- if (!priv->fifo[i]) {
+ priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
+ priv->fifo_len, GFP_KERNEL);
+ if (!priv->chan[i].fifo) {
dev_err(dev, "failed to allocate request fifo %d\n", i);
err = -ENOMEM;
goto err_out;
}
}
- priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
- GFP_KERNEL);
- if (!priv->submit_count) {
- dev_err(dev, "failed to allocate fifo submit count space\n");
- err = -ENOMEM;
- goto err_out;
- }
for (i = 0; i < priv->num_channels; i++)
- atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1));
+ atomic_set(&priv->chan[i].submit_count,
+ -(priv->chfifo_len - 1));
- priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
- priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
- if (!priv->head || !priv->tail) {
- dev_err(dev, "failed to allocate request index space\n");
- err = -ENOMEM;
- goto err_out;
- }
+ dma_set_mask(dev, DMA_BIT_MASK(36));
/* reset and initialize the h/w */
err = init_device(dev);
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 575981f0cfd..ff5a1450e14 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -57,6 +57,7 @@
#define TALITOS_CCCR_RESET 0x1 /* channel reset */
#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c)
#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */
+#define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */
#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
#define TALITOS_CCCR_LO_NT 0x4 /* notification type */
#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
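
For context on the 36-bit support added above: the probe path now calls dma_set_mask(dev, DMA_BIT_MASK(36)), TALITOS_CCCR_LO_EAE enables extended addressing per channel, and every descriptor pointer carries bits 35:32 in the eptr byte while ptr keeps bits 31:0. A simplified, hypothetical stand-in for that split (not the driver's exact struct talitos_ptr):

#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Simplified stand-in for struct talitos_ptr (see talitos.h). */
struct example_hw_ptr {
	__be16 len;
	u8 j_extent;
	u8 eptr;	/* extended address, bits 35:32 */
	__be32 ptr;	/* bus address, bits 31:0 */
};

static void example_set_ptr(struct example_hw_ptr *p, dma_addr_t addr)
{
	p->ptr = cpu_to_be32(lower_32_bits(addr));
	p->eptr = upper_32_bits(addr) & 0xf;	/* only 4 bits are used */
}
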