From 03f5d8cedb31deb558cd97095730cbc8bc54b12a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 31 Dec 2006 10:42:06 +1100 Subject: [CRYPTO] api: Proc functions should be marked as unused The proc functions were incorrectly marked as used rather than unused. They may be unused if proc is disabled. Signed-off-by: Herbert Xu --- crypto/blkcipher.c | 2 +- crypto/hash.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index b5befe8c3a9..bf459179efe 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -386,7 +386,7 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) } static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) - __attribute_used__; + __attribute__ ((unused)); static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) { seq_printf(m, "type : blkcipher\n"); diff --git a/crypto/hash.c b/crypto/hash.c index 12c4514f347..4ccd22deef3 100644 --- a/crypto/hash.c +++ b/crypto/hash.c @@ -41,7 +41,7 @@ static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) } static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) - __attribute_used__; + __attribute__ ((unused)); static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) { seq_printf(m, "type : hash\n"); -- cgit v1.2.3
From 32e3983fe590ac4cd70c7728eb330d43cef031a7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 24 Mar 2007 14:35:34 +1100 Subject: [CRYPTO] api: Add async block cipher interface This patch adds the frontend interface for asynchronous block ciphers. In addition to the usual block cipher parameters, there is a callback function pointer and a data pointer. The callback will be invoked only if the encrypt/decrypt handlers return -EINPROGRESS. In other words, if the return value is zero, the completion handler (or the equivalent code) needs to be invoked by the caller. The request structure is allocated and freed by the caller. Its size is determined by calling crypto_ablkcipher_reqsize(). The helpers ablkcipher_request_alloc/ablkcipher_request_free can be used to manage the memory for a request.
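
For illustration, a minimal sketch of how a caller can drive the new interface and wait for an asynchronous completion, modelled on the tcrypt conversion later in this series. The names my_result, my_complete and my_encrypt, and the choice of "cbc(aes)", are assumptions for the example, not part of this patch:

/* Illustrative sketch only: a caller driving the async interface
 * synchronously.  my_result/my_complete/my_encrypt and "cbc(aes)"
 * are example names, not part of this patch. */
struct my_result {
	struct completion completion;
	int err;
};

static void my_complete(struct crypto_async_request *req, int err)
{
	struct my_result *res = req->data;

	if (err == -EINPROGRESS)	/* backlogged request has started */
		return;

	res->err = err;
	complete(&res->completion);
}

static int my_encrypt(struct scatterlist *src, struct scatterlist *dst,
		      unsigned int nbytes, void *iv,
		      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct my_result res;
	int err;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.completion);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					my_complete, &res);
	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		/* the completion handler supplies the final status */
		wait_for_completion(&res.completion);
		err = res.err;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}

Note that the callback may first be invoked with -EINPROGRESS when a backlogged request begins executing; only the subsequent invocation carries the final status, which is why my_complete() above returns early in that case.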
Signed-off-by: Herbert Xu --- crypto/blkcipher.c | 70 +++++++++++++++-- include/linux/crypto.h | 199 ++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 260 insertions(+), 9 deletions(-) diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index bf459179efe..8edf40c835a 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -349,13 +349,48 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, return cipher->setkey(tfm, key, keylen); } +static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + return setkey(crypto_ablkcipher_tfm(tfm), key, keylen); +} + +static int async_encrypt(struct ablkcipher_request *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + struct blkcipher_desc desc = { + .tfm = __crypto_blkcipher_cast(tfm), + .info = req->info, + .flags = req->base.flags, + }; + + + return alg->encrypt(&desc, req->dst, req->src, req->nbytes); +} + +static int async_decrypt(struct ablkcipher_request *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + struct blkcipher_desc desc = { + .tfm = __crypto_blkcipher_cast(tfm), + .info = req->info, + .flags = req->base.flags, + }; + + return alg->decrypt(&desc, req->dst, req->src, req->nbytes); +} + static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) { struct blkcipher_alg *cipher = &alg->cra_blkcipher; unsigned int len = alg->cra_ctxsize; - if (cipher->ivsize) { + type ^= CRYPTO_ALG_ASYNC; + mask &= CRYPTO_ALG_ASYNC; + if ((type & mask) && cipher->ivsize) { len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1); len += cipher->ivsize; } @@ -363,16 +398,26 @@ static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type, return len; } -static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm) +{ + struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher; + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + + crt->setkey = async_setkey; + crt->encrypt = async_encrypt; + crt->decrypt = async_decrypt; + crt->ivsize = alg->ivsize; + + return 0; +} + +static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm) { struct blkcipher_tfm *crt = &tfm->crt_blkcipher; struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1; unsigned long addr; - if (alg->ivsize > PAGE_SIZE / 8) - return -EINVAL; - crt->setkey = setkey; crt->encrypt = alg->encrypt; crt->decrypt = alg->decrypt; @@ -385,6 +430,21 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) return 0; } +static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +{ + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + + if (alg->ivsize > PAGE_SIZE / 8) + return -EINVAL; + + type ^= CRYPTO_ALG_ASYNC; + mask &= CRYPTO_ALG_ASYNC; + if (type & mask) + return crypto_init_blkcipher_ops_sync(tfm); + else + return crypto_init_blkcipher_ops_async(tfm); +} + static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) __attribute__ ((unused)); static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 779aa78ee64..d4d05313280 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -56,6 +56,7 @@ #define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100 #define 
CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 +#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 #define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 #define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000 #define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000 @@ -88,11 +89,37 @@ #endif struct scatterlist; +struct crypto_ablkcipher; +struct crypto_async_request; struct crypto_blkcipher; struct crypto_hash; struct crypto_tfm; struct crypto_type; +typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); + +struct crypto_async_request { + struct list_head list; + crypto_completion_t complete; + void *data; + struct crypto_tfm *tfm; + + u32 flags; +}; + +struct ablkcipher_request { + struct crypto_async_request base; + + unsigned int nbytes; + + void *info; + + struct scatterlist *src; + struct scatterlist *dst; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + struct blkcipher_desc { struct crypto_blkcipher *tfm; void *info; @@ -232,6 +259,15 @@ static inline int crypto_has_alg(const char *name, u32 type, u32 mask) * crypto_free_*(), as well as the various helpers below. */ +struct ablkcipher_tfm { + int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct ablkcipher_request *req); + int (*decrypt)(struct ablkcipher_request *req); + unsigned int ivsize; + unsigned int reqsize; +}; + struct blkcipher_tfm { void *iv; int (*setkey)(struct crypto_tfm *tfm, const u8 *key, @@ -290,6 +326,7 @@ struct compress_tfm { u8 *dst, unsigned int *dlen); }; +#define crt_ablkcipher crt_u.ablkcipher #define crt_blkcipher crt_u.blkcipher #define crt_cipher crt_u.cipher #define crt_hash crt_u.hash @@ -300,6 +337,7 @@ struct crypto_tfm { u32 crt_flags; union { + struct ablkcipher_tfm ablkcipher; struct blkcipher_tfm blkcipher; struct cipher_tfm cipher; struct hash_tfm hash; @@ -311,6 +349,10 @@ struct crypto_tfm { void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; }; +struct crypto_ablkcipher { + struct crypto_tfm base; +}; + struct crypto_blkcipher { struct crypto_tfm base; }; @@ -411,6 +453,155 @@ static inline unsigned int crypto_tfm_ctx_alignment(void) /* * API wrappers. 
*/ +static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast( + struct crypto_tfm *tfm) +{ + return (struct crypto_ablkcipher *)tfm; +} + +static inline struct crypto_ablkcipher *crypto_alloc_ablkcipher( + const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_ablkcipher_cast( + crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_ablkcipher_tfm( + struct crypto_ablkcipher *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) +{ + crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); +} + +static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, + u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( + struct crypto_ablkcipher *tfm) +{ + return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; +} + +static inline unsigned int crypto_ablkcipher_ivsize( + struct crypto_ablkcipher *tfm) +{ + return crypto_ablkcipher_crt(tfm)->ivsize; +} + +static inline unsigned int crypto_ablkcipher_blocksize( + struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm)); +} + +static inline unsigned int crypto_ablkcipher_alignmask( + struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm)); +} + +static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm)); +} + +static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags); +} + +static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); +} + +static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, + const u8 *key, unsigned int keylen) +{ + return crypto_ablkcipher_crt(tfm)->setkey(tfm, key, keylen); +} + +static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( + struct ablkcipher_request *req) +{ + return __crypto_ablkcipher_cast(req->base.tfm); +} + +static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) +{ + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + return crt->encrypt(req); +} + +static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) +{ + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + return crt->decrypt(req); +} + +static inline int crypto_ablkcipher_reqsize(struct crypto_ablkcipher *tfm) +{ + return crypto_ablkcipher_crt(tfm)->reqsize; +} + +static inline struct ablkcipher_request *ablkcipher_request_alloc( + struct crypto_ablkcipher *tfm, gfp_t gfp) +{ + struct ablkcipher_request *req; + + req = kmalloc(sizeof(struct ablkcipher_request) + + crypto_ablkcipher_reqsize(tfm), gfp); + + if (likely(req)) + req->base.tfm = crypto_ablkcipher_tfm(tfm); + + return req; +} + +static inline void ablkcipher_request_free(struct ablkcipher_request *req) +{ + kfree(req); +} + +static inline void ablkcipher_request_set_callback( + struct ablkcipher_request *req, + u32 flags, crypto_completion_t complete, void *data) +{ + req->base.complete = complete; + req->base.data = data; + req->base.flags = 
flags; +} + +static inline void ablkcipher_request_set_crypt( + struct ablkcipher_request *req, + struct scatterlist *src, struct scatterlist *dst, + unsigned int nbytes, void *iv) +{ + req->src = src; + req->dst = dst; + req->nbytes = nbytes; + req->info = iv; +} + static inline struct crypto_blkcipher *__crypto_blkcipher_cast( struct crypto_tfm *tfm) { @@ -427,9 +618,9 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast( static inline struct crypto_blkcipher *crypto_alloc_blkcipher( const char *alg_name, u32 type, u32 mask) { - type &= ~CRYPTO_ALG_TYPE_MASK; + type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); type |= CRYPTO_ALG_TYPE_BLKCIPHER; - mask |= CRYPTO_ALG_TYPE_MASK; + mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC; return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask)); } @@ -447,9 +638,9 @@ static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) { - type &= ~CRYPTO_ALG_TYPE_MASK; + type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); type |= CRYPTO_ALG_TYPE_BLKCIPHER; - mask |= CRYPTO_ALG_TYPE_MASK; + mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC; return crypto_has_alg(alg_name, type, mask); } -- cgit v1.2.3 From 6158efc09016d3186263f6fd3a50667446ec4008 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 4 Apr 2007 17:41:07 +1000 Subject: [CRYPTO] tcrypt: Use async blkcipher interface This patch converts the tcrypt module to use the asynchronous block cipher interface. As all synchronous block ciphers can be used through the async interface, tcrypt is still able to test them. Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 121 ++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 82 insertions(+), 39 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 8eaa5aa210b..f0aed0106ad 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -57,6 +57,11 @@ #define ENCRYPT 1 #define DECRYPT 0 +struct tcrypt_result { + struct completion completion; + int err; +}; + static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 }; /* @@ -84,6 +89,17 @@ static void hexdump(unsigned char *buf, unsigned int len) printk("\n"); } +static void tcrypt_complete(struct crypto_async_request *req, int err) +{ + struct tcrypt_result *res = req->data; + + if (err == -EINPROGRESS) + return; + + res->err = err; + complete(&res->completion); +} + static void test_hash(char *algo, struct hash_testvec *template, unsigned int tcount) { @@ -203,15 +219,14 @@ static void test_cipher(char *algo, int enc, { unsigned int ret, i, j, k, temp; unsigned int tsize; - unsigned int iv_len; - unsigned int len; char *q; - struct crypto_blkcipher *tfm; + struct crypto_ablkcipher *tfm; char *key; struct cipher_testvec *cipher_tv; - struct blkcipher_desc desc; + struct ablkcipher_request *req; struct scatterlist sg[8]; const char *e; + struct tcrypt_result result; if (enc == ENCRYPT) e = "encryption"; @@ -232,15 +247,24 @@ static void test_cipher(char *algo, int enc, memcpy(tvmem, template, tsize); cipher_tv = (void *)tvmem; - tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC); + init_completion(&result.completion); + + tfm = crypto_alloc_ablkcipher(algo, 0, 0); if (IS_ERR(tfm)) { printk("failed to load transform for %s: %ld\n", algo, PTR_ERR(tfm)); return; } - desc.tfm = tfm; - desc.flags = 0; + + req = ablkcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) { + printk("failed to allocate request for %s\n", algo); + goto out; + } + + 
ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + tcrypt_complete, &result); j = 0; for (i = 0; i < tcount; i++) { @@ -249,17 +273,17 @@ static void test_cipher(char *algo, int enc, printk("test %u (%d bit key):\n", j, cipher_tv[i].klen * 8); - crypto_blkcipher_clear_flags(tfm, ~0); + crypto_ablkcipher_clear_flags(tfm, ~0); if (cipher_tv[i].wk) - crypto_blkcipher_set_flags( + crypto_ablkcipher_set_flags( tfm, CRYPTO_TFM_REQ_WEAK_KEY); key = cipher_tv[i].key; - ret = crypto_blkcipher_setkey(tfm, key, - cipher_tv[i].klen); + ret = crypto_ablkcipher_setkey(tfm, key, + cipher_tv[i].klen); if (ret) { printk("setkey() failed flags=%x\n", - crypto_blkcipher_get_flags(tfm)); + crypto_ablkcipher_get_flags(tfm)); if (!cipher_tv[i].fail) goto out; @@ -268,19 +292,28 @@ static void test_cipher(char *algo, int enc, sg_set_buf(&sg[0], cipher_tv[i].input, cipher_tv[i].ilen); - iv_len = crypto_blkcipher_ivsize(tfm); - if (iv_len) - crypto_blkcipher_set_iv(tfm, cipher_tv[i].iv, - iv_len); + ablkcipher_request_set_crypt(req, sg, sg, + cipher_tv[i].ilen, + cipher_tv[i].iv); - len = cipher_tv[i].ilen; ret = enc ? - crypto_blkcipher_encrypt(&desc, sg, sg, len) : - crypto_blkcipher_decrypt(&desc, sg, sg, len); + crypto_ablkcipher_encrypt(req) : + crypto_ablkcipher_decrypt(req); - if (ret) { - printk("%s () failed flags=%x\n", e, - desc.flags); + switch (ret) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + ret = wait_for_completion_interruptible( + &result.completion); + if (!ret && !((ret = result.err))) { + INIT_COMPLETION(result.completion); + break; + } + /* fall through */ + default: + printk("%s () failed err=%d\n", e, -ret); goto out; } @@ -303,17 +336,17 @@ static void test_cipher(char *algo, int enc, printk("test %u (%d bit key):\n", j, cipher_tv[i].klen * 8); - crypto_blkcipher_clear_flags(tfm, ~0); + crypto_ablkcipher_clear_flags(tfm, ~0); if (cipher_tv[i].wk) - crypto_blkcipher_set_flags( + crypto_ablkcipher_set_flags( tfm, CRYPTO_TFM_REQ_WEAK_KEY); key = cipher_tv[i].key; - ret = crypto_blkcipher_setkey(tfm, key, - cipher_tv[i].klen); + ret = crypto_ablkcipher_setkey(tfm, key, + cipher_tv[i].klen); if (ret) { printk("setkey() failed flags=%x\n", - crypto_blkcipher_get_flags(tfm)); + crypto_ablkcipher_get_flags(tfm)); if (!cipher_tv[i].fail) goto out; @@ -329,19 +362,28 @@ static void test_cipher(char *algo, int enc, cipher_tv[i].tap[k]); } - iv_len = crypto_blkcipher_ivsize(tfm); - if (iv_len) - crypto_blkcipher_set_iv(tfm, cipher_tv[i].iv, - iv_len); + ablkcipher_request_set_crypt(req, sg, sg, + cipher_tv[i].ilen, + cipher_tv[i].iv); - len = cipher_tv[i].ilen; ret = enc ? 
- crypto_blkcipher_encrypt(&desc, sg, sg, len) : - crypto_blkcipher_decrypt(&desc, sg, sg, len); + crypto_ablkcipher_encrypt(req) : + crypto_ablkcipher_decrypt(req); - if (ret) { - printk("%s () failed flags=%x\n", e, - desc.flags); + switch (ret) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + ret = wait_for_completion_interruptible( + &result.completion); + if (!ret && !((ret = result.err))) { + INIT_COMPLETION(result.completion); + break; + } + /* fall through */ + default: + printk("%s () failed err=%d\n", e, -ret); goto out; } @@ -360,7 +402,8 @@ static void test_cipher(char *algo, int enc, } out: - crypto_free_blkcipher(tfm); + crypto_free_ablkcipher(tfm); + ablkcipher_request_free(req); } static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, char *p, @@ -832,7 +875,7 @@ static void test_available(void) while (*name) { printk("alg %s ", *name); - printk(crypto_has_alg(*name, 0, CRYPTO_ALG_ASYNC) ? + printk(crypto_has_alg(*name, 0, 0) ? "found\n" : "not found\n"); name++; } -- cgit v1.2.3 From ebc610e5bc76df073221e64e86c3f7533a09ea40 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 1 Jan 2007 18:37:02 +1100 Subject: [CRYPTO] templates: Pass type/mask when creating instances This patch passes the type/mask along when constructing instances of templates. This is in preparation for templates that may support multiple types of instances depending on what is requested. For example, the planned software async crypto driver will use this construct. For the moment this allows us to check whether the instance constructed is of the correct type and avoid returning success if the type does not match. Signed-off-by: Herbert Xu --- crypto/algapi.c | 42 ++++++++++++++++++++++++++++++++++++------ crypto/cbc.c | 11 ++++++++--- crypto/cryptomgr.c | 28 ++++++++++++++++++---------- crypto/ecb.c | 11 ++++++++--- crypto/hmac.c | 11 ++++++++--- crypto/lrw.c | 11 ++++++++--- crypto/pcbc.c | 11 ++++++++--- crypto/xcbc.c | 12 +++++++++--- include/crypto/algapi.h | 8 +++++--- include/linux/crypto.h | 9 +++++++++ 10 files changed, 117 insertions(+), 37 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index f7d2185b2c8..491205e11cb 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -425,15 +425,45 @@ int crypto_unregister_notifier(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(crypto_unregister_notifier); -struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len, - u32 type, u32 mask) +struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb) { - struct rtattr *rta = param; + struct rtattr *rta = tb[CRYPTOA_TYPE - 1]; + struct crypto_attr_type *algt; + + if (!rta) + return ERR_PTR(-ENOENT); + if (RTA_PAYLOAD(rta) < sizeof(*algt)) + return ERR_PTR(-EINVAL); + + algt = RTA_DATA(rta); + + return algt; +} +EXPORT_SYMBOL_GPL(crypto_get_attr_type); + +int crypto_check_attr_type(struct rtattr **tb, u32 type) +{ + struct crypto_attr_type *algt; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + if ((algt->type ^ type) & algt->mask) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_check_attr_type); + +struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, u32 type, u32 mask) +{ + struct rtattr *rta = tb[CRYPTOA_ALG - 1]; struct crypto_attr_alg *alga; - if (!RTA_OK(rta, len)) - return ERR_PTR(-EBADR); - if (rta->rta_type != CRYPTOA_ALG || RTA_PAYLOAD(rta) < sizeof(*alga)) + if (!rta) + return ERR_PTR(-ENOENT); + if (RTA_PAYLOAD(rta) < sizeof(*alga)) return ERR_PTR(-EINVAL); alga = RTA_DATA(rta); diff --git 
a/crypto/cbc.c b/crypto/cbc.c index 136fea7e700..1f2649e13b4 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -275,13 +275,18 @@ static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *crypto_cbc_alloc(void *param, unsigned int len) +static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c index 2ebffb84f1d..7a3df9b4121 100644 --- a/crypto/cryptomgr.c +++ b/crypto/cryptomgr.c @@ -26,14 +26,19 @@ struct cryptomgr_param { struct work_struct work; + struct rtattr *tb[CRYPTOA_MAX]; + + struct { + struct rtattr attr; + struct crypto_attr_type data; + } type; + struct { struct rtattr attr; struct crypto_attr_alg data; } alg; struct { - u32 type; - u32 mask; char name[CRYPTO_MAX_ALG_NAME]; } larval; @@ -53,7 +58,7 @@ static void cryptomgr_probe(struct work_struct *work) goto err; do { - inst = tmpl->alloc(¶m->alg, sizeof(param->alg)); + inst = tmpl->alloc(param->tb); if (IS_ERR(inst)) err = PTR_ERR(inst); else if ((err = crypto_register_instance(tmpl, inst))) @@ -70,8 +75,8 @@ out: return; err: - crypto_larval_error(param->larval.name, param->larval.type, - param->larval.mask); + crypto_larval_error(param->larval.name, param->type.data.type, + param->type.data.mask); goto out; } @@ -82,7 +87,7 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) const char *p; unsigned int len; - param = kmalloc(sizeof(*param), GFP_KERNEL); + param = kzalloc(sizeof(*param), GFP_KERNEL); if (!param) goto err; @@ -94,7 +99,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) goto err_free_param; memcpy(param->template, name, len); - param->template[len] = 0; name = p + 1; for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++) @@ -104,14 +108,18 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) if (!len || *p != ')' || p[1]) goto err_free_param; + param->type.attr.rta_len = sizeof(param->type); + param->type.attr.rta_type = CRYPTOA_TYPE; + param->type.data.type = larval->alg.cra_flags; + param->type.data.mask = larval->mask; + param->tb[CRYPTOA_TYPE - 1] = ¶m->type.attr; + param->alg.attr.rta_len = sizeof(param->alg); param->alg.attr.rta_type = CRYPTOA_ALG; memcpy(param->alg.data.name, name, len); - param->alg.data.name[len] = 0; + param->tb[CRYPTOA_ALG - 1] = ¶m->alg.attr; memcpy(param->larval.name, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME); - param->larval.type = larval->alg.cra_flags; - param->larval.mask = larval->mask; INIT_WORK(¶m->work, cryptomgr_probe); schedule_work(¶m->work); diff --git a/crypto/ecb.c b/crypto/ecb.c index 839a0aed8c2..6310387a872 100644 --- a/crypto/ecb.c +++ b/crypto/ecb.c @@ -115,13 +115,18 @@ static void crypto_ecb_exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *crypto_ecb_alloc(void *param, unsigned int len) +static struct crypto_instance *crypto_ecb_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); - alg = 
crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/hmac.c b/crypto/hmac.c index 44187c5ee59..8802fb6dd5a 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -197,13 +197,18 @@ static void hmac_free(struct crypto_instance *inst) kfree(inst); } -static struct crypto_instance *hmac_alloc(void *param, unsigned int len) +static struct crypto_instance *hmac_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); + if (err) + return ERR_PTR(err); - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC); + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, + CRYPTO_ALG_TYPE_HASH_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/lrw.c b/crypto/lrw.c index b4105080ac7..621095db28b 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -228,13 +228,18 @@ static void exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *alloc(void *param, unsigned int len) +static struct crypto_instance *alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/pcbc.c b/crypto/pcbc.c index 5174d7fdad6..c3ed8a1c9f4 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -279,13 +279,18 @@ static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *crypto_pcbc_alloc(void *param, unsigned int len) +static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 53e8ccbf0f5..9f502b86e0e 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -288,12 +288,18 @@ static void xcbc_exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *xcbc_alloc(void *param, unsigned int len) +static struct crypto_instance *xcbc_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC); + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); + if (err) + return ERR_PTR(err); + + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 4e05e93ff68..d0c190b4d02 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -15,6 +15,7 @@ #include struct module; +struct rtattr; struct seq_file; struct crypto_type { @@ -38,7 +39,7 @@ 
struct crypto_template { struct hlist_head instances; struct module *module; - struct crypto_instance *(*alloc)(void *param, unsigned int len); + struct crypto_instance *(*alloc)(struct rtattr **tb); void (*free)(struct crypto_instance *inst); char name[CRYPTO_MAX_ALG_NAME]; @@ -96,8 +97,9 @@ void crypto_drop_spawn(struct crypto_spawn *spawn); struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, u32 mask); -struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len, - u32 type, u32 mask); +struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); +int crypto_check_attr_type(struct rtattr **tb, u32 type); +struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, u32 type, u32 mask); struct crypto_instance *crypto_alloc_instance(const char *name, struct crypto_alg *alg); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index d4d05313280..67830e7c2c3 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -372,12 +372,21 @@ struct crypto_hash { enum { CRYPTOA_UNSPEC, CRYPTOA_ALG, + CRYPTOA_TYPE, + __CRYPTOA_MAX, }; +#define CRYPTOA_MAX (__CRYPTOA_MAX - 1) + struct crypto_attr_alg { char name[CRYPTO_MAX_ALG_NAME]; }; +struct crypto_attr_type { + u32 type; + u32 mask; +}; + /* * Transform user interface. */ -- cgit v1.2.3 From b5b7f08869340aa8cfa23303f7d195f161479592 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 16 Apr 2007 20:48:54 +1000 Subject: [CRYPTO] api: Add async blkcipher type This patch adds the mid-level interface for asynchronous block ciphers. It also includes a generic queueing mechanism that can be used by other asynchronous crypto operations in future. Signed-off-by: Herbert Xu --- crypto/Kconfig | 4 +++ crypto/Makefile | 1 + crypto/ablkcipher.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++++ crypto/algapi.c | 62 ++++++++++++++++++++++++++++++++++++ include/crypto/algapi.h | 58 ++++++++++++++++++++++++++++++++++ include/linux/crypto.h | 22 +++++++++++++ 6 files changed, 230 insertions(+) create mode 100644 crypto/ablkcipher.c diff --git a/crypto/Kconfig b/crypto/Kconfig index 086fcec4472..a20a3f14d32 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -16,6 +16,10 @@ config CRYPTO_ALGAPI help This option provides the API for cryptographic algorithms. +config CRYPTO_ABLKCIPHER + tristate + select CRYPTO_BLKCIPHER + config CRYPTO_BLKCIPHER tristate select CRYPTO_ALGAPI diff --git a/crypto/Makefile b/crypto/Makefile index 12f93f57817..3820d4c5ccc 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -8,6 +8,7 @@ crypto_algapi-$(CONFIG_PROC_FS) += proc.o crypto_algapi-objs := algapi.o $(crypto_algapi-y) obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o +obj-$(CONFIG_CRYPTO_ABLKCIPHER) += ablkcipher.o obj-$(CONFIG_CRYPTO_BLKCIPHER) += blkcipher.o crypto_hash-objs := hash.o diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c new file mode 100644 index 00000000000..9348ddd84a5 --- /dev/null +++ b/crypto/ablkcipher.c @@ -0,0 +1,83 @@ +/* + * Asynchronous block chaining cipher operations. + * + * This is the asynchronous version of blkcipher.c indicating completion + * via a callback. + * + * Copyright (c) 2006 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#include +#include +#include +#include +#include + +static int setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm); + + if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + return cipher->setkey(tfm, key, keylen); +} + +static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type, + u32 mask) +{ + return alg->cra_ctxsize; +} + +static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type, + u32 mask) +{ + struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; + struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher; + + if (alg->ivsize > PAGE_SIZE / 8) + return -EINVAL; + + crt->setkey = setkey; + crt->encrypt = alg->encrypt; + crt->decrypt = alg->decrypt; + crt->ivsize = alg->ivsize; + + return 0; +} + +static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) + __attribute__ ((unused)); +static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) +{ + struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher; + + seq_printf(m, "type : ablkcipher\n"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize); + seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize); + seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize); + seq_printf(m, "qlen : %u\n", ablkcipher->queue->qlen); + seq_printf(m, "max qlen : %u\n", ablkcipher->queue->max_qlen); +} + +const struct crypto_type crypto_ablkcipher_type = { + .ctxsize = crypto_ablkcipher_ctxsize, + .init = crypto_init_ablkcipher_ops, +#ifdef CONFIG_PROC_FS + .show = crypto_ablkcipher_show, +#endif +}; +EXPORT_SYMBOL_GPL(crypto_ablkcipher_type); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Asynchronous block chaining cipher type"); diff --git a/crypto/algapi.c b/crypto/algapi.c index 491205e11cb..1c2185b5b00 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -507,6 +507,68 @@ err_free_inst: } EXPORT_SYMBOL_GPL(crypto_alloc_instance); +void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) +{ + INIT_LIST_HEAD(&queue->list); + queue->backlog = &queue->list; + queue->qlen = 0; + queue->max_qlen = max_qlen; +} +EXPORT_SYMBOL_GPL(crypto_init_queue); + +int crypto_enqueue_request(struct crypto_queue *queue, + struct crypto_async_request *request) +{ + int err = -EINPROGRESS; + + if (unlikely(queue->qlen >= queue->max_qlen)) { + err = -EBUSY; + if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + goto out; + if (queue->backlog == &queue->list) + queue->backlog = &request->list; + } + + queue->qlen++; + list_add_tail(&request->list, &queue->list); + +out: + return err; +} +EXPORT_SYMBOL_GPL(crypto_enqueue_request); + +struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) +{ + struct list_head *request; + + if (unlikely(!queue->qlen)) + return NULL; + + queue->qlen--; + + if (queue->backlog != &queue->list) + queue->backlog = queue->backlog->next; + + request = queue->list.next; + list_del(request); + + return list_entry(request, struct crypto_async_request, list); +} +EXPORT_SYMBOL_GPL(crypto_dequeue_request); + +int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm) +{ + struct crypto_async_request *req; + + list_for_each_entry(req, &queue->list, list) { + if (req->tfm == tfm) + return 1; + } + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_tfm_in_queue); + 
static int __init crypto_algapi_init(void) { crypto_init_proc(); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index d0c190b4d02..469f511315c 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -13,6 +13,8 @@ #define _CRYPTO_ALGAPI_H #include +#include +#include struct module; struct rtattr; @@ -51,6 +53,14 @@ struct crypto_spawn { struct crypto_instance *inst; }; +struct crypto_queue { + struct list_head list; + struct list_head *backlog; + + unsigned int qlen; + unsigned int max_qlen; +}; + struct scatter_walk { struct scatterlist *sg; unsigned int offset; @@ -82,6 +92,7 @@ struct blkcipher_walk { int flags; }; +extern const struct crypto_type crypto_ablkcipher_type; extern const struct crypto_type crypto_blkcipher_type; extern const struct crypto_type crypto_hash_type; @@ -103,6 +114,12 @@ struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, u32 type, u32 mask); struct crypto_instance *crypto_alloc_instance(const char *name, struct crypto_alg *alg); +void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); +int crypto_enqueue_request(struct crypto_queue *queue, + struct crypto_async_request *request); +struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); +int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); + int blkcipher_walk_done(struct blkcipher_desc *desc, struct blkcipher_walk *walk, int err); int blkcipher_walk_virt(struct blkcipher_desc *desc, @@ -125,6 +142,17 @@ static inline void *crypto_instance_ctx(struct crypto_instance *inst) return inst->__ctx; } +static inline struct ablkcipher_alg *crypto_ablkcipher_alg( + struct crypto_ablkcipher *tfm) +{ + return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher; +} + +static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm) { return crypto_tfm_ctx(&tfm->base); @@ -172,5 +200,35 @@ static inline void blkcipher_walk_init(struct blkcipher_walk *walk, walk->total = nbytes; } +static inline struct crypto_async_request *crypto_get_backlog( + struct crypto_queue *queue) +{ + return queue->backlog == &queue->list ? NULL : + container_of(queue->backlog, struct crypto_async_request, list); +} + +static inline int ablkcipher_enqueue_request(struct ablkcipher_alg *alg, + struct ablkcipher_request *request) +{ + return crypto_enqueue_request(alg->queue, &request->base); +} + +static inline struct ablkcipher_request *ablkcipher_dequeue_request( + struct ablkcipher_alg *alg) +{ + return ablkcipher_request_cast(crypto_dequeue_request(alg->queue)); +} + +static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req) +{ + return req->__ctx; +} + +static inline int ablkcipher_tfm_in_queue(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_in_queue(crypto_ablkcipher_alg(tfm)->queue, + crypto_ablkcipher_tfm(tfm)); +} + #endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 67830e7c2c3..0ec2467891d 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -93,6 +93,7 @@ struct crypto_ablkcipher; struct crypto_async_request; struct crypto_blkcipher; struct crypto_hash; +struct crypto_queue; struct crypto_tfm; struct crypto_type; @@ -143,6 +144,19 @@ struct hash_desc { * Algorithms: modular crypto algorithm implementations, managed * via crypto_register_alg() and crypto_unregister_alg(). 
*/ +struct ablkcipher_alg { + int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct ablkcipher_request *req); + int (*decrypt)(struct ablkcipher_request *req); + + struct crypto_queue *queue; + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; +}; + struct blkcipher_alg { int (*setkey)(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); @@ -197,6 +211,7 @@ struct compress_alg { unsigned int slen, u8 *dst, unsigned int *dlen); }; +#define cra_ablkcipher cra_u.ablkcipher #define cra_blkcipher cra_u.blkcipher #define cra_cipher cra_u.cipher #define cra_digest cra_u.digest @@ -221,6 +236,7 @@ struct crypto_alg { const struct crypto_type *cra_type; union { + struct ablkcipher_alg ablkcipher; struct blkcipher_alg blkcipher; struct cipher_alg cipher; struct digest_alg digest; @@ -572,6 +588,12 @@ static inline int crypto_ablkcipher_reqsize(struct crypto_ablkcipher *tfm) return crypto_ablkcipher_crt(tfm)->reqsize; } +static inline struct ablkcipher_request *ablkcipher_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct ablkcipher_request, base); +} + static inline struct ablkcipher_request *ablkcipher_request_alloc( struct crypto_ablkcipher *tfm, gfp_t gfp) { -- cgit v1.2.3 From cf02f5da9437201d57d93f529839dd40aac8b5f9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 29 Mar 2007 17:32:59 +1000 Subject: [CRYPTO] cryptomgr: Fix parsing of nested templates This patch allows the use of nested templates by allowing the use of brackets inside a template parameter. Signed-off-by: Herbert Xu --- crypto/cryptomgr.c | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c index 7a3df9b4121..6958ea83ee4 100644 --- a/crypto/cryptomgr.c +++ b/crypto/cryptomgr.c @@ -14,17 +14,17 @@ #include #include #include +#include #include #include #include #include #include -#include #include "internal.h" struct cryptomgr_param { - struct work_struct work; + struct task_struct *thread; struct rtattr *tb[CRYPTOA_MAX]; @@ -45,10 +45,9 @@ struct cryptomgr_param { char template[CRYPTO_MAX_ALG_NAME]; }; -static void cryptomgr_probe(struct work_struct *work) +static int cryptomgr_probe(void *data) { - struct cryptomgr_param *param = - container_of(work, struct cryptomgr_param, work); + struct cryptomgr_param *param = data; struct crypto_template *tmpl; struct crypto_instance *inst; int err; @@ -72,7 +71,7 @@ static void cryptomgr_probe(struct work_struct *work) out: kfree(param); - return; + module_put_and_exit(0); err: crypto_larval_error(param->larval.name, param->type.data.type, @@ -87,9 +86,12 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) const char *p; unsigned int len; + if (!try_module_get(THIS_MODULE)) + goto err; + param = kzalloc(sizeof(*param), GFP_KERNEL); if (!param) - goto err; + goto err_put_module; for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++) ; @@ -101,11 +103,18 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) memcpy(param->template, name, len); name = p + 1; - for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++) - ; + len = 0; + for (p = name; *p; p++) { + for (; isalnum(*p) || *p == '-' || *p == '_' || *p == '('; p++) + ; - len = p - name; - if (!len || *p != ')' || p[1]) + if (*p != ')') + goto err_free_param; + + len = p - name; + } + + if (!len || name[len + 1]) goto err_free_param; param->type.attr.rta_len = 
sizeof(param->type); @@ -121,13 +130,16 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) memcpy(param->larval.name, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME); - INIT_WORK(¶m->work, cryptomgr_probe); - schedule_work(¶m->work); + param->thread = kthread_run(cryptomgr_probe, param, "cryptomgr"); + if (IS_ERR(param->thread)) + goto err_free_param; return NOTIFY_STOP; err_free_param: kfree(param); +err_put_module: + module_put(THIS_MODULE); err: return NOTIFY_OK; } -- cgit v1.2.3 From a73e69965fa2647faa36caf40f4132b9c99d61fd Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 8 Apr 2007 21:31:36 +1000 Subject: [CRYPTO] api: Do not remove users unless new algorithm matches As it is whenever a new algorithm with the same name is registered users of the old algorithm will be removed so that they can take advantage of the new algorithm. This presents a problem when the new algorithm is not equivalent to the old algorithm. In particular, the new algorithm might only function on top of the existing one. Hence we should not remove users unless they can make use of the new algorithm. Signed-off-by: Herbert Xu --- crypto/algapi.c | 65 +++++++++++++++++++++++++++++-------------------- include/crypto/algapi.h | 3 ++- 2 files changed, 41 insertions(+), 27 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index 1c2185b5b00..f137a432061 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -84,36 +84,47 @@ static void crypto_destroy_instance(struct crypto_alg *alg) crypto_tmpl_put(tmpl); } -static void crypto_remove_spawns(struct list_head *spawns, - struct list_head *list) +static void crypto_remove_spawn(struct crypto_spawn *spawn, + struct list_head *list, + struct list_head *secondary_spawns) { - struct crypto_spawn *spawn, *n; + struct crypto_instance *inst = spawn->inst; + struct crypto_template *tmpl = inst->tmpl; - list_for_each_entry_safe(spawn, n, spawns, list) { - struct crypto_instance *inst = spawn->inst; - struct crypto_template *tmpl = inst->tmpl; + list_del_init(&spawn->list); + spawn->alg = NULL; - list_del_init(&spawn->list); - spawn->alg = NULL; + if (crypto_is_dead(&inst->alg)) + return; - if (crypto_is_dead(&inst->alg)) - continue; + inst->alg.cra_flags |= CRYPTO_ALG_DEAD; + if (!tmpl || !crypto_tmpl_get(tmpl)) + return; + + crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg); + list_move(&inst->alg.cra_list, list); + hlist_del(&inst->list); + inst->alg.cra_destroy = crypto_destroy_instance; + + list_splice(&inst->alg.cra_users, secondary_spawns); +} + +static void crypto_remove_spawns(struct list_head *spawns, + struct list_head *list, u32 new_type) +{ + struct crypto_spawn *spawn, *n; + LIST_HEAD(secondary_spawns); - inst->alg.cra_flags |= CRYPTO_ALG_DEAD; - if (!tmpl || !crypto_tmpl_get(tmpl)) + list_for_each_entry_safe(spawn, n, spawns, list) { + if ((spawn->alg->cra_flags ^ new_type) & spawn->mask) continue; - crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg); - list_move(&inst->alg.cra_list, list); - hlist_del(&inst->list); - inst->alg.cra_destroy = crypto_destroy_instance; + crypto_remove_spawn(spawn, list, &secondary_spawns); + } - if (!list_empty(&inst->alg.cra_users)) { - if (&n->list == spawns) - n = list_entry(inst->alg.cra_users.next, - typeof(*n), list); - __list_splice(&inst->alg.cra_users, spawns->prev); - } + while (!list_empty(&secondary_spawns)) { + list_for_each_entry_safe(spawn, n, &secondary_spawns, list) + crypto_remove_spawn(spawn, list, &secondary_spawns); } } @@ -164,7 +175,7 @@ static int __crypto_register_alg(struct 
crypto_alg *alg, q->cra_priority > alg->cra_priority) continue; - crypto_remove_spawns(&q->cra_users, list); + crypto_remove_spawns(&q->cra_users, list, alg->cra_flags); } list_add(&alg->cra_list, &crypto_alg_list); @@ -214,7 +225,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list) crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg); list_del_init(&alg->cra_list); - crypto_remove_spawns(&alg->cra_users, list); + crypto_remove_spawns(&alg->cra_users, list, alg->cra_flags); return 0; } @@ -351,11 +362,12 @@ err: EXPORT_SYMBOL_GPL(crypto_register_instance); int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, - struct crypto_instance *inst) + struct crypto_instance *inst, u32 mask) { int err = -EAGAIN; spawn->inst = inst; + spawn->mask = mask; down_write(&crypto_alg_sem); if (!crypto_is_moribund(alg)) { @@ -494,7 +506,8 @@ struct crypto_instance *crypto_alloc_instance(const char *name, goto err_free_inst; spawn = crypto_instance_ctx(inst); - err = crypto_init_spawn(spawn, alg, inst); + err = crypto_init_spawn(spawn, alg, inst, + CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); if (err) goto err_free_inst; diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 469f511315c..7847fc2a03f 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -51,6 +51,7 @@ struct crypto_spawn { struct list_head list; struct crypto_alg *alg; struct crypto_instance *inst; + u32 mask; }; struct crypto_queue { @@ -103,7 +104,7 @@ void crypto_unregister_template(struct crypto_template *tmpl); struct crypto_template *crypto_lookup_template(const char *name); int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, - struct crypto_instance *inst); + struct crypto_instance *inst, u32 mask); void crypto_drop_spawn(struct crypto_spawn *spawn); struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, u32 mask); -- cgit v1.2.3 From 124b53d020622ffa24e27406f2373d5a3debd0d3 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 16 Apr 2007 20:49:20 +1000 Subject: [CRYPTO] cryptd: Add software async crypto daemon This patch adds the cryptd module which is a template that takes a synchronous software crypto algorithm and converts it to an asynchronous one by executing it in a kernel thread. Signed-off-by: Herbert Xu --- crypto/Kconfig | 9 ++ crypto/Makefile | 1 + crypto/cryptd.c | 375 ++++++++++++++++++++++++++++++++++++++++++++++++ include/crypto/algapi.h | 15 ++ 4 files changed, 400 insertions(+) create mode 100644 crypto/cryptd.c diff --git a/crypto/Kconfig b/crypto/Kconfig index a20a3f14d32..620e14cabdc 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -175,6 +175,15 @@ config CRYPTO_LRW The first 128, 192 or 256 bits in the key are used for AES and the rest is used to tie each cipher block to its logical position. +config CRYPTO_CRYPTD + tristate "Software async crypto daemon" + select CRYPTO_ABLKCIPHER + select CRYPTO_MANAGER + help + This is a generic software asynchronous crypto daemon that + converts an arbitrary synchronous software crypto algorithm + into an asynchronous algorithm that executes in a kernel thread. 
+ config CRYPTO_DES tristate "DES and Triple DES EDE cipher algorithms" select CRYPTO_ALGAPI diff --git a/crypto/Makefile b/crypto/Makefile index 3820d4c5ccc..cce46a1c9dc 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_CRYPTO_ECB) += ecb.o obj-$(CONFIG_CRYPTO_CBC) += cbc.o obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o obj-$(CONFIG_CRYPTO_LRW) += lrw.o +obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_DES) += des.o obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o diff --git a/crypto/cryptd.c b/crypto/cryptd.c new file mode 100644 index 00000000000..3ff4e1f0f03 --- /dev/null +++ b/crypto/cryptd.c @@ -0,0 +1,375 @@ +/* + * Software async crypto daemon. + * + * Copyright (c) 2006 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CRYPTD_MAX_QLEN 100 + +struct cryptd_state { + spinlock_t lock; + struct mutex mutex; + struct crypto_queue queue; + struct task_struct *task; +}; + +struct cryptd_instance_ctx { + struct crypto_spawn spawn; + struct cryptd_state *state; +}; + +struct cryptd_blkcipher_ctx { + struct crypto_blkcipher *child; +}; + +struct cryptd_blkcipher_request_ctx { + crypto_completion_t complete; +}; + + +static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); + struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); + return ictx->state; +} + +static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent, + const u8 *key, unsigned int keylen) +{ + struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent); + struct crypto_blkcipher *child = ctx->child; + int err; + + crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_blkcipher_setkey(child, key, keylen); + crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); + return err; +} + +static void cryptd_blkcipher_crypt(struct ablkcipher_request *req, + struct crypto_blkcipher *child, + int err, + int (*crypt)(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int len)) +{ + struct cryptd_blkcipher_request_ctx *rctx; + struct blkcipher_desc desc; + + rctx = ablkcipher_request_ctx(req); + + if (unlikely(err == -EINPROGRESS)) { + rctx->complete(&req->base, err); + return; + } + + desc.tfm = child; + desc.info = req->info; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypt(&desc, req->dst, req->src, req->nbytes); + + req->base.complete = rctx->complete; + + local_bh_disable(); + req->base.complete(&req->base, err); + local_bh_enable(); +} + +static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err) +{ + struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); + struct crypto_blkcipher *child = ctx->child; + + cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, + crypto_blkcipher_crt(child)->encrypt); +} + +static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err) +{ + struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); + struct 
crypto_blkcipher *child = ctx->child; + + cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, + crypto_blkcipher_crt(child)->decrypt); +} + +static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, + crypto_completion_t complete) +{ + struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct cryptd_state *state = + cryptd_get_state(crypto_ablkcipher_tfm(tfm)); + int err; + + rctx->complete = req->base.complete; + req->base.complete = complete; + + spin_lock_bh(&state->lock); + err = ablkcipher_enqueue_request(crypto_ablkcipher_alg(tfm), req); + spin_unlock_bh(&state->lock); + + wake_up_process(state->task); + return err; +} + +static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req) +{ + return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt); +} + +static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req) +{ + return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt); +} + +static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); + struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); + struct crypto_spawn *spawn = &ictx->spawn; + struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_blkcipher *cipher; + + cipher = crypto_spawn_blkcipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctx->child = cipher; + tfm->crt_ablkcipher.reqsize = + sizeof(struct cryptd_blkcipher_request_ctx); + return 0; +} + +static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) +{ + struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct cryptd_state *state = cryptd_get_state(tfm); + int active; + + mutex_lock(&state->mutex); + active = ablkcipher_tfm_in_queue(__crypto_ablkcipher_cast(tfm)); + mutex_unlock(&state->mutex); + + BUG_ON(active); + + crypto_free_blkcipher(ctx->child); +} + +static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, + struct cryptd_state *state) +{ + struct crypto_instance *inst; + struct cryptd_instance_ctx *ctx; + int err; + + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (IS_ERR(inst)) + goto out; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto out_free_inst; + + ctx = crypto_instance_ctx(inst); + err = crypto_init_spawn(&ctx->spawn, alg, inst, + CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + if (err) + goto out_free_inst; + + ctx->state = state; + + memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); + + inst->alg.cra_priority = alg->cra_priority + 50; + inst->alg.cra_blocksize = alg->cra_blocksize; + inst->alg.cra_alignmask = alg->cra_alignmask; + +out: + return inst; + +out_free_inst: + kfree(inst); + inst = ERR_PTR(err); + goto out; +} + +static struct crypto_instance *cryptd_alloc_blkcipher( + struct rtattr **tb, struct cryptd_state *state) +{ + struct crypto_instance *inst; + struct crypto_alg *alg; + + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, + CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + if (IS_ERR(alg)) + return ERR_PTR(PTR_ERR(alg)); + + inst = cryptd_alloc_instance(alg, state); + if (IS_ERR(inst)) + goto out_put_alg; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC; + inst->alg.cra_type = &crypto_ablkcipher_type; + + inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize; + 
inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize; + inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize; + + inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx); + + inst->alg.cra_init = cryptd_blkcipher_init_tfm; + inst->alg.cra_exit = cryptd_blkcipher_exit_tfm; + + inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey; + inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; + inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; + + inst->alg.cra_ablkcipher.queue = &state->queue; + +out_put_alg: + crypto_mod_put(alg); + return inst; +} + +static struct cryptd_state state; + +static struct crypto_instance *cryptd_alloc(struct rtattr **tb) +{ + struct crypto_attr_type *algt; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return ERR_PTR(PTR_ERR(algt)); + + switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_BLKCIPHER: + return cryptd_alloc_blkcipher(tb, &state); + } + + return ERR_PTR(-EINVAL); +} + +static void cryptd_free(struct crypto_instance *inst) +{ + struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); + + crypto_drop_spawn(&ctx->spawn); + kfree(inst); +} + +static struct crypto_template cryptd_tmpl = { + .name = "cryptd", + .alloc = cryptd_alloc, + .free = cryptd_free, + .module = THIS_MODULE, +}; + +static inline int cryptd_create_thread(struct cryptd_state *state, + int (*fn)(void *data), const char *name) +{ + spin_lock_init(&state->lock); + mutex_init(&state->mutex); + crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN); + + state->task = kthread_create(fn, state, name); + if (IS_ERR(state->task)) + return PTR_ERR(state->task); + + return 0; +} + +static inline void cryptd_stop_thread(struct cryptd_state *state) +{ + BUG_ON(state->queue.qlen); + kthread_stop(state->task); +} + +static int cryptd_thread(void *data) +{ + struct cryptd_state *state = data; + int stop; + + do { + struct crypto_async_request *req, *backlog; + + mutex_lock(&state->mutex); + __set_current_state(TASK_INTERRUPTIBLE); + + spin_lock_bh(&state->lock); + backlog = crypto_get_backlog(&state->queue); + req = crypto_dequeue_request(&state->queue); + spin_unlock_bh(&state->lock); + + stop = kthread_should_stop(); + + if (stop || req) { + __set_current_state(TASK_RUNNING); + if (req) { + if (backlog) + backlog->complete(backlog, + -EINPROGRESS); + req->complete(req, 0); + } + } + + mutex_unlock(&state->mutex); + + schedule(); + } while (!stop); + + return 0; +} + +static int __init cryptd_init(void) +{ + int err; + + err = cryptd_create_thread(&state, cryptd_thread, "cryptd"); + if (err) + return err; + + err = crypto_register_template(&cryptd_tmpl); + if (err) + kthread_stop(state.task); + + return err; +} + +static void __exit cryptd_exit(void) +{ + cryptd_stop_thread(&state); + crypto_unregister_template(&cryptd_tmpl); +} + +module_init(cryptd_init); +module_exit(cryptd_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Software async crypto daemon"); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 7847fc2a03f..b2b1e6efd81 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -138,6 +138,12 @@ static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) return (void *)ALIGN(addr, align); } +static inline struct crypto_instance *crypto_tfm_alg_instance( + struct crypto_tfm *tfm) +{ + return container_of(tfm->__crt_alg, struct crypto_instance, alg); +} + static inline void *crypto_instance_ctx(struct crypto_instance *inst) { return 
From e196d6259141eda47aeafd88514aae652bfbfc7f Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sat, 14 Apr 2007 16:09:14 +1000
Subject: [CRYPTO] api: Add ablkcipher_request_set_tfm

This patch adds ablkcipher_request_set_tfm for those users that need to
manage the memory for ablkcipher requests directly.

Signed-off-by: Herbert Xu
---
 include/linux/crypto.h | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 0ec2467891d..0de7e2ace82 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -588,6 +588,12 @@ static inline int crypto_ablkcipher_reqsize(struct crypto_ablkcipher *tfm)
         return crypto_ablkcipher_crt(tfm)->reqsize;
 }
 
+static inline void ablkcipher_request_set_tfm(
+        struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
+{
+        req->base.tfm = crypto_ablkcipher_tfm(tfm);
+}
+
 static inline struct ablkcipher_request *ablkcipher_request_cast(
         struct crypto_async_request *req)
 {
@@ -603,7 +609,7 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc(
                 crypto_ablkcipher_reqsize(tfm), gfp);
 
         if (likely(req))
-                req->base.tfm = crypto_ablkcipher_tfm(tfm);
+                ablkcipher_request_set_tfm(req, tfm);
 
         return req;
 }
-- cgit v1.2.3
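The new helper pairs naturally with caller-managed request memory. A minimal
sketch, assuming a hypothetical per-operation my_op structure; the layout
mirrors what ablkcipher_request_alloc() does internally, as shown in the
diff above:

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/slab.h>

struct my_op {
        struct completion done;
        int err;
        /* Must be last: crypto_ablkcipher_reqsize(tfm) bytes of
         * algorithm-private context follow the request itself. */
        struct ablkcipher_request req;
};

static struct my_op *my_op_alloc(struct crypto_ablkcipher *tfm, gfp_t gfp)
{
        struct my_op *op;

        op = kmalloc(sizeof(*op) + crypto_ablkcipher_reqsize(tfm), gfp);
        if (!op)
                return NULL;

        init_completion(&op->done);
        ablkcipher_request_set_tfm(&op->req, tfm);      /* no separate alloc */
        return op;
}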
From f6259deacfd55607ae57cff422d3bc7694ea14e7 Mon Sep 17 00:00:00 2001
From: Simon Arlott
Date: Wed, 2 May 2007 22:08:26 +1000
Subject: [CRYPTO] padlock: Remove pointless padlock module

When this is compiled in it is run too early to do anything useful:

[ 6.052000] padlock: No VIA PadLock drivers have been loaded.
[ 6.052000] padlock: Using VIA PadLock ACE for AES algorithm.
[ 6.052000] padlock: Using VIA PadLock ACE for SHA1/SHA256 algorithms.

When it's a module it isn't doing anything special; the same functionality
can be provided in userspace by "probeall padlock padlock-aes padlock-sha"
in modules.conf if it is required.

Signed-off-by: Simon Arlott
Cc: Michal Ludvig
Signed-off-by: Herbert Xu
---
 drivers/crypto/Kconfig   | 16 +++----------
 drivers/crypto/Makefile  |  1 -
 drivers/crypto/padlock.c | 58 ------------------------------------------
 3 files changed, 3 insertions(+), 72 deletions(-)
 delete mode 100644 drivers/crypto/padlock.c

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index ff8c4beaace..f21fe66c9ee 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -1,10 +1,10 @@
 menu "Hardware crypto devices"
 
 config CRYPTO_DEV_PADLOCK
-        tristate "Support for VIA PadLock ACE"
+        bool "Support for VIA PadLock ACE"
         depends on X86_32
         select CRYPTO_ALGAPI
-        default m
+        default y
         help
           Some VIA processors come with an integrated crypto engine
           (so called VIA PadLock ACE, Advanced Cryptography Engine)
@@ -14,16 +14,6 @@ config CRYPTO_DEV_PADLOCK
           The instructions are used only when the CPU supports them.
           Otherwise software encryption is used.
 
-          Selecting M for this option will compile a helper module
-          padlock.ko that should autoload all below configured
-          algorithms. Don't worry if your hardware does not support
-          some or all of them. In such case padlock.ko will
-          simply write a single line into the kernel log informing
-          about its failure but everything will keep working fine.
-
-          If you are unsure, say M. The compiled module will be
-          called padlock.ko
-
 config CRYPTO_DEV_PADLOCK_AES
         tristate "PadLock driver for AES algorithm"
         depends on CRYPTO_DEV_PADLOCK
@@ -55,7 +45,7 @@ source "arch/s390/crypto/Kconfig"
 
 config CRYPTO_DEV_GEODE
         tristate "Support for the Geode LX AES engine"
-        depends on CRYPTO && X86_32 && PCI
+        depends on X86_32 && PCI
         select CRYPTO_ALGAPI
         select CRYPTO_BLKCIPHER
         default m
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6059cf86941..d070030f7d7 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,4 +1,3 @@
-obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
diff --git a/drivers/crypto/padlock.c b/drivers/crypto/padlock.c
deleted file mode 100644
index d6d7dd5bb98..00000000000
--- a/drivers/crypto/padlock.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Support for VIA PadLock hardware crypto engine.
- *
- * Copyright (c) 2006 Michal Ludvig
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "padlock.h"
-
-static int __init padlock_init(void)
-{
-        int success = 0;
-
-        if (crypto_has_cipher("aes-padlock", 0, 0))
-                success++;
-
-        if (crypto_has_hash("sha1-padlock", 0, 0))
-                success++;
-
-        if (crypto_has_hash("sha256-padlock", 0, 0))
-                success++;
-
-        if (!success) {
-                printk(KERN_WARNING PFX "No VIA PadLock drivers have been loaded.\n");
-                return -ENODEV;
-        }
-
-        printk(KERN_NOTICE PFX "%d drivers are available.\n", success);
-
-        return 0;
-}
-
-static void __exit padlock_fini(void)
-{
-}
-
-module_init(padlock_init);
-module_exit(padlock_fini);
-
-MODULE_DESCRIPTION("Load all configured PadLock algorithms.");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Michal Ludvig");
-
-- cgit v1.2.3
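For reference, the userspace replacement suggested in the commit message
amounts to a single configuration line. On a system still using modutils it
would look roughly like this (illustrative; the exact file and syntax depend
on the module tools in use):

# /etc/modules.conf
probeall padlock padlock-aes padlock-sha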