path: root/arch/s390
author    Linus Torvalds <torvalds@linux-foundation.org>  2008-01-25 08:38:25 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-01-25 08:38:25 -0800
commit    eba0e319c12fb098d66316a8eafbaaa9174a07c3 (patch)
tree      b2703117db9e36bb3510654efd55361f61c54742 /arch/s390
parent    df8dc74e8a383eaf2d9b44b80a71ec6f0e52b42e (diff)
parent    15e7b4452b72ae890f2fcb027b4c4fa63a1c9a7a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (125 commits)
  [CRYPTO] twofish: Merge common glue code
  [CRYPTO] hifn_795x: Fixup container_of() usage
  [CRYPTO] cast6: inline bloat--
  [CRYPTO] api: Set default CRYPTO_MINALIGN to unsigned long long
  [CRYPTO] tcrypt: Make xcbc available as a standalone test
  [CRYPTO] xcbc: Remove bogus hash/cipher test
  [CRYPTO] xcbc: Fix algorithm leak when block size check fails
  [CRYPTO] tcrypt: Zero axbuf in the right function
  [CRYPTO] padlock: Only reset the key once for each CBC and ECB operation
  [CRYPTO] api: Include sched.h for cond_resched in scatterwalk.h
  [CRYPTO] salsa20-asm: Remove unnecessary dependency on CRYPTO_SALSA20
  [CRYPTO] tcrypt: Add select of AEAD
  [CRYPTO] salsa20: Add x86-64 assembly version
  [CRYPTO] salsa20_i586: Salsa20 stream cipher algorithm (i586 version)
  [CRYPTO] gcm: Introduce rfc4106
  [CRYPTO] api: Show async type
  [CRYPTO] chainiv: Avoid lock spinning where possible
  [CRYPTO] seqiv: Add select AEAD in Kconfig
  [CRYPTO] scatterwalk: Handle zero nbytes in scatterwalk_map_and_copy
  [CRYPTO] null: Allow setkey on digest_null
  ...
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/crypto/aes_s390.c  227
1 file changed, 200 insertions(+), 27 deletions(-)
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 512669691ad..46c97058ebe 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -6,6 +6,7 @@
* s390 Version:
* Copyright IBM Corp. 2005,2007
* Author(s): Jan Glauber (jang@de.ibm.com)
+ *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
*
* Derived from "crypto/aes_generic.c"
*
@@ -16,17 +17,13 @@
*
*/
+#include <crypto/aes.h>
#include <crypto/algapi.h>
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include "crypt_s390.h"
-#define AES_MIN_KEY_SIZE 16
-#define AES_MAX_KEY_SIZE 32
-
-/* data block size for all key lengths */
-#define AES_BLOCK_SIZE 16
-
#define AES_KEYLEN_128 1
#define AES_KEYLEN_192 2
#define AES_KEYLEN_256 4
@@ -39,45 +36,89 @@ struct s390_aes_ctx {
long enc;
long dec;
int key_len;
+ union {
+ struct crypto_blkcipher *blk;
+ struct crypto_cipher *cip;
+ } fallback;
};
-static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len)
+/*
+ * Check if the key_len is supported by the HW.
+ * Returns 0 if it is, a positive number if it is not and a software
+ * fallback is required, or a negative number if the key size is not valid.
+ */
+static int need_fallback(unsigned int key_len)
{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
-
switch (key_len) {
case 16:
if (!(keylen_flag & AES_KEYLEN_128))
- goto fail;
+ return 1;
break;
case 24:
if (!(keylen_flag & AES_KEYLEN_192))
- goto fail;
-
+ return 1;
break;
case 32:
if (!(keylen_flag & AES_KEYLEN_256))
- goto fail;
+ return 1;
break;
default:
- goto fail;
+		return -1;
}
+ return 0;
+}
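The tri-state return value of need_fallback() drives every entry point below. A condensed illustration of the contract as the setkey paths consume it (use_software_fallback() and use_cpacf() are hypothetical placeholders, not functions from this driver):

	ret = need_fallback(key_len);
	if (ret < 0)
		return -EINVAL;			/* unsupported key size */
	else if (ret > 0)
		return use_software_fallback();	/* valid size, no CPACF support */
	else
		return use_cpacf();		/* handled by the hardware */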
+
+static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
+			CRYPTO_TFM_REQ_MASK);
+
+ ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
+ if (ret) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
+				CRYPTO_TFM_RES_MASK);
+ }
+ return ret;
+}
+
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+ int ret;
+
+ ret = need_fallback(key_len);
+ if (ret < 0) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
sctx->key_len = key_len;
- memcpy(sctx->key, in_key, key_len);
- return 0;
-fail:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
+ if (!ret) {
+ memcpy(sctx->key, in_key, key_len);
+ return 0;
+ }
+
+ return setkey_fallback_cip(tfm, in_key, key_len);
}
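From a caller's point of view the fallback is invisible: "aes" behaves the same whether CPACF or the generic C implementation does the work. A minimal caller-side sketch using the single-block cipher API of this kernel era, with error handling trimmed:

	struct crypto_cipher *tfm;
	u8 key[24], in[AES_BLOCK_SIZE], out[AES_BLOCK_SIZE];

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	crypto_cipher_setkey(tfm, key, sizeof(key)); /* 192-bit: SW fallback on a z9 */
	crypto_cipher_encrypt_one(tfm, out, in);
	crypto_free_cipher(tfm);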
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ if (unlikely(need_fallback(sctx->key_len))) {
+ crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
+ return;
+ }
+
switch (sctx->key_len) {
case 16:
crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
@@ -98,6 +139,11 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ if (unlikely(need_fallback(sctx->key_len))) {
+ crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
+ return;
+ }
+
switch (sctx->key_len) {
case 16:
crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
@@ -114,6 +160,29 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
}
}
+static int fallback_init_cip(struct crypto_tfm *tfm)
+{
+ const char *name = tfm->__crt_alg->cra_name;
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+ sctx->fallback.cip = crypto_alloc_cipher(name, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(sctx->fallback.cip)) {
+ printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+		return PTR_ERR(sctx->fallback.cip);
+ }
+
+ return 0;
+}
+
+static void fallback_exit_cip(struct crypto_tfm *tfm)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_cipher(sctx->fallback.cip);
+ sctx->fallback.cip = NULL;
+}
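Note the allocation mask in fallback_init_cip(): passing CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK as the mask with a type of 0 requests an implementation in which both bits are clear, i.e. a synchronous cipher that does not itself need a fallback. Since this driver's algorithms carry CRYPTO_ALG_NEED_FALLBACK (set in their cra_flags elsewhere in this patch), the lookup cannot resolve back to aes-s390 itself; in practice it selects the generic C "aes" cipher:

	/* type = 0, mask = ASYNC | NEED_FALLBACK: match only algorithms
	 * with both flags clear, excluding this driver itself. */
	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);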
static struct crypto_alg aes_alg = {
.cra_name = "aes",
@@ -125,6 +194,8 @@ static struct crypto_alg aes_alg = {
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+ .cra_init = fallback_init_cip,
+ .cra_exit = fallback_exit_cip,
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
@@ -136,10 +207,70 @@ static struct crypto_alg aes_alg = {
}
};
+static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+ sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
+ if (ret) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
+ CRYPTO_TFM_RES_MASK);
+ }
+ return ret;
+}
+
+static int fallback_blk_dec(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+	int ret;
+ struct crypto_blkcipher *tfm;
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+
+ tfm = desc->tfm;
+ desc->tfm = sctx->fallback.blk;
+
+ ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+
+ desc->tfm = tfm;
+ return ret;
+}
+
+static int fallback_blk_enc(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+	int ret;
+ struct crypto_blkcipher *tfm;
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+
+ tfm = desc->tfm;
+ desc->tfm = sctx->fallback.blk;
+
+ ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+
+ desc->tfm = tfm;
+ return ret;
+}
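fallback_blk_dec() and fallback_blk_enc() share one pattern: blkcipher_desc carries the tfm pointer, so the handler temporarily swaps in the fallback tfm, delegates, and restores the original before returning; the IV and flags in the descriptor are reused as-is. The common shape, factored into a hypothetical helper for illustration only:

	static int blk_on_fallback(struct blkcipher_desc *desc,
				   struct crypto_blkcipher *fallback,
				   struct scatterlist *dst,
				   struct scatterlist *src,
				   unsigned int nbytes, bool enc)
	{
		struct crypto_blkcipher *saved = desc->tfm;
		int ret;

		desc->tfm = fallback;	/* desc->info (IV) and flags carry over */
		ret = enc ? crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes)
			  : crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
		desc->tfm = saved;	/* restore before returning to the caller */
		return ret;
	}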
+
static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = need_fallback(key_len);
+ if (ret > 0) {
+ sctx->key_len = key_len;
+ return setkey_fallback_blk(tfm, in_key, key_len);
+ }
switch (key_len) {
case 16:
@@ -188,6 +319,9 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
+ if (unlikely(need_fallback(sctx->key_len)))
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
}
@@ -199,10 +333,37 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
+ if (unlikely(need_fallback(sctx->key_len)))
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
}
+static int fallback_init_blk(struct crypto_tfm *tfm)
+{
+ const char *name = tfm->__crt_alg->cra_name;
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+ sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(sctx->fallback.blk)) {
+ printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+ return PTR_ERR(sctx->fallback.blk);
+ }
+
+ return 0;
+}
+
+static void fallback_exit_blk(struct crypto_tfm *tfm)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_blkcipher(sctx->fallback.blk);
+ sctx->fallback.blk = NULL;
+}
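The blkcipher fallback is exercised like any other blkcipher; a caller-side sketch (era-appropriate API, error handling trimmed) that would take the fallback path for a 256-bit key on a z9:

	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	u8 key[32], iv[AES_BLOCK_SIZE], buf[AES_BLOCK_SIZE];

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	crypto_blkcipher_setkey(tfm, key, sizeof(key));
	crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));
	desc.tfm = tfm;
	desc.flags = 0;
	sg_init_one(&sg, buf, sizeof(buf));
	crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));
	crypto_free_blkcipher(tfm);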
+
static struct crypto_alg ecb_aes_alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-s390",
@@ -214,6 +375,8 @@ static struct crypto_alg ecb_aes_alg = {
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
+ .cra_init = fallback_init_blk,
+ .cra_exit = fallback_exit_blk,
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@@ -229,6 +392,13 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = need_fallback(key_len);
+ if (ret > 0) {
+ sctx->key_len = key_len;
+ return setkey_fallback_blk(tfm, in_key, key_len);
+ }
switch (key_len) {
case 16:
@@ -283,6 +453,9 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
+ if (unlikely(need_fallback(sctx->key_len)))
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
}
@@ -294,6 +467,9 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
+ if (unlikely(need_fallback(sctx->key_len)))
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
}
@@ -309,6 +485,8 @@ static struct crypto_alg cbc_aes_alg = {
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
+ .cra_init = fallback_init_blk,
+ .cra_exit = fallback_exit_blk,
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@@ -336,14 +514,10 @@ static int __init aes_init(void)
return -EOPNOTSUPP;
/* z9 109 and z9 BC/EC only support 128 bit key length */
- if (keylen_flag == AES_KEYLEN_128) {
- aes_alg.cra_u.cipher.cia_max_keysize = AES_MIN_KEY_SIZE;
- ecb_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE;
- cbc_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE;
+ if (keylen_flag == AES_KEYLEN_128)
printk(KERN_INFO
"aes_s390: hardware acceleration only available for"
"128 bit keys\n");
- }
ret = crypto_register_alg(&aes_alg);
if (ret)
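The removal above is the user-visible effect of the patch: on machines that only implement the 128-bit CPACF function (z9 109, z9 BC/EC), the driver no longer clamps cia_max_keysize/max_keysize to AES_MIN_KEY_SIZE; 192- and 256-bit keys are now accepted and served through the software fallback, with only an informational message printed. keylen_flag itself is presumably composed in probe code outside this hunk, one capability bit per key length, along these lines (not shown in this diff):

	if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
		keylen_flag |= AES_KEYLEN_128;
	if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
		keylen_flag |= AES_KEYLEN_192;
	if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
		keylen_flag |= AES_KEYLEN_256;
	if (!keylen_flag)
		return -EOPNOTSUPP;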
@@ -382,4 +556,3 @@ MODULE_ALIAS("aes");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");
-