[CRYPTO] padlock: Added block cipher versions of CBC/ECB
author    Herbert Xu <herbert@gondor.apana.org.au>
          Mon, 21 Aug 2006 11:38:42 +0000 (21:38 +1000)
committer Herbert Xu <herbert@gondor.apana.org.au>
          Thu, 21 Sep 2006 01:44:35 +0000 (11:44 +1000)
This patch adds block cipher algorithms for cbc(aes) and ecb(aes) for
the PadLock device.  Once all users of the old cipher type have been
converted, the old cbc/ecb PadLock operations will be removed.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/Kconfig
drivers/crypto/padlock-aes.c
drivers/crypto/padlock.h
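
For context, kernel consumers reach these algorithms through the
generic blkcipher interface rather than calling the driver directly.
A minimal in-kernel sketch of that usage (hypothetical helper name and
buffers, error paths trimmed) might look like:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical example: encrypt a contiguous buffer in place with
 * cbc(aes).  The crypto core binds the request to the highest-priority
 * provider, so cbc-aes-padlock (priority 400) wins over the generic
 * cbc template around aes-generic when the hardware is present. */
static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
				   const u8 *iv, u8 *buf, unsigned int len)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));

	sg_init_one(&sg, buf, len);	/* len must be a multiple of 16 */
	desc.tfm = tfm;
	desc.flags = 0;
	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);

out:
	crypto_free_blkcipher(tfm);
	return err;
}

The same call pattern applies to ecb(aes), minus the IV handling.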

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 86c99cd333faeea737efeba0815c37535c75216b..adb554153f672a48d2d4d9f711513ef93d5ce9c8 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -27,6 +27,7 @@ config CRYPTO_DEV_PADLOCK
 config CRYPTO_DEV_PADLOCK_AES
        tristate "PadLock driver for AES algorithm"
        depends on CRYPTO_DEV_PADLOCK
+       select CRYPTO_BLKCIPHER
        default m
        help
          Use VIA PadLock for AES algorithm.
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 3e683709243e4439fd7cc388f2d37dd5db108d27..f53301e836d9841e60ddf3854b9eb63ccef4e414 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
  * ---------------------------------------------------------------------------
  */
 
+#include <crypto/algapi.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/crypto.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <asm/byteorder.h>
@@ -297,9 +297,9 @@ aes_hw_extkey_available(uint8_t key_len)
        return 0;
 }
 
-static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+static inline struct aes_ctx *aes_ctx_common(void *ctx)
 {
-       unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
+       unsigned long addr = (unsigned long)ctx;
        unsigned long align = PADLOCK_ALIGNMENT;
 
        if (align <= crypto_tfm_ctx_alignment())
@@ -307,6 +307,16 @@ static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
        return (struct aes_ctx *)ALIGN(addr, align);
 }
 
+static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+{
+       return aes_ctx_common(crypto_tfm_ctx(tfm));
+}
+
+static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
+{
+       return aes_ctx_common(crypto_blkcipher_ctx(tfm));
+}
+
 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
 {
@@ -507,6 +517,141 @@ static struct crypto_alg aes_alg = {
        }
 };
 
+static int ecb_aes_encrypt(struct blkcipher_desc *desc,
+                          struct scatterlist *dst, struct scatterlist *src,
+                          unsigned int nbytes)
+{
+       struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while ((nbytes = walk.nbytes)) {
+               padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+                                  ctx->E, &ctx->cword.encrypt,
+                                  nbytes / AES_BLOCK_SIZE);
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       return err;
+}
+
+static int ecb_aes_decrypt(struct blkcipher_desc *desc,
+                          struct scatterlist *dst, struct scatterlist *src,
+                          unsigned int nbytes)
+{
+       struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while ((nbytes = walk.nbytes)) {
+               padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+                                  ctx->D, &ctx->cword.decrypt,
+                                  nbytes / AES_BLOCK_SIZE);
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       return err;
+}
+
+static struct crypto_alg ecb_aes_alg = {
+       .cra_name               =       "ecb(aes)",
+       .cra_driver_name        =       "ecb-aes-padlock",
+       .cra_priority           =       PADLOCK_COMPOSITE_PRIORITY,
+       .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          =       AES_BLOCK_SIZE,
+       .cra_ctxsize            =       sizeof(struct aes_ctx),
+       .cra_alignmask          =       PADLOCK_ALIGNMENT - 1,
+       .cra_type               =       &crypto_blkcipher_type,
+       .cra_module             =       THIS_MODULE,
+       .cra_list               =       LIST_HEAD_INIT(ecb_aes_alg.cra_list),
+       .cra_u                  =       {
+               .blkcipher = {
+                       .min_keysize            =       AES_MIN_KEY_SIZE,
+                       .max_keysize            =       AES_MAX_KEY_SIZE,
+                       .setkey                 =       aes_set_key,
+                       .encrypt                =       ecb_aes_encrypt,
+                       .decrypt                =       ecb_aes_decrypt,
+               }
+       }
+};
+
+static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+                          struct scatterlist *dst, struct scatterlist *src,
+                          unsigned int nbytes)
+{
+       struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while ((nbytes = walk.nbytes)) {
+               u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
+                                           walk.dst.virt.addr, ctx->E,
+                                           walk.iv, &ctx->cword.encrypt,
+                                           nbytes / AES_BLOCK_SIZE);
+               memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       return err;
+}
+
+static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+                          struct scatterlist *dst, struct scatterlist *src,
+                          unsigned int nbytes)
+{
+       struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while ((nbytes = walk.nbytes)) {
+               padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
+                                  ctx->D, walk.iv, &ctx->cword.decrypt,
+                                  nbytes / AES_BLOCK_SIZE);
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       return err;
+}
+
+static struct crypto_alg cbc_aes_alg = {
+       .cra_name               =       "cbc(aes)",
+       .cra_driver_name        =       "cbc-aes-padlock",
+       .cra_priority           =       PADLOCK_COMPOSITE_PRIORITY,
+       .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          =       AES_BLOCK_SIZE,
+       .cra_ctxsize            =       sizeof(struct aes_ctx),
+       .cra_alignmask          =       PADLOCK_ALIGNMENT - 1,
+       .cra_type               =       &crypto_blkcipher_type,
+       .cra_module             =       THIS_MODULE,
+       .cra_list               =       LIST_HEAD_INIT(cbc_aes_alg.cra_list),
+       .cra_u                  =       {
+               .blkcipher = {
+                       .min_keysize            =       AES_MIN_KEY_SIZE,
+                       .max_keysize            =       AES_MAX_KEY_SIZE,
+                       .ivsize                 =       AES_BLOCK_SIZE,
+                       .setkey                 =       aes_set_key,
+                       .encrypt                =       cbc_aes_encrypt,
+                       .decrypt                =       cbc_aes_decrypt,
+               }
+       }
+};
+
 static int __init padlock_init(void)
 {
        int ret;
@@ -522,18 +667,33 @@ static int __init padlock_init(void)
        }
 
        gen_tabs();
-       if ((ret = crypto_register_alg(&aes_alg))) {
-               printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
-               return ret;
-       }
+       if ((ret = crypto_register_alg(&aes_alg)))
+               goto aes_err;
+
+       if ((ret = crypto_register_alg(&ecb_aes_alg)))
+               goto ecb_aes_err;
+
+       if ((ret = crypto_register_alg(&cbc_aes_alg)))
+               goto cbc_aes_err;
 
        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
 
+out:
        return ret;
+
+cbc_aes_err:
+       crypto_unregister_alg(&ecb_aes_alg);
+ecb_aes_err:
+       crypto_unregister_alg(&aes_alg);
+aes_err:
+       printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
+       goto out;
 }
 
 static void __exit padlock_fini(void)
 {
+       crypto_unregister_alg(&cbc_aes_alg);
+       crypto_unregister_alg(&ecb_aes_alg);
        crypto_unregister_alg(&aes_alg);
 }
 
diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h
index 7e3385b0904db2a12f481e6b016878555efce46e..b728e4518bd1582958b20cc330bfe8d8400ba11b 100644
--- a/drivers/crypto/padlock.h
+++ b/drivers/crypto/padlock.h
@@ -18,5 +18,6 @@
 #define PFX    "padlock: "
 
 #define PADLOCK_CRA_PRIORITY   300
+#define PADLOCK_COMPOSITE_PRIORITY 400
 
 #endif /* _CRYPTO_PADLOCK_H */