From 2b0b3689507ff8ba02fc8f2c50553442ca2b8810 Mon Sep 17 00:00:00 2001 From: ms Date: Sat, 9 Jun 2007 20:43:13 +0000 Subject: [PATCH] Added the Padlock and DMA patches for the VIA boards. Fixed the outgoing firewall script. Re-enabled IMQ in the kernel - unfortunately not for XEN. Updated VPN-Watch - not yet tested. IPSec should now work as well. git-svn-id: http://svn.ipfire.org/svn/ipfire/trunk@620 ea5c0bd1-69bd-2848-81d8-4f18e57aeed8 --- config/kernel/kernel.config.i586 | 17 +- config/kernel/kernel.config.i586.smp | 17 +- doc/packages-list.txt | 1 - lfs/linux | 8 +- lfs/openswan | 1 + lfs/shadow | 2 + src/initscripts/init.d/ipsec | 1 + src/misc-progs/outgoingfwctrl.c | 1 + src/patches/epia_dma.patch | 37 + src/patches/padlock-prereq-2.6.16.diff | 2913 ++++++++++++++++++++++++ src/scripts/vpn-watch | 410 ++-- 11 files changed, 3225 insertions(+), 183 deletions(-) create mode 100644 src/patches/epia_dma.patch create mode 100644 src/patches/padlock-prereq-2.6.16.diff diff --git a/config/kernel/kernel.config.i586 b/config/kernel/kernel.config.i586 index 026514ebb..6514d3be9 100644 --- a/config/kernel/kernel.config.i586 +++ b/config/kernel/kernel.config.i586 @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.16.50-ipfire -# Wed Jun 6 00:04:48 2007 +# Sat Jun 9 17:53:46 2007 # CONFIG_X86_32=y CONFIG_SEMAPHORE_SLEEPERS=y @@ -359,10 +359,10 @@ CONFIG_IP_PIMSM_V2=y CONFIG_ARPD=y CONFIG_SYN_COOKIES=y CONFIG_IPSEC_NAT_TRAVERSAL=y -CONFIG_INET_AH=y -CONFIG_INET_ESP=y -CONFIG_INET_IPCOMP=y -CONFIG_INET_TUNNEL=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_TUNNEL=m CONFIG_INET_DIAG=m CONFIG_INET_TCP_DIAG=m CONFIG_TCP_CONG_ADVANCED=y @@ -469,6 +469,7 @@ CONFIG_IP_NF_NAT_AMANDA=m CONFIG_IP_NF_NAT_PPTP=m CONFIG_IP_NF_NAT_H323=m CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_IMQ=m CONFIG_IP_NF_TARGET_TOS=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_DSCP=m @@ -1036,6 +1037,12 @@ CONFIG_IFB=m CONFIG_DUMMY=m CONFIG_BONDING=m CONFIG_EQUALIZER=m +CONFIG_IMQ=m +# CONFIG_IMQ_BEHAVIOR_AA is not set +# CONFIG_IMQ_BEHAVIOR_AB is not set +CONFIG_IMQ_BEHAVIOR_BA=y +# CONFIG_IMQ_BEHAVIOR_BB is not set +CONFIG_IMQ_NUM_DEVS=2 CONFIG_TUN=m CONFIG_NET_SB1000=m diff --git a/config/kernel/kernel.config.i586.smp b/config/kernel/kernel.config.i586.smp index 0ee763cf0..8a553b5aa 100644 --- a/config/kernel/kernel.config.i586.smp +++ b/config/kernel/kernel.config.i586.smp @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.16.50-ipfire -# Fri Jun 1 22:33:32 2007 +# Sat Jun 9 17:54:11 2007 # CONFIG_X86_32=y CONFIG_SEMAPHORE_SLEEPERS=y @@ -365,10 +365,10 @@ CONFIG_IP_PIMSM_V2=y CONFIG_ARPD=y CONFIG_SYN_COOKIES=y CONFIG_IPSEC_NAT_TRAVERSAL=y -CONFIG_INET_AH=y -CONFIG_INET_ESP=y -CONFIG_INET_IPCOMP=y -CONFIG_INET_TUNNEL=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_TUNNEL=m CONFIG_INET_DIAG=m CONFIG_INET_TCP_DIAG=m CONFIG_TCP_CONG_ADVANCED=y @@ -475,6 +475,7 @@ CONFIG_IP_NF_NAT_AMANDA=m CONFIG_IP_NF_NAT_PPTP=m CONFIG_IP_NF_NAT_H323=m CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_IMQ=m CONFIG_IP_NF_TARGET_TOS=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_DSCP=m @@ -1041,6 +1042,12 @@ CONFIG_IFB=m CONFIG_DUMMY=m CONFIG_BONDING=m CONFIG_EQUALIZER=m +CONFIG_IMQ=m +# CONFIG_IMQ_BEHAVIOR_AA is not set +# CONFIG_IMQ_BEHAVIOR_AB is not set +CONFIG_IMQ_BEHAVIOR_BA=y +# CONFIG_IMQ_BEHAVIOR_BB is not set +CONFIG_IMQ_NUM_DEVS=2 CONFIG_TUN=m CONFIG_NET_SB1000=m diff --git a/doc/packages-list.txt
b/doc/packages-list.txt index 2e4463166..6a9f145b3 100644 --- a/doc/packages-list.txt +++ b/doc/packages-list.txt @@ -145,7 +145,6 @@ * libwww-perl-5.803 * libxml2-2.6.26 * linux-2.6.16.50 -* linux-2.6.16.50-xen * linux-atm-2.4.1 * linux-libc-headers-2.6.12.0 * linuxigd-0.95 diff --git a/lfs/linux b/lfs/linux index ff7a4602d..dd6911b98 100644 --- a/lfs/linux +++ b/lfs/linux @@ -140,6 +140,10 @@ endif # ip_conntrack permissions from 440 to 444 cd $(DIR_APP) && patch -Np0 < $(DIR_SRC)/src/patches/ip_conntrack_standalone-patch-for-ipfire.patch + + # Some VIA patches + cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/padlock-prereq-2.6.16.diff + cd $(DIR_APP) && patch -Np0 < $(DIR_SRC)/src/patches/epia_dma.patch # Patch-o-matic cd $(DIR_SRC) && rm -rf iptables-* patch-o-matic* @@ -161,7 +165,9 @@ ifeq "$(XEN)" "1" endif # Linux Intermediate Queueing Device - #cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/linux-2.6.16-imq2.diff +ifeq "$(XEN)" "" + cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/linux-2.6.16-imq2.diff +endif # mISDN cd $(DIR_SRC) && rm -rf mISDN-* diff --git a/lfs/openswan b/lfs/openswan index b07dea2a4..989ce96aa 100644 --- a/lfs/openswan +++ b/lfs/openswan @@ -81,6 +81,7 @@ $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects)) cd $(DIR_APP) && make programs cd $(DIR_APP) && make install #mv -f /etc/rc.d/init.d/ipsec /etc/rc.d/ + -rm -rfv /etc/rc*.d/*ipsec rm -f /etc/ipsec.conf /etc/ipsec.secrets ln -sf $(CONFIG_ROOT)/vpn/ipsec.conf /etc/ipsec.conf ln -sf $(CONFIG_ROOT)/vpn/ipsec.secrets /etc/ipsec.secrets diff --git a/lfs/shadow b/lfs/shadow index 93a7576de..aedef2606 100644 --- a/lfs/shadow +++ b/lfs/shadow @@ -87,5 +87,7 @@ $(TARGET) : $(patsubst %,$(DIR_DL)/%,$(objects)) rm -v /lib/libshadow.so ln -sfv ../../lib/libshadow.so.0 /usr/lib/libshadow.so mkdir -v /etc/default + touch /etc/shadow + chmod 600 /etc/shadow @rm -rf $(DIR_APP) @$(POSTBUILD) diff --git a/src/initscripts/init.d/ipsec b/src/initscripts/init.d/ipsec index d6b2b3815..e37074742 100644 --- a/src/initscripts/init.d/ipsec +++ b/src/initscripts/init.d/ipsec @@ -139,6 +139,7 @@ case "$1" in cat ${outtmp} | logger -s -p $IPSECsyslog -t ipsec_setup 2>&1 rm -f ${outtmp} fi + sleep 20 && chown root:nobody /var/run/pluto -R && chmod 770 /var/run/pluto -R && ln -sf /var/run/pluto/pluto.pid /var/run/pluto.pid 2>&1 & exit $st ;; diff --git a/src/misc-progs/outgoingfwctrl.c b/src/misc-progs/outgoingfwctrl.c index b43e106dc..2d993d940 100644 --- a/src/misc-progs/outgoingfwctrl.c +++ b/src/misc-progs/outgoingfwctrl.c @@ -18,6 +18,7 @@ int main(int argc, char *argv[]) { if (!(initsetuid())) exit(1); + safe_system("chmod 755 /var/ipfire/outgoing/bin/outgoingfw.pl"); safe_system("/var/ipfire/outgoing/bin/outgoingfw.pl"); return 0; } diff --git a/src/patches/epia_dma.patch b/src/patches/epia_dma.patch new file mode 100644 index 000000000..203c04207 --- /dev/null +++ b/src/patches/epia_dma.patch @@ -0,0 +1,37 @@ +*** orgdriver/ide/pci/via82cxxx.c Thu May 3 21:49:52 2007 +--- drivers/ide/pci/via82cxxx.c Thu May 10 11:51:49 2007 +*************** +*** 67,72 **** +--- 67,76 ---- + #define VIA_NO_UNMASK 0x080 /* Doesn't work with IRQ unmasking on */ + #define VIA_BAD_ID 0x100 /* Has wrong vendor ID (0x1107) */ + #define VIA_BAD_AST 0x200 /* Don't touch Address Setup Timing */ ++ #define PCI_DEVICE_ID_VIA_VT8251 0x3287 ++ #define PCI_DEVICE_ID_VIA_VT8237A 0x3337 ++ #define PCI_DEVICE_ID_VIA_CX700 0x8324 ++ #define PCI_DEVICE_ID_VIA_CX700_IDE 0x5324 + + /* + * VIA SouthBridge chips. 
+*************** +*** 79,84 **** +--- 83,91 ---- + u8 rev_max; + u16 flags; + } via_isa_bridges[] = { ++ { "vt8251", PCI_DEVICE_ID_VIA_VT8251 , 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, ++ { "vt8237a", PCI_DEVICE_ID_VIA_VT8237A , 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, ++ { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, + { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, + { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, + { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, +*************** +*** 498,503 **** +--- 505,511 ---- + static struct pci_device_id via_pci_tbl[] = { + { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, ++ { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0,0}, + { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_6410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, + { 0, }, + }; diff --git a/src/patches/padlock-prereq-2.6.16.diff b/src/patches/padlock-prereq-2.6.16.diff new file mode 100644 index 000000000..71b14d1da --- /dev/null +++ b/src/patches/padlock-prereq-2.6.16.diff @@ -0,0 +1,2913 @@ +Merge master.kernel.org:/pub/scm/linux/kernel/git/herbert/crypto-2.6 + +* master.kernel.org:/pub/scm/linux/kernel/git/herbert/crypto-2.6: + [CRYPTO] aes: Fixed array boundary violation + [CRYPTO] tcrypt: Fix key alignment + [CRYPTO] all: Add missing cra_alignmask + [CRYPTO] all: Use kzalloc where possible + [CRYPTO] api: Align tfm context as wide as possible + [CRYPTO] twofish: Use rol32/ror32 where appropriate + +Index: linux-2.6.16.50/arch/x86_64/crypto/aes.c +=================================================================== +--- linux-2.6.16.50.orig/arch/x86_64/crypto/aes.c 2006-07-14 18:09:26.335435750 +1200 ++++ linux-2.6.16.50/arch/x86_64/crypto/aes.c 2006-07-14 18:10:31.083482250 +1200 +@@ -77,12 +77,11 @@ + struct aes_ctx + { + u32 key_length; +- u32 E[60]; +- u32 D[60]; ++ u32 buf[120]; + }; + +-#define E_KEY ctx->E +-#define D_KEY ctx->D ++#define E_KEY (&ctx->buf[0]) ++#define D_KEY (&ctx->buf[60]) + + static u8 pow_tab[256] __initdata; + static u8 log_tab[256] __initdata; +@@ -228,10 +227,10 @@ + t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \ + } + +-static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, +- u32 *flags) ++static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, ++ unsigned int key_len, u32 *flags) + { +- struct aes_ctx *ctx = ctx_arg; ++ struct aes_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *key = (const __le32 *)in_key; + u32 i, j, t, u, v, w; + +@@ -284,8 +283,18 @@ + return 0; + } + +-extern void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in); +-extern void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in); ++asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in); ++asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in); ++ ++static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) ++{ ++ aes_enc_blk(tfm, dst, src); ++} ++ ++static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) ++{ ++ aes_dec_blk(tfm, dst, src); ++} + + static struct crypto_alg aes_alg = { + .cra_name = "aes", +Index: linux-2.6.16.50/crypto/aes.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/aes.c 2006-07-14 18:09:26.351436750 +1200 ++++ linux-2.6.16.50/crypto/aes.c 2006-07-14 
18:10:31.087482500 +1200 +@@ -75,12 +75,11 @@ + + struct aes_ctx { + int key_length; +- u32 E[60]; +- u32 D[60]; ++ u32 buf[120]; + }; + +-#define E_KEY ctx->E +-#define D_KEY ctx->D ++#define E_KEY (&ctx->buf[0]) ++#define D_KEY (&ctx->buf[60]) + + static u8 pow_tab[256] __initdata; + static u8 log_tab[256] __initdata; +@@ -249,10 +248,10 @@ + t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \ + } + +-static int +-aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) ++static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, ++ unsigned int key_len, u32 *flags) + { +- struct aes_ctx *ctx = ctx_arg; ++ struct aes_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *key = (const __le32 *)in_key; + u32 i, t, u, v, w; + +@@ -319,9 +318,9 @@ + f_rl(bo, bi, 2, k); \ + f_rl(bo, bi, 3, k) + +-static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) ++static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) + { +- const struct aes_ctx *ctx = ctx_arg; ++ const struct aes_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *src = (const __le32 *)in; + __le32 *dst = (__le32 *)out; + u32 b0[4], b1[4]; +@@ -374,9 +373,9 @@ + i_rl(bo, bi, 2, k); \ + i_rl(bo, bi, 3, k) + +-static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in) ++static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) + { +- const struct aes_ctx *ctx = ctx_arg; ++ const struct aes_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *src = (const __le32 *)in; + __le32 *dst = (__le32 *)out; + u32 b0[4], b1[4]; +Index: linux-2.6.16.50/crypto/api.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/api.c 2006-07-14 18:09:26.351436750 +1200 ++++ linux-2.6.16.50/crypto/api.c 2006-07-14 18:10:31.091482750 +1200 +@@ -165,7 +165,7 @@ + break; + } + +- return len + alg->cra_alignmask; ++ return len + (alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1)); + } + + struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags) +@@ -179,24 +179,25 @@ + goto out; + + tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, flags); +- tfm = kmalloc(tfm_size, GFP_KERNEL); ++ tfm = kzalloc(tfm_size, GFP_KERNEL); + if (tfm == NULL) + goto out_put; + +- memset(tfm, 0, tfm_size); +- + tfm->__crt_alg = alg; + + if (crypto_init_flags(tfm, flags)) + goto out_free_tfm; + +- if (crypto_init_ops(tfm)) { +- crypto_exit_ops(tfm); ++ if (crypto_init_ops(tfm)) + goto out_free_tfm; +- } ++ ++ if (alg->cra_init && alg->cra_init(tfm)) ++ goto cra_init_failed; + + goto out; + ++cra_init_failed: ++ crypto_exit_ops(tfm); + out_free_tfm: + kfree(tfm); + tfm = NULL; +@@ -217,6 +218,8 @@ + alg = tfm->__crt_alg; + size = sizeof(*tfm) + alg->cra_ctxsize; + ++ if (alg->cra_exit) ++ alg->cra_exit(tfm); + crypto_exit_ops(tfm); + crypto_alg_put(alg); + memset(tfm, 0, size); +@@ -226,7 +229,7 @@ + static inline int crypto_set_driver_name(struct crypto_alg *alg) + { + static const char suffix[] = "-generic"; +- char *driver_name = (char *)alg->cra_driver_name; ++ char *driver_name = alg->cra_driver_name; + int len; + + if (*driver_name) +@@ -264,13 +267,13 @@ + down_write(&crypto_alg_sem); + + list_for_each_entry(q, &crypto_alg_list, cra_list) { +- if (!strcmp(q->cra_driver_name, alg->cra_driver_name)) { ++ if (q == alg) { + ret = -EEXIST; + goto out; + } + } + +- list_add_tail(&alg->cra_list, &crypto_alg_list); ++ list_add(&alg->cra_list, &crypto_alg_list); + out: + up_write(&crypto_alg_sem); + return ret; +Index: linux-2.6.16.50/crypto/deflate.c 
+=================================================================== +--- linux-2.6.16.50.orig/crypto/deflate.c 2006-07-14 18:09:26.351436750 +1200 ++++ linux-2.6.16.50/crypto/deflate.c 2006-07-14 18:10:31.091482750 +1200 +@@ -73,12 +73,11 @@ + int ret = 0; + struct z_stream_s *stream = &ctx->decomp_stream; + +- stream->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL); ++ stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL); + if (!stream->workspace ) { + ret = -ENOMEM; + goto out; + } +- memset(stream->workspace, 0, zlib_inflate_workspacesize()); + ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS); + if (ret != Z_OK) { + ret = -EINVAL; +@@ -103,8 +102,9 @@ + kfree(ctx->decomp_stream.workspace); + } + +-static int deflate_init(void *ctx) ++static int deflate_init(struct crypto_tfm *tfm) + { ++ struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); + int ret; + + ret = deflate_comp_init(ctx); +@@ -117,17 +117,19 @@ + return ret; + } + +-static void deflate_exit(void *ctx) ++static void deflate_exit(struct crypto_tfm *tfm) + { ++ struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); ++ + deflate_comp_exit(ctx); + deflate_decomp_exit(ctx); + } + +-static int deflate_compress(void *ctx, const u8 *src, unsigned int slen, +- u8 *dst, unsigned int *dlen) ++static int deflate_compress(struct crypto_tfm *tfm, const u8 *src, ++ unsigned int slen, u8 *dst, unsigned int *dlen) + { + int ret = 0; +- struct deflate_ctx *dctx = ctx; ++ struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); + struct z_stream_s *stream = &dctx->comp_stream; + + ret = zlib_deflateReset(stream); +@@ -152,12 +154,12 @@ + return ret; + } + +-static int deflate_decompress(void *ctx, const u8 *src, unsigned int slen, +- u8 *dst, unsigned int *dlen) ++static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, ++ unsigned int slen, u8 *dst, unsigned int *dlen) + { + + int ret = 0; +- struct deflate_ctx *dctx = ctx; ++ struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); + struct z_stream_s *stream = &dctx->decomp_stream; + + ret = zlib_inflateReset(stream); +@@ -199,9 +201,9 @@ + .cra_ctxsize = sizeof(struct deflate_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(alg.cra_list), ++ .cra_init = deflate_init, ++ .cra_exit = deflate_exit, + .cra_u = { .compress = { +- .coa_init = deflate_init, +- .coa_exit = deflate_exit, + .coa_compress = deflate_compress, + .coa_decompress = deflate_decompress } } + }; +Index: linux-2.6.16.50/crypto/des.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/des.c 2006-07-14 18:09:26.355437000 +1200 ++++ linux-2.6.16.50/crypto/des.c 2006-07-14 18:10:31.099483250 +1200 +@@ -783,9 +783,10 @@ + } + } + +-static int des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) ++static int des_setkey(struct crypto_tfm *tfm, const u8 *key, ++ unsigned int keylen, u32 *flags) + { +- struct des_ctx *dctx = ctx; ++ struct des_ctx *dctx = crypto_tfm_ctx(tfm); + u32 tmp[DES_EXPKEY_WORDS]; + int ret; + +@@ -803,9 +804,10 @@ + return 0; + } + +-static void des_encrypt(void *ctx, u8 *dst, const u8 *src) ++static void des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { +- const u32 *K = ((struct des_ctx *)ctx)->expkey; ++ struct des_ctx *ctx = crypto_tfm_ctx(tfm); ++ const u32 *K = ctx->expkey; + const __le32 *s = (const __le32 *)src; + __le32 *d = (__le32 *)dst; + u32 L, R, A, B; +@@ -825,9 +827,10 @@ + d[1] = cpu_to_le32(L); + } + +-static void des_decrypt(void *ctx, u8 *dst, const u8 *src) ++static void 
des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { +- const u32 *K = ((struct des_ctx *)ctx)->expkey + DES_EXPKEY_WORDS - 2; ++ struct des_ctx *ctx = crypto_tfm_ctx(tfm); ++ const u32 *K = ctx->expkey + DES_EXPKEY_WORDS - 2; + const __le32 *s = (const __le32 *)src; + __le32 *d = (__le32 *)dst; + u32 L, R, A, B; +@@ -860,11 +863,11 @@ + * property. + * + */ +-static int des3_ede_setkey(void *ctx, const u8 *key, ++static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen, u32 *flags) + { + const u32 *K = (const u32 *)key; +- struct des3_ede_ctx *dctx = ctx; ++ struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); + u32 *expkey = dctx->expkey; + + if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || +@@ -881,9 +884,9 @@ + return 0; + } + +-static void des3_ede_encrypt(void *ctx, u8 *dst, const u8 *src) ++static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { +- struct des3_ede_ctx *dctx = ctx; ++ struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); + const u32 *K = dctx->expkey; + const __le32 *s = (const __le32 *)src; + __le32 *d = (__le32 *)dst; +@@ -912,9 +915,9 @@ + d[1] = cpu_to_le32(L); + } + +-static void des3_ede_decrypt(void *ctx, u8 *dst, const u8 *src) ++static void des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { +- struct des3_ede_ctx *dctx = ctx; ++ struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); + const u32 *K = dctx->expkey + DES3_EDE_EXPKEY_WORDS - 2; + const __le32 *s = (const __le32 *)src; + __le32 *d = (__le32 *)dst; +@@ -965,6 +968,7 @@ + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct des3_ede_ctx), + .cra_module = THIS_MODULE, ++ .cra_alignmask = 3, + .cra_list = LIST_HEAD_INIT(des3_ede_alg.cra_list), + .cra_u = { .cipher = { + .cia_min_keysize = DES3_EDE_KEY_SIZE, +Index: linux-2.6.16.50/crypto/serpent.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/serpent.c 2006-07-14 18:09:26.355437000 +1200 ++++ linux-2.6.16.50/crypto/serpent.c 2006-07-14 18:10:31.103483500 +1200 +@@ -215,9 +215,11 @@ + }; + + +-static int serpent_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) ++static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, ++ unsigned int keylen, u32 *flags) + { +- u32 *k = ((struct serpent_ctx *)ctx)->expkey; ++ struct serpent_ctx *ctx = crypto_tfm_ctx(tfm); ++ u32 *k = ctx->expkey; + u8 *k8 = (u8 *)k; + u32 r0,r1,r2,r3,r4; + int i; +@@ -365,10 +367,11 @@ + return 0; + } + +-static void serpent_encrypt(void *ctx, u8 *dst, const u8 *src) ++static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { ++ struct serpent_ctx *ctx = crypto_tfm_ctx(tfm); + const u32 +- *k = ((struct serpent_ctx *)ctx)->expkey, ++ *k = ctx->expkey, + *s = (const u32 *)src; + u32 *d = (u32 *)dst, + r0, r1, r2, r3, r4; +@@ -423,8 +426,9 @@ + d[3] = cpu_to_le32(r3); + } + +-static void serpent_decrypt(void *ctx, u8 *dst, const u8 *src) ++static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { ++ struct serpent_ctx *ctx = crypto_tfm_ctx(tfm); + const u32 + *k = ((struct serpent_ctx *)ctx)->expkey, + *s = (const u32 *)src; +@@ -481,6 +485,7 @@ + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = SERPENT_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct serpent_ctx), ++ .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(serpent_alg.cra_list), + .cra_u = { .cipher = { +@@ -491,7 +496,8 @@ + .cia_decrypt = serpent_decrypt } } + }; + +-static int 
tnepres_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) ++static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key, ++ unsigned int keylen, u32 *flags) + { + u8 rev_key[SERPENT_MAX_KEY_SIZE]; + int i; +@@ -505,10 +511,10 @@ + for (i = 0; i < keylen; ++i) + rev_key[keylen - i - 1] = key[i]; + +- return serpent_setkey(ctx, rev_key, keylen, flags); ++ return serpent_setkey(tfm, rev_key, keylen, flags); + } + +-static void tnepres_encrypt(void *ctx, u8 *dst, const u8 *src) ++static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { + const u32 * const s = (const u32 * const)src; + u32 * const d = (u32 * const)dst; +@@ -520,7 +526,7 @@ + rs[2] = swab32(s[1]); + rs[3] = swab32(s[0]); + +- serpent_encrypt(ctx, (u8 *)rd, (u8 *)rs); ++ serpent_encrypt(tfm, (u8 *)rd, (u8 *)rs); + + d[0] = swab32(rd[3]); + d[1] = swab32(rd[2]); +@@ -528,7 +534,7 @@ + d[3] = swab32(rd[0]); + } + +-static void tnepres_decrypt(void *ctx, u8 *dst, const u8 *src) ++static void tnepres_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { + const u32 * const s = (const u32 * const)src; + u32 * const d = (u32 * const)dst; +@@ -540,7 +546,7 @@ + rs[2] = swab32(s[1]); + rs[3] = swab32(s[0]); + +- serpent_decrypt(ctx, (u8 *)rd, (u8 *)rs); ++ serpent_decrypt(tfm, (u8 *)rd, (u8 *)rs); + + d[0] = swab32(rd[3]); + d[1] = swab32(rd[2]); +Index: linux-2.6.16.50/crypto/tcrypt.h +=================================================================== +--- linux-2.6.16.50.orig/crypto/tcrypt.h 2006-07-14 18:09:26.355437000 +1200 ++++ linux-2.6.16.50/crypto/tcrypt.h 2006-07-14 18:10:31.111484000 +1200 +@@ -26,37 +26,38 @@ + #define MAX_IVLEN 32 + + struct hash_testvec { ++ /* only used with keyed hash algorithms */ ++ char key[128] __attribute__ ((__aligned__(4))); + char plaintext[128]; +- unsigned char psize; + char digest[MAX_DIGEST_SIZE]; +- unsigned char np; + unsigned char tap[MAX_TAP]; +- char key[128]; /* only used with keyed hash algorithms */ ++ unsigned char psize; ++ unsigned char np; + unsigned char ksize; + }; + + struct hmac_testvec { + char key[128]; +- unsigned char ksize; + char plaintext[128]; +- unsigned char psize; + char digest[MAX_DIGEST_SIZE]; +- unsigned char np; + unsigned char tap[MAX_TAP]; ++ unsigned char ksize; ++ unsigned char psize; ++ unsigned char np; + }; + + struct cipher_testvec { ++ char key[MAX_KEYLEN] __attribute__ ((__aligned__(4))); ++ char iv[MAX_IVLEN]; ++ char input[48]; ++ char result[48]; ++ unsigned char tap[MAX_TAP]; ++ int np; + unsigned char fail; + unsigned char wk; /* weak key flag */ +- char key[MAX_KEYLEN]; + unsigned char klen; +- char iv[MAX_IVLEN]; +- char input[48]; + unsigned char ilen; +- char result[48]; + unsigned char rlen; +- int np; +- unsigned char tap[MAX_TAP]; + }; + + struct cipher_speed { +@@ -64,6 +65,11 @@ + unsigned int blen; + }; + ++struct digest_speed { ++ unsigned int blen; /* buffer length */ ++ unsigned int plen; /* per-update length */ ++}; ++ + /* + * MD4 test vectors from RFC1320 + */ +@@ -2974,4 +2980,35 @@ + { .klen = 0, .blen = 0, } + }; + ++/* ++ * Digest speed tests ++ */ ++static struct digest_speed generic_digest_speed_template[] = { ++ { .blen = 16, .plen = 16, }, ++ { .blen = 64, .plen = 16, }, ++ { .blen = 64, .plen = 64, }, ++ { .blen = 256, .plen = 16, }, ++ { .blen = 256, .plen = 64, }, ++ { .blen = 256, .plen = 256, }, ++ { .blen = 1024, .plen = 16, }, ++ { .blen = 1024, .plen = 256, }, ++ { .blen = 1024, .plen = 1024, }, ++ { .blen = 2048, .plen = 16, }, ++ { .blen = 2048, .plen = 256, }, ++ { 
.blen = 2048, .plen = 1024, }, ++ { .blen = 2048, .plen = 2048, }, ++ { .blen = 4096, .plen = 16, }, ++ { .blen = 4096, .plen = 256, }, ++ { .blen = 4096, .plen = 1024, }, ++ { .blen = 4096, .plen = 4096, }, ++ { .blen = 8192, .plen = 16, }, ++ { .blen = 8192, .plen = 256, }, ++ { .blen = 8192, .plen = 1024, }, ++ { .blen = 8192, .plen = 4096, }, ++ { .blen = 8192, .plen = 8192, }, ++ ++ /* End marker */ ++ { .blen = 0, .plen = 0, } ++}; ++ + #endif /* _CRYPTO_TCRYPT_H */ +Index: linux-2.6.16.50/crypto/twofish.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/twofish.c 2006-07-14 18:09:26.359437250 +1200 ++++ linux-2.6.16.50/crypto/twofish.c 2006-07-14 18:10:31.119484500 +1200 +@@ -44,6 +44,7 @@ + #include + #include + #include ++#include + + + /* The large precomputed tables for the Twofish cipher (twofish.c) +@@ -542,9 +543,9 @@ + #define CALC_K(a, j, k, l, m, n) \ + x = CALC_K_2 (k, l, k, l, 0); \ + y = CALC_K_2 (m, n, m, n, 4); \ +- y = (y << 8) + (y >> 24); \ ++ y = rol32(y, 8); \ + x += y; y += x; ctx->a[j] = x; \ +- ctx->a[(j) + 1] = (y << 9) + (y >> 23) ++ ctx->a[(j) + 1] = rol32(y, 9) + + #define CALC_K192_2(a, b, c, d, j) \ + CALC_K_2 (q0[a ^ key[(j) + 16]], \ +@@ -555,9 +556,9 @@ + #define CALC_K192(a, j, k, l, m, n) \ + x = CALC_K192_2 (l, l, k, k, 0); \ + y = CALC_K192_2 (n, n, m, m, 4); \ +- y = (y << 8) + (y >> 24); \ ++ y = rol32(y, 8); \ + x += y; y += x; ctx->a[j] = x; \ +- ctx->a[(j) + 1] = (y << 9) + (y >> 23) ++ ctx->a[(j) + 1] = rol32(y, 9) + + #define CALC_K256_2(a, b, j) \ + CALC_K192_2 (q1[b ^ key[(j) + 24]], \ +@@ -568,9 +569,9 @@ + #define CALC_K256(a, j, k, l, m, n) \ + x = CALC_K256_2 (k, l, 0); \ + y = CALC_K256_2 (m, n, 4); \ +- y = (y << 8) + (y >> 24); \ ++ y = rol32(y, 8); \ + x += y; y += x; ctx->a[j] = x; \ +- ctx->a[(j) + 1] = (y << 9) + (y >> 23) ++ ctx->a[(j) + 1] = rol32(y, 9) + + + /* Macros to compute the g() function in the encryption and decryption +@@ -594,15 +595,15 @@ + x = G1 (a); y = G2 (b); \ + x += y; y += x + ctx->k[2 * (n) + 1]; \ + (c) ^= x + ctx->k[2 * (n)]; \ +- (c) = ((c) >> 1) + ((c) << 31); \ +- (d) = (((d) << 1)+((d) >> 31)) ^ y ++ (c) = ror32((c), 1); \ ++ (d) = rol32((d), 1) ^ y + + #define DECROUND(n, a, b, c, d) \ + x = G1 (a); y = G2 (b); \ + x += y; y += x; \ + (d) ^= y + ctx->k[2 * (n) + 1]; \ +- (d) = ((d) >> 1) + ((d) << 31); \ +- (c) = (((c) << 1)+((c) >> 31)); \ ++ (d) = ror32((d), 1); \ ++ (c) = rol32((c), 1); \ + (c) ^= (x + ctx->k[2 * (n)]) + + /* Encryption and decryption cycles; each one is simply two Feistel rounds +@@ -642,11 +643,11 @@ + }; + + /* Perform the key setup. */ +-static int twofish_setkey(void *cx, const u8 *key, +- unsigned int key_len, u32 *flags) ++static int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, ++ unsigned int key_len, u32 *flags) + { + +- struct twofish_ctx *ctx = cx; ++ struct twofish_ctx *ctx = crypto_tfm_ctx(tfm); + + int i, j, k; + +@@ -801,9 +802,9 @@ + } + + /* Encrypt one block. in and out may be the same. */ +-static void twofish_encrypt(void *cx, u8 *out, const u8 *in) ++static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) + { +- struct twofish_ctx *ctx = cx; ++ struct twofish_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *src = (const __le32 *)in; + __le32 *dst = (__le32 *)out; + +@@ -838,9 +839,9 @@ + } + + /* Decrypt one block. in and out may be the same. 
*/ +-static void twofish_decrypt(void *cx, u8 *out, const u8 *in) ++static void twofish_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) + { +- struct twofish_ctx *ctx = cx; ++ struct twofish_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *src = (const __le32 *)in; + __le32 *dst = (__le32 *)out; + +Index: linux-2.6.16.50/drivers/crypto/padlock-aes.c +=================================================================== +--- linux-2.6.16.50.orig/drivers/crypto/padlock-aes.c 2006-07-14 18:09:26.387439000 +1200 ++++ linux-2.6.16.50/drivers/crypto/padlock-aes.c 2006-07-18 01:35:50.305291201 +1200 +@@ -59,16 +59,20 @@ + #define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */ + #define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t)) + ++/* Whenever making any changes to the following ++ * structure *make sure* you keep E, d_data ++ * and cword aligned on 16 Bytes boundaries!!! */ + struct aes_ctx { +- uint32_t e_data[AES_EXTENDED_KEY_SIZE]; +- uint32_t d_data[AES_EXTENDED_KEY_SIZE]; + struct { + struct cword encrypt; + struct cword decrypt; + } cword; +- uint32_t *E; +- uint32_t *D; ++ u32 *D; + int key_length; ++ u32 E[AES_EXTENDED_KEY_SIZE] ++ __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); ++ u32 d_data[AES_EXTENDED_KEY_SIZE] ++ __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); + }; + + /* ====== Key management routines ====== */ +@@ -282,15 +286,20 @@ + return 0; + } + +-static inline struct aes_ctx *aes_ctx(void *ctx) ++static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) + { +- return (struct aes_ctx *)ALIGN((unsigned long)ctx, PADLOCK_ALIGNMENT); ++ unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); ++ unsigned long align = PADLOCK_ALIGNMENT; ++ ++ if (align <= crypto_tfm_ctx_alignment()) ++ align = 1; ++ return (struct aes_ctx *)ALIGN(addr, align); + } + +-static int +-aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags) ++static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, ++ unsigned int key_len, u32 *flags) + { +- struct aes_ctx *ctx = aes_ctx(ctx_arg); ++ struct aes_ctx *ctx = aes_ctx(tfm); + const __le32 *key = (const __le32 *)in_key; + uint32_t i, t, u, v, w; + uint32_t P[AES_EXTENDED_KEY_SIZE]; +@@ -308,8 +317,7 @@ + * itself we must supply the plain key for both encryption + * and decryption. 
+ */ +- ctx->E = ctx->e_data; +- ctx->D = ctx->e_data; ++ ctx->D = ctx->E; + + E_KEY[0] = le32_to_cpu(key[0]); + E_KEY[1] = le32_to_cpu(key[1]); +@@ -410,24 +418,22 @@ + return iv; + } + +-static void +-aes_encrypt(void *ctx_arg, uint8_t *out, const uint8_t *in) ++static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) + { +- struct aes_ctx *ctx = aes_ctx(ctx_arg); ++ struct aes_ctx *ctx = aes_ctx(tfm); + padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 1); + } + +-static void +-aes_decrypt(void *ctx_arg, uint8_t *out, const uint8_t *in) ++static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) + { +- struct aes_ctx *ctx = aes_ctx(ctx_arg); ++ struct aes_ctx *ctx = aes_ctx(tfm); + padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1); + } + + static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out, + const u8 *in, unsigned int nbytes) + { +- struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm)); ++ struct aes_ctx *ctx = aes_ctx(desc->tfm); + padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + return nbytes & ~(AES_BLOCK_SIZE - 1); +@@ -436,7 +442,7 @@ + static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out, + const u8 *in, unsigned int nbytes) + { +- struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm)); ++ struct aes_ctx *ctx = aes_ctx(desc->tfm); + padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + return nbytes & ~(AES_BLOCK_SIZE - 1); +@@ -445,7 +451,7 @@ + static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out, + const u8 *in, unsigned int nbytes) + { +- struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm)); ++ struct aes_ctx *ctx = aes_ctx(desc->tfm); + u8 *iv; + + iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info, +@@ -458,7 +464,7 @@ + static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out, + const u8 *in, unsigned int nbytes) + { +- struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm)); ++ struct aes_ctx *ctx = aes_ctx(desc->tfm); + padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + return nbytes & ~(AES_BLOCK_SIZE - 1); +Index: linux-2.6.16.50/include/linux/crypto.h +=================================================================== +--- linux-2.6.16.50.orig/include/linux/crypto.h 2006-07-14 18:09:26.387439000 +1200 ++++ linux-2.6.16.50/include/linux/crypto.h 2006-07-18 01:35:17.475239451 +1200 +@@ -67,7 +67,7 @@ + + struct cipher_desc { + struct crypto_tfm *tfm; +- void (*crfn)(void *ctx, u8 *dst, const u8 *src); ++ void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst, + const u8 *src, unsigned int nbytes); + void *info; +@@ -80,10 +80,10 @@ + struct cipher_alg { + unsigned int cia_min_keysize; + unsigned int cia_max_keysize; +- int (*cia_setkey)(void *ctx, const u8 *key, ++ int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen, u32 *flags); +- void (*cia_encrypt)(void *ctx, u8 *dst, const u8 *src); +- void (*cia_decrypt)(void *ctx, u8 *dst, const u8 *src); ++ void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); ++ void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + + unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc, + u8 *dst, const u8 *src, +@@ -101,20 +101,19 @@ + + struct digest_alg { + unsigned int dia_digestsize; +- void (*dia_init)(void *ctx); +- void (*dia_update)(void 
*ctx, const u8 *data, unsigned int len); +- void (*dia_final)(void *ctx, u8 *out); +- int (*dia_setkey)(void *ctx, const u8 *key, ++ void (*dia_init)(struct crypto_tfm *tfm); ++ void (*dia_update)(struct crypto_tfm *tfm, const u8 *data, ++ unsigned int len); ++ void (*dia_final)(struct crypto_tfm *tfm, u8 *out); ++ int (*dia_setkey)(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen, u32 *flags); + }; + + struct compress_alg { +- int (*coa_init)(void *ctx); +- void (*coa_exit)(void *ctx); +- int (*coa_compress)(void *ctx, const u8 *src, unsigned int slen, +- u8 *dst, unsigned int *dlen); +- int (*coa_decompress)(void *ctx, const u8 *src, unsigned int slen, +- u8 *dst, unsigned int *dlen); ++ int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, ++ unsigned int slen, u8 *dst, unsigned int *dlen); ++ int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src, ++ unsigned int slen, u8 *dst, unsigned int *dlen); + }; + + #define cra_cipher cra_u.cipher +@@ -130,14 +129,17 @@ + + int cra_priority; + +- const char cra_name[CRYPTO_MAX_ALG_NAME]; +- const char cra_driver_name[CRYPTO_MAX_ALG_NAME]; ++ char cra_name[CRYPTO_MAX_ALG_NAME]; ++ char cra_driver_name[CRYPTO_MAX_ALG_NAME]; + + union { + struct cipher_alg cipher; + struct digest_alg digest; + struct compress_alg compress; + } cra_u; ++ ++ int (*cra_init)(struct crypto_tfm *tfm); ++ void (*cra_exit)(struct crypto_tfm *tfm); + + struct module *cra_module; + }; +@@ -229,6 +231,8 @@ + } crt_u; + + struct crypto_alg *__crt_alg; ++ ++ char __crt_ctx[] __attribute__ ((__aligned__)); + }; + + /* +@@ -301,7 +305,13 @@ + + static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm) + { +- return (void *)&tfm[1]; ++ return tfm->__crt_ctx; ++} ++ ++static inline unsigned int crypto_tfm_ctx_alignment(void) ++{ ++ struct crypto_tfm *tfm; ++ return __alignof__(tfm->__crt_ctx); + } + + /* +Index: linux-2.6.16.50/arch/i386/crypto/aes-i586-asm.S +=================================================================== +--- linux-2.6.16.50.orig/arch/i386/crypto/aes-i586-asm.S 2006-07-14 18:09:26.339436000 +1200 ++++ linux-2.6.16.50/arch/i386/crypto/aes-i586-asm.S 2006-07-14 18:10:31.131485250 +1200 +@@ -36,22 +36,19 @@ + .file "aes-i586-asm.S" + .text + +-// aes_rval aes_enc_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])// +-// aes_rval aes_dec_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])// +- +-#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words) +- +-// offsets to parameters with one register pushed onto stack ++#include + +-#define in_blk 8 // input byte array address parameter +-#define out_blk 12 // output byte array address parameter +-#define ctx 16 // AES context structure +- +-// offsets in context structure ++#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words) + +-#define ekey 0 // encryption key schedule base address +-#define nrnd 256 // number of rounds +-#define dkey 260 // decryption key schedule base address ++/* offsets to parameters with one register pushed onto stack */ ++#define tfm 8 ++#define out_blk 12 ++#define in_blk 16 ++ ++/* offsets in crypto_tfm structure */ ++#define ekey (crypto_tfm_ctx_offset + 0) ++#define nrnd (crypto_tfm_ctx_offset + 256) ++#define dkey (crypto_tfm_ctx_offset + 260) + + // register mapping for encrypt and decrypt subroutines + +@@ -220,6 +217,7 @@ + do_col (table, r5,r0,r1,r4, r2,r3); /* idx=r5 */ + + // AES (Rijndael) Encryption Subroutine ++/* void aes_enc_blk(struct crypto_tfm *tfm, u8 
*out_blk, const u8 *in_blk) */ + + .global aes_enc_blk + +@@ -230,7 +228,7 @@ + + aes_enc_blk: + push %ebp +- mov ctx(%esp),%ebp // pointer to context ++ mov tfm(%esp),%ebp + + // CAUTION: the order and the values used in these assigns + // rely on the register mappings +@@ -295,6 +293,7 @@ + ret + + // AES (Rijndael) Decryption Subroutine ++/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */ + + .global aes_dec_blk + +@@ -305,7 +304,7 @@ + + aes_dec_blk: + push %ebp +- mov ctx(%esp),%ebp // pointer to context ++ mov tfm(%esp),%ebp + + // CAUTION: the order and the values used in these assigns + // rely on the register mappings +Index: linux-2.6.16.50/arch/i386/crypto/aes.c +=================================================================== +--- linux-2.6.16.50.orig/arch/i386/crypto/aes.c 2006-07-14 18:09:26.343436250 +1200 ++++ linux-2.6.16.50/arch/i386/crypto/aes.c 2006-07-14 18:10:31.135485500 +1200 +@@ -45,8 +45,8 @@ + #include + #include + +-asmlinkage void aes_enc_blk(const u8 *src, u8 *dst, void *ctx); +-asmlinkage void aes_dec_blk(const u8 *src, u8 *dst, void *ctx); ++asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); ++asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + + #define AES_MIN_KEY_SIZE 16 + #define AES_MAX_KEY_SIZE 32 +@@ -378,12 +378,12 @@ + k[8*(i)+11] = ss[3]; \ + } + +-static int +-aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) ++static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, ++ unsigned int key_len, u32 *flags) + { + int i; + u32 ss[8]; +- struct aes_ctx *ctx = ctx_arg; ++ struct aes_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *key = (const __le32 *)in_key; + + /* encryption schedule */ +@@ -464,16 +464,16 @@ + return 0; + } + +-static inline void aes_encrypt(void *ctx, u8 *dst, const u8 *src) ++static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { +- aes_enc_blk(src, dst, ctx); ++ aes_enc_blk(tfm, dst, src); + } +-static inline void aes_decrypt(void *ctx, u8 *dst, const u8 *src) ++ ++static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { +- aes_dec_blk(src, dst, ctx); ++ aes_dec_blk(tfm, dst, src); + } + +- + static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-i586", +Index: linux-2.6.16.50/arch/i386/kernel/asm-offsets.c +=================================================================== +--- linux-2.6.16.50.orig/arch/i386/kernel/asm-offsets.c 2006-07-14 18:09:26.343436250 +1200 ++++ linux-2.6.16.50/arch/i386/kernel/asm-offsets.c 2006-07-14 18:10:31.139485750 +1200 +@@ -8,6 +8,7 @@ + #include + #include + #include ++#include + #include + #include "sigframe.h" + #include +@@ -69,4 +70,6 @@ + + DEFINE(PAGE_SIZE_asm, PAGE_SIZE); + DEFINE(VSYSCALL_BASE, __fix_to_virt(FIX_VSYSCALL)); ++ ++ OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); + } +Index: linux-2.6.16.50/arch/s390/crypto/aes_s390.c +=================================================================== +--- linux-2.6.16.50.orig/arch/s390/crypto/aes_s390.c 2006-07-14 18:09:26.343436250 +1200 ++++ linux-2.6.16.50/arch/s390/crypto/aes_s390.c 2006-07-14 18:10:31.139485750 +1200 +@@ -37,10 +37,10 @@ + int key_len; + }; + +-static int aes_set_key(void *ctx, const u8 *in_key, unsigned int key_len, +- u32 *flags) ++static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, ++ unsigned int key_len, u32 *flags) + { +- struct s390_aes_ctx *sctx = ctx; ++ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + + 
switch (key_len) { + case 16: +@@ -70,9 +70,9 @@ + return -EINVAL; + } + +-static void aes_encrypt(void *ctx, u8 *out, const u8 *in) ++static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) + { +- const struct s390_aes_ctx *sctx = ctx; ++ const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + + switch (sctx->key_len) { + case 16: +@@ -90,9 +90,9 @@ + } + } + +-static void aes_decrypt(void *ctx, u8 *out, const u8 *in) ++static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) + { +- const struct s390_aes_ctx *sctx = ctx; ++ const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + + switch (sctx->key_len) { + case 16: +Index: linux-2.6.16.50/arch/s390/crypto/des_s390.c +=================================================================== +--- linux-2.6.16.50.orig/arch/s390/crypto/des_s390.c 2006-07-14 18:09:26.347436500 +1200 ++++ linux-2.6.16.50/arch/s390/crypto/des_s390.c 2006-07-14 18:10:31.147486250 +1200 +@@ -44,10 +44,10 @@ + u8 key[DES3_192_KEY_SIZE]; + }; + +-static int des_setkey(void *ctx, const u8 *key, unsigned int keylen, +- u32 *flags) ++static int des_setkey(struct crypto_tfm *tfm, const u8 *key, ++ unsigned int keylen, u32 *flags) + { +- struct crypt_s390_des_ctx *dctx = ctx; ++ struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm); + int ret; + + /* test if key is valid (not a weak key) */ +@@ -57,16 +57,16 @@ + return ret; + } + +-static void des_encrypt(void *ctx, u8 *out, const u8 *in) ++static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) + { +- struct crypt_s390_des_ctx *dctx = ctx; ++ struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm); + + crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, out, in, DES_BLOCK_SIZE); + } + +-static void des_decrypt(void *ctx, u8 *out, const u8 *in) ++static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) + { +- struct crypt_s390_des_ctx *dctx = ctx; ++ struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm); + + crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE); + } +@@ -166,11 +166,11 @@ + * Implementers MUST reject keys that exhibit this property. + * + */ +-static int des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen, +- u32 *flags) ++static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key, ++ unsigned int keylen, u32 *flags) + { + int i, ret; +- struct crypt_s390_des3_128_ctx *dctx = ctx; ++ struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm); + const u8* temp_key = key; + + if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) { +@@ -186,17 +186,17 @@ + return 0; + } + +-static void des3_128_encrypt(void *ctx, u8 *dst, const u8 *src) ++static void des3_128_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { +- struct crypt_s390_des3_128_ctx *dctx = ctx; ++ struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm); + + crypt_s390_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src, + DES3_128_BLOCK_SIZE); + } + +-static void des3_128_decrypt(void *ctx, u8 *dst, const u8 *src) ++static void des3_128_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { +- struct crypt_s390_des3_128_ctx *dctx = ctx; ++ struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm); + + crypt_s390_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src, + DES3_128_BLOCK_SIZE); +@@ -302,11 +302,11 @@ + * property. 
+ * + */ +-static int des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen, +- u32 *flags) ++static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key, ++ unsigned int keylen, u32 *flags) + { + int i, ret; +- struct crypt_s390_des3_192_ctx *dctx = ctx; ++ struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm); + const u8* temp_key = key; + + if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && +@@ -325,17 +325,17 @@ + return 0; + } + +-static void des3_192_encrypt(void *ctx, u8 *dst, const u8 *src) ++static void des3_192_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { +- struct crypt_s390_des3_192_ctx *dctx = ctx; ++ struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm); + + crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src, + DES3_192_BLOCK_SIZE); + } + +-static void des3_192_decrypt(void *ctx, u8 *dst, const u8 *src) ++static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { +- struct crypt_s390_des3_192_ctx *dctx = ctx; ++ struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm); + + crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src, + DES3_192_BLOCK_SIZE); +Index: linux-2.6.16.50/arch/s390/crypto/sha1_s390.c +=================================================================== +--- linux-2.6.16.50.orig/arch/s390/crypto/sha1_s390.c 2006-07-14 18:09:26.347436500 +1200 ++++ linux-2.6.16.50/arch/s390/crypto/sha1_s390.c 2006-07-14 18:10:31.147486250 +1200 +@@ -40,28 +40,29 @@ + u8 buffer[2 * SHA1_BLOCK_SIZE]; + }; + +-static void +-sha1_init(void *ctx) ++static void sha1_init(struct crypto_tfm *tfm) + { +- static const struct crypt_s390_sha1_ctx initstate = { +- .state = { +- 0x67452301, +- 0xEFCDAB89, +- 0x98BADCFE, +- 0x10325476, +- 0xC3D2E1F0 +- }, ++ struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); ++ static const u32 initstate[5] = { ++ 0x67452301, ++ 0xEFCDAB89, ++ 0x98BADCFE, ++ 0x10325476, ++ 0xC3D2E1F0 + }; +- memcpy(ctx, &initstate, sizeof(initstate)); ++ ++ ctx->count = 0; ++ memcpy(ctx->state, &initstate, sizeof(initstate)); ++ ctx->buf_len = 0; + } + +-static void +-sha1_update(void *ctx, const u8 *data, unsigned int len) ++static void sha1_update(struct crypto_tfm *tfm, const u8 *data, ++ unsigned int len) + { + struct crypt_s390_sha1_ctx *sctx; + long imd_len; + +- sctx = ctx; ++ sctx = crypto_tfm_ctx(tfm); + sctx->count += len * 8; //message bit length + + //anything in buffer yet? -> must be completed +@@ -110,10 +111,9 @@ + } + + /* Add padding and return the message digest. 
*/ +-static void +-sha1_final(void* ctx, u8 *out) ++static void sha1_final(struct crypto_tfm *tfm, u8 *out) + { +- struct crypt_s390_sha1_ctx *sctx = ctx; ++ struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); + + //must perform manual padding + pad_message(sctx); +Index: linux-2.6.16.50/arch/s390/crypto/sha256_s390.c +=================================================================== +--- linux-2.6.16.50.orig/arch/s390/crypto/sha256_s390.c 2006-07-14 18:09:26.347436500 +1200 ++++ linux-2.6.16.50/arch/s390/crypto/sha256_s390.c 2006-07-14 18:10:31.151486500 +1200 +@@ -31,9 +31,9 @@ + u8 buf[2 * SHA256_BLOCK_SIZE]; + }; + +-static void sha256_init(void *ctx) ++static void sha256_init(struct crypto_tfm *tfm) + { +- struct s390_sha256_ctx *sctx = ctx; ++ struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); + + sctx->state[0] = 0x6a09e667; + sctx->state[1] = 0xbb67ae85; +@@ -44,12 +44,12 @@ + sctx->state[6] = 0x1f83d9ab; + sctx->state[7] = 0x5be0cd19; + sctx->count = 0; +- memset(sctx->buf, 0, sizeof(sctx->buf)); + } + +-static void sha256_update(void *ctx, const u8 *data, unsigned int len) ++static void sha256_update(struct crypto_tfm *tfm, const u8 *data, ++ unsigned int len) + { +- struct s390_sha256_ctx *sctx = ctx; ++ struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); + unsigned int index; + int ret; + +@@ -108,9 +108,9 @@ + } + + /* Add padding and return the message digest */ +-static void sha256_final(void* ctx, u8 *out) ++static void sha256_final(struct crypto_tfm *tfm, u8 *out) + { +- struct s390_sha256_ctx *sctx = ctx; ++ struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); + + /* must perform manual padding */ + pad_message(sctx); +Index: linux-2.6.16.50/arch/x86_64/crypto/aes-x86_64-asm.S +=================================================================== +--- linux-2.6.16.50.orig/arch/x86_64/crypto/aes-x86_64-asm.S 2006-07-14 18:09:26.339436000 +1200 ++++ linux-2.6.16.50/arch/x86_64/crypto/aes-x86_64-asm.S 2006-07-14 18:10:31.151486500 +1200 +@@ -15,6 +15,10 @@ + + .text + ++#include ++ ++#define BASE crypto_tfm_ctx_offset ++ + #define R1 %rax + #define R1E %eax + #define R1X %ax +@@ -46,19 +50,19 @@ + #define R10 %r10 + #define R11 %r11 + +-#define prologue(FUNC,BASE,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \ ++#define prologue(FUNC,KEY,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \ + .global FUNC; \ + .type FUNC,@function; \ + .align 8; \ + FUNC: movq r1,r2; \ + movq r3,r4; \ +- leaq BASE+52(r8),r9; \ ++ leaq BASE+KEY+52(r8),r9; \ + movq r10,r11; \ + movl (r7),r5 ## E; \ + movl 4(r7),r1 ## E; \ + movl 8(r7),r6 ## E; \ + movl 12(r7),r7 ## E; \ +- movl (r8),r10 ## E; \ ++ movl BASE(r8),r10 ## E; \ + xorl -48(r9),r5 ## E; \ + xorl -44(r9),r1 ## E; \ + xorl -40(r9),r6 ## E; \ +@@ -128,8 +132,8 @@ + movl r3 ## E,r1 ## E; \ + movl r4 ## E,r2 ## E; + +-#define entry(FUNC,BASE,B128,B192) \ +- prologue(FUNC,BASE,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11) ++#define entry(FUNC,KEY,B128,B192) \ ++ prologue(FUNC,KEY,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11) + + #define return epilogue(R8,R2,R9,R7,R5,R6,R3,R4,R11) + +@@ -147,9 +151,9 @@ + #define decrypt_final(TAB,OFFSET) \ + round(TAB,OFFSET,R2,R1,R4,R3,R6,R5,R7,R10,R5,R6,R3,R4) + +-/* void aes_encrypt(void *ctx, u8 *out, const u8 *in) */ ++/* void aes_enc_blk(stuct crypto_tfm *tfm, u8 *out, const u8 *in) */ + +- entry(aes_encrypt,0,enc128,enc192) ++ entry(aes_enc_blk,0,enc128,enc192) + encrypt_round(aes_ft_tab,-96) + encrypt_round(aes_ft_tab,-80) + enc192: encrypt_round(aes_ft_tab,-64) +@@ -166,9 +170,9 @@ + 
encrypt_final(aes_fl_tab,112) + return + +-/* void aes_decrypt(void *ctx, u8 *out, const u8 *in) */ ++/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */ + +- entry(aes_decrypt,240,dec128,dec192) ++ entry(aes_dec_blk,240,dec128,dec192) + decrypt_round(aes_it_tab,-96) + decrypt_round(aes_it_tab,-80) + dec192: decrypt_round(aes_it_tab,-64) +Index: linux-2.6.16.50/arch/x86_64/kernel/asm-offsets.c +=================================================================== +--- linux-2.6.16.50.orig/arch/x86_64/kernel/asm-offsets.c 2006-07-14 18:09:26.339436000 +1200 ++++ linux-2.6.16.50/arch/x86_64/kernel/asm-offsets.c 2006-07-14 18:10:31.155486750 +1200 +@@ -68,5 +68,7 @@ + DEFINE(pbe_next, offsetof(struct pbe, next)); + BLANK(); + DEFINE(TSS_ist, offsetof(struct tss_struct, ist)); ++ BLANK(); ++ DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx)); + return 0; + } +Index: linux-2.6.16.50/crypto/Kconfig +=================================================================== +--- linux-2.6.16.50.orig/crypto/Kconfig 2006-07-14 18:09:26.359437250 +1200 ++++ linux-2.6.16.50/crypto/Kconfig 2006-07-14 18:10:31.159487000 +1200 +@@ -337,7 +337,7 @@ + + config CRYPTO_TEST + tristate "Testing module" +- depends on CRYPTO ++ depends on CRYPTO && m + help + Quick & dirty crypto test module. + +Index: linux-2.6.16.50/crypto/anubis.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/anubis.c 2006-07-14 18:09:26.359437250 +1200 ++++ linux-2.6.16.50/crypto/anubis.c 2006-07-14 18:10:31.163487250 +1200 +@@ -460,16 +460,15 @@ + 0xf726ffedU, 0xe89d6f8eU, 0x19a0f089U, + }; + +-static int anubis_setkey(void *ctx_arg, const u8 *in_key, ++static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len, u32 *flags) + { ++ struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); + const __be32 *key = (const __be32 *)in_key; + int N, R, i, r; + u32 kappa[ANUBIS_MAX_N]; + u32 inter[ANUBIS_MAX_N]; + +- struct anubis_ctx *ctx = ctx_arg; +- + switch (key_len) + { + case 16: case 20: case 24: case 28: +@@ -660,15 +659,15 @@ + dst[i] = cpu_to_be32(inter[i]); + } + +-static void anubis_encrypt(void *ctx_arg, u8 *dst, const u8 *src) ++static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { +- struct anubis_ctx *ctx = ctx_arg; ++ struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); + anubis_crypt(ctx->E, dst, src, ctx->R); + } + +-static void anubis_decrypt(void *ctx_arg, u8 *dst, const u8 *src) ++static void anubis_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { +- struct anubis_ctx *ctx = ctx_arg; ++ struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); + anubis_crypt(ctx->D, dst, src, ctx->R); + } + +Index: linux-2.6.16.50/crypto/arc4.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/arc4.c 2006-07-14 18:09:26.359437250 +1200 ++++ linux-2.6.16.50/crypto/arc4.c 2006-07-14 18:10:31.163487250 +1200 +@@ -24,9 +24,10 @@ + u8 x, y; + }; + +-static int arc4_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) ++static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, ++ unsigned int key_len, u32 *flags) + { +- struct arc4_ctx *ctx = ctx_arg; ++ struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); + int i, j = 0, k = 0; + + ctx->x = 1; +@@ -48,9 +49,9 @@ + return 0; + } + +-static void arc4_crypt(void *ctx_arg, u8 *out, const u8 *in) ++static void arc4_crypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) + { +- struct arc4_ctx *ctx = ctx_arg; ++ 
struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); + + u8 *const S = ctx->S; + u8 x = ctx->x; +Index: linux-2.6.16.50/crypto/blowfish.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/blowfish.c 2006-07-14 18:09:26.363437500 +1200 ++++ linux-2.6.16.50/crypto/blowfish.c 2006-07-14 18:10:31.167487500 +1200 +@@ -349,7 +349,7 @@ + dst[1] = yl; + } + +-static void bf_encrypt(void *ctx, u8 *dst, const u8 *src) ++static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { + const __be32 *in_blk = (const __be32 *)src; + __be32 *const out_blk = (__be32 *)dst; +@@ -357,17 +357,18 @@ + + in32[0] = be32_to_cpu(in_blk[0]); + in32[1] = be32_to_cpu(in_blk[1]); +- encrypt_block(ctx, out32, in32); ++ encrypt_block(crypto_tfm_ctx(tfm), out32, in32); + out_blk[0] = cpu_to_be32(out32[0]); + out_blk[1] = cpu_to_be32(out32[1]); + } + +-static void bf_decrypt(void *ctx, u8 *dst, const u8 *src) ++static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { ++ struct bf_ctx *ctx = crypto_tfm_ctx(tfm); + const __be32 *in_blk = (const __be32 *)src; + __be32 *const out_blk = (__be32 *)dst; +- const u32 *P = ((struct bf_ctx *)ctx)->p; +- const u32 *S = ((struct bf_ctx *)ctx)->s; ++ const u32 *P = ctx->p; ++ const u32 *S = ctx->s; + u32 yl = be32_to_cpu(in_blk[0]); + u32 yr = be32_to_cpu(in_blk[1]); + +@@ -398,12 +399,14 @@ + /* + * Calculates the blowfish S and P boxes for encryption and decryption. + */ +-static int bf_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) ++static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, ++ unsigned int keylen, u32 *flags) + { ++ struct bf_ctx *ctx = crypto_tfm_ctx(tfm); ++ u32 *P = ctx->p; ++ u32 *S = ctx->s; + short i, j, count; + u32 data[2], temp; +- u32 *P = ((struct bf_ctx *)ctx)->p; +- u32 *S = ((struct bf_ctx *)ctx)->s; + + /* Copy the initialization s-boxes */ + for (i = 0, count = 0; i < 256; i++) +Index: linux-2.6.16.50/crypto/cast5.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/cast5.c 2006-07-14 18:09:26.363437500 +1200 ++++ linux-2.6.16.50/crypto/cast5.c 2006-07-14 18:10:31.171487750 +1200 +@@ -577,9 +577,9 @@ + (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) ) + + +-static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) ++static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) + { +- struct cast5_ctx *c = (struct cast5_ctx *) ctx; ++ struct cast5_ctx *c = crypto_tfm_ctx(tfm); + const __be32 *src = (const __be32 *)inbuf; + __be32 *dst = (__be32 *)outbuf; + u32 l, r, t; +@@ -642,9 +642,9 @@ + dst[1] = cpu_to_be32(l); + } + +-static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf) ++static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) + { +- struct cast5_ctx *c = (struct cast5_ctx *) ctx; ++ struct cast5_ctx *c = crypto_tfm_ctx(tfm); + const __be32 *src = (const __be32 *)inbuf; + __be32 *dst = (__be32 *)outbuf; + u32 l, r, t; +@@ -769,15 +769,15 @@ + } + + +-static int +-cast5_setkey(void *ctx, const u8 * key, unsigned key_len, u32 * flags) ++static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, ++ unsigned key_len, u32 *flags) + { ++ struct cast5_ctx *c = crypto_tfm_ctx(tfm); + int i; + u32 x[4]; + u32 z[4]; + u32 k[16]; + __be32 p_key[4]; +- struct cast5_ctx *c = (struct cast5_ctx *) ctx; + + if (key_len < 5 || key_len > 16) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; +Index: linux-2.6.16.50/crypto/cast6.c 
+=================================================================== +--- linux-2.6.16.50.orig/crypto/cast6.c 2006-07-14 18:09:26.363437500 +1200 ++++ linux-2.6.16.50/crypto/cast6.c 2006-07-14 18:10:31.175488000 +1200 +@@ -381,13 +381,13 @@ + key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]); + } + +-static int +-cast6_setkey(void *ctx, const u8 * in_key, unsigned key_len, u32 * flags) ++static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key, ++ unsigned key_len, u32 *flags) + { + int i; + u32 key[8]; + __be32 p_key[8]; /* padded key */ +- struct cast6_ctx *c = (struct cast6_ctx *) ctx; ++ struct cast6_ctx *c = crypto_tfm_ctx(tfm); + + if (key_len < 16 || key_len > 32 || key_len % 4 != 0) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; +@@ -444,8 +444,9 @@ + block[2] ^= F1(block[3], Kr[0], Km[0]); + } + +-static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { +- struct cast6_ctx * c = (struct cast6_ctx *)ctx; ++static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) ++{ ++ struct cast6_ctx *c = crypto_tfm_ctx(tfm); + const __be32 *src = (const __be32 *)inbuf; + __be32 *dst = (__be32 *)outbuf; + u32 block[4]; +@@ -476,8 +477,8 @@ + dst[3] = cpu_to_be32(block[3]); + } + +-static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { +- struct cast6_ctx * c = (struct cast6_ctx *)ctx; ++static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { ++ struct cast6_ctx * c = crypto_tfm_ctx(tfm); + const __be32 *src = (const __be32 *)inbuf; + __be32 *dst = (__be32 *)outbuf; + u32 block[4]; +Index: linux-2.6.16.50/crypto/cipher.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/cipher.c 2006-07-14 18:09:26.367437750 +1200 ++++ linux-2.6.16.50/crypto/cipher.c 2006-07-14 18:10:31.179488250 +1200 +@@ -187,7 +187,7 @@ + void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block; + int bsize = crypto_tfm_alg_blocksize(tfm); + +- void (*fn)(void *, u8 *, const u8 *) = desc->crfn; ++ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn; + u8 *iv = desc->info; + unsigned int done = 0; + +@@ -195,7 +195,7 @@ + + do { + xor(iv, src); +- fn(crypto_tfm_ctx(tfm), dst, iv); ++ fn(tfm, dst, iv); + memcpy(iv, dst, bsize); + + src += bsize; +@@ -218,7 +218,7 @@ + u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1); + u8 **dst_p = src == dst ? 
&buf : &dst; + +- void (*fn)(void *, u8 *, const u8 *) = desc->crfn; ++ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn; + u8 *iv = desc->info; + unsigned int done = 0; + +@@ -227,7 +227,7 @@ + do { + u8 *tmp_dst = *dst_p; + +- fn(crypto_tfm_ctx(tfm), tmp_dst, src); ++ fn(tfm, tmp_dst, src); + xor(tmp_dst, iv); + memcpy(iv, src, bsize); + if (tmp_dst != dst) +@@ -245,13 +245,13 @@ + { + struct crypto_tfm *tfm = desc->tfm; + int bsize = crypto_tfm_alg_blocksize(tfm); +- void (*fn)(void *, u8 *, const u8 *) = desc->crfn; ++ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn; + unsigned int done = 0; + + nbytes -= bsize; + + do { +- fn(crypto_tfm_ctx(tfm), dst, src); ++ fn(tfm, dst, src); + + src += bsize; + dst += bsize; +@@ -268,7 +268,7 @@ + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } else +- return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen, ++ return cia->cia_setkey(tfm, key, keylen, + &tfm->crt_flags); + } + +Index: linux-2.6.16.50/crypto/compress.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/compress.c 2006-07-14 18:09:26.367437750 +1200 ++++ linux-2.6.16.50/crypto/compress.c 2006-07-14 18:10:31.183488500 +1200 +@@ -22,8 +22,7 @@ + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) + { +- return tfm->__crt_alg->cra_compress.coa_compress(crypto_tfm_ctx(tfm), +- src, slen, dst, ++ return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst, + dlen); + } + +@@ -31,8 +30,7 @@ + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) + { +- return tfm->__crt_alg->cra_compress.coa_decompress(crypto_tfm_ctx(tfm), +- src, slen, dst, ++ return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst, + dlen); + } + +@@ -43,21 +41,14 @@ + + int crypto_init_compress_ops(struct crypto_tfm *tfm) + { +- int ret = 0; + struct compress_tfm *ops = &tfm->crt_compress; +- +- ret = tfm->__crt_alg->cra_compress.coa_init(crypto_tfm_ctx(tfm)); +- if (ret) +- goto out; + + ops->cot_compress = crypto_compress; + ops->cot_decompress = crypto_decompress; + +-out: +- return ret; ++ return 0; + } + + void crypto_exit_compress_ops(struct crypto_tfm *tfm) + { +- tfm->__crt_alg->cra_compress.coa_exit(crypto_tfm_ctx(tfm)); + } +Index: linux-2.6.16.50/crypto/crc32c.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/crc32c.c 2006-07-14 18:09:26.367437750 +1200 ++++ linux-2.6.16.50/crypto/crc32c.c 2006-07-14 18:10:31.183488500 +1200 +@@ -31,9 +31,9 @@ + * crc using table. + */ + +-static void chksum_init(void *ctx) ++static void chksum_init(struct crypto_tfm *tfm) + { +- struct chksum_ctx *mctx = ctx; ++ struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); + + mctx->crc = ~(u32)0; /* common usage */ + } +@@ -43,10 +43,10 @@ + * If your algorithm starts with ~0, then XOR with ~0 before you set + * the seed. 
+ */ +-static int chksum_setkey(void *ctx, const u8 *key, unsigned int keylen, +- u32 *flags) ++static int chksum_setkey(struct crypto_tfm *tfm, const u8 *key, ++ unsigned int keylen, u32 *flags) + { +- struct chksum_ctx *mctx = ctx; ++ struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); + + if (keylen != sizeof(mctx->crc)) { + if (flags) +@@ -57,9 +57,10 @@ + return 0; + } + +-static void chksum_update(void *ctx, const u8 *data, unsigned int length) ++static void chksum_update(struct crypto_tfm *tfm, const u8 *data, ++ unsigned int length) + { +- struct chksum_ctx *mctx = ctx; ++ struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); + u32 mcrc; + + mcrc = crc32c(mctx->crc, data, (size_t)length); +@@ -67,9 +68,9 @@ + mctx->crc = mcrc; + } + +-static void chksum_final(void *ctx, u8 *out) ++static void chksum_final(struct crypto_tfm *tfm, u8 *out) + { +- struct chksum_ctx *mctx = ctx; ++ struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); + u32 mcrc = (mctx->crc ^ ~(u32)0); + + *(u32 *)out = __le32_to_cpu(mcrc); +Index: linux-2.6.16.50/crypto/crypto_null.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/crypto_null.c 2006-07-14 18:09:26.371438000 +1200 ++++ linux-2.6.16.50/crypto/crypto_null.c 2006-07-14 18:10:31.187488750 +1200 +@@ -27,8 +27,8 @@ + #define NULL_BLOCK_SIZE 1 + #define NULL_DIGEST_SIZE 0 + +-static int null_compress(void *ctx, const u8 *src, unsigned int slen, +- u8 *dst, unsigned int *dlen) ++static int null_compress(struct crypto_tfm *tfm, const u8 *src, ++ unsigned int slen, u8 *dst, unsigned int *dlen) + { + if (slen > *dlen) + return -EINVAL; +@@ -37,20 +37,21 @@ + return 0; + } + +-static void null_init(void *ctx) ++static void null_init(struct crypto_tfm *tfm) + { } + +-static void null_update(void *ctx, const u8 *data, unsigned int len) ++static void null_update(struct crypto_tfm *tfm, const u8 *data, ++ unsigned int len) + { } + +-static void null_final(void *ctx, u8 *out) ++static void null_final(struct crypto_tfm *tfm, u8 *out) + { } + +-static int null_setkey(void *ctx, const u8 *key, +- unsigned int keylen, u32 *flags) ++static int null_setkey(struct crypto_tfm *tfm, const u8 *key, ++ unsigned int keylen, u32 *flags) + { return 0; } + +-static void null_crypt(void *ctx, u8 *dst, const u8 *src) ++static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { + memcpy(dst, src, NULL_BLOCK_SIZE); + } +Index: linux-2.6.16.50/crypto/digest.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/digest.c 2006-07-14 18:09:26.371438000 +1200 ++++ linux-2.6.16.50/crypto/digest.c 2006-07-14 18:10:31.191489000 +1200 +@@ -20,13 +20,14 @@ + + static void init(struct crypto_tfm *tfm) + { +- tfm->__crt_alg->cra_digest.dia_init(crypto_tfm_ctx(tfm)); ++ tfm->__crt_alg->cra_digest.dia_init(tfm); + } + + static void update(struct crypto_tfm *tfm, + struct scatterlist *sg, unsigned int nsg) + { + unsigned int i; ++ unsigned int alignmask = crypto_tfm_alg_alignmask(tfm); + + for (i = 0; i < nsg; i++) { + +@@ -38,12 +39,22 @@ + unsigned int bytes_from_page = min(l, ((unsigned int) + (PAGE_SIZE)) - + offset); +- char *p = crypto_kmap(pg, 0) + offset; ++ char *src = crypto_kmap(pg, 0); ++ char *p = src + offset; + +- tfm->__crt_alg->cra_digest.dia_update +- (crypto_tfm_ctx(tfm), p, +- bytes_from_page); +- crypto_kunmap(p, 0); ++ if (unlikely(offset & alignmask)) { ++ unsigned int bytes = ++ alignmask + 1 - (offset & alignmask); ++ bytes = min(bytes, bytes_from_page); ++ 
tfm->__crt_alg->cra_digest.dia_update(tfm, p, ++ bytes); ++ p += bytes; ++ bytes_from_page -= bytes; ++ l -= bytes; ++ } ++ tfm->__crt_alg->cra_digest.dia_update(tfm, p, ++ bytes_from_page); ++ crypto_kunmap(src, 0); + crypto_yield(tfm); + offset = 0; + pg++; +@@ -54,7 +65,15 @@ + + static void final(struct crypto_tfm *tfm, u8 *out) + { +- tfm->__crt_alg->cra_digest.dia_final(crypto_tfm_ctx(tfm), out); ++ unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); ++ if (unlikely((unsigned long)out & alignmask)) { ++ unsigned int size = crypto_tfm_alg_digestsize(tfm); ++ u8 buffer[size + alignmask]; ++ u8 *dst = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); ++ tfm->__crt_alg->cra_digest.dia_final(tfm, dst); ++ memcpy(out, dst, size); ++ } else ++ tfm->__crt_alg->cra_digest.dia_final(tfm, out); + } + + static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) +@@ -62,25 +81,15 @@ + u32 flags; + if (tfm->__crt_alg->cra_digest.dia_setkey == NULL) + return -ENOSYS; +- return tfm->__crt_alg->cra_digest.dia_setkey(crypto_tfm_ctx(tfm), +- key, keylen, &flags); ++ return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen, &flags); + } + + static void digest(struct crypto_tfm *tfm, + struct scatterlist *sg, unsigned int nsg, u8 *out) + { +- unsigned int i; +- +- tfm->crt_digest.dit_init(tfm); +- +- for (i = 0; i < nsg; i++) { +- char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset; +- tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm), +- p, sg[i].length); +- crypto_kunmap(p, 0); +- crypto_yield(tfm); +- } +- crypto_digest_final(tfm, out); ++ init(tfm); ++ update(tfm, sg, nsg); ++ final(tfm, out); + } + + int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags) +Index: linux-2.6.16.50/crypto/khazad.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/khazad.c 2006-07-14 18:09:26.371438000 +1200 ++++ linux-2.6.16.50/crypto/khazad.c 2006-07-14 18:10:31.195489250 +1200 +@@ -754,11 +754,11 @@ + 0xccc41d14c363da5dULL, 0x5fdc7dcd7f5a6c5cULL, 0xf726ffede89d6f8eULL + }; + +-static int khazad_setkey(void *ctx_arg, const u8 *in_key, +- unsigned int key_len, u32 *flags) ++static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key, ++ unsigned int key_len, u32 *flags) + { +- struct khazad_ctx *ctx = ctx_arg; +- const __be64 *key = (const __be64 *)in_key; ++ struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); ++ const __be32 *key = (const __be32 *)in_key; + int r; + const u64 *S = T7; + u64 K2, K1; +@@ -769,8 +769,9 @@ + return -EINVAL; + } + +- K2 = be64_to_cpu(key[0]); +- K1 = be64_to_cpu(key[1]); ++ /* key is supposed to be 32-bit aligned */ ++ K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]); ++ K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]); + + /* setup the encrypt key */ + for (r = 0; r <= KHAZAD_ROUNDS; r++) { +@@ -840,15 +841,15 @@ + *dst = cpu_to_be64(state); + } + +-static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src) ++static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { +- struct khazad_ctx *ctx = ctx_arg; ++ struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); + khazad_crypt(ctx->E, dst, src); + } + +-static void khazad_decrypt(void *ctx_arg, u8 *dst, const u8 *src) ++static void khazad_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + { +- struct khazad_ctx *ctx = ctx_arg; ++ struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); + khazad_crypt(ctx->D, dst, src); + } + +Index: linux-2.6.16.50/crypto/md4.c 
+=================================================================== +--- linux-2.6.16.50.orig/crypto/md4.c 2006-07-14 18:09:26.375438250 +1200 ++++ linux-2.6.16.50/crypto/md4.c 2006-07-14 18:10:31.199489500 +1200 +@@ -152,9 +152,9 @@ + md4_transform(ctx->hash, ctx->block); + } + +-static void md4_init(void *ctx) ++static void md4_init(struct crypto_tfm *tfm) + { +- struct md4_ctx *mctx = ctx; ++ struct md4_ctx *mctx = crypto_tfm_ctx(tfm); + + mctx->hash[0] = 0x67452301; + mctx->hash[1] = 0xefcdab89; +@@ -163,9 +163,9 @@ + mctx->byte_count = 0; + } + +-static void md4_update(void *ctx, const u8 *data, unsigned int len) ++static void md4_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) + { +- struct md4_ctx *mctx = ctx; ++ struct md4_ctx *mctx = crypto_tfm_ctx(tfm); + const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); + + mctx->byte_count += len; +@@ -193,9 +193,9 @@ + memcpy(mctx->block, data, len); + } + +-static void md4_final(void *ctx, u8 *out) ++static void md4_final(struct crypto_tfm *tfm, u8 *out) + { +- struct md4_ctx *mctx = ctx; ++ struct md4_ctx *mctx = crypto_tfm_ctx(tfm); + const unsigned int offset = mctx->byte_count & 0x3f; + char *p = (char *)mctx->block + offset; + int padding = 56 - (offset + 1); +Index: linux-2.6.16.50/crypto/md5.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/md5.c 2006-07-14 18:09:26.375438250 +1200 ++++ linux-2.6.16.50/crypto/md5.c 2006-07-14 18:10:31.199489500 +1200 +@@ -147,9 +147,9 @@ + md5_transform(ctx->hash, ctx->block); + } + +-static void md5_init(void *ctx) ++static void md5_init(struct crypto_tfm *tfm) + { +- struct md5_ctx *mctx = ctx; ++ struct md5_ctx *mctx = crypto_tfm_ctx(tfm); + + mctx->hash[0] = 0x67452301; + mctx->hash[1] = 0xefcdab89; +@@ -158,9 +158,9 @@ + mctx->byte_count = 0; + } + +-static void md5_update(void *ctx, const u8 *data, unsigned int len) ++static void md5_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) + { +- struct md5_ctx *mctx = ctx; ++ struct md5_ctx *mctx = crypto_tfm_ctx(tfm); + const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); + + mctx->byte_count += len; +@@ -188,9 +188,9 @@ + memcpy(mctx->block, data, len); + } + +-static void md5_final(void *ctx, u8 *out) ++static void md5_final(struct crypto_tfm *tfm, u8 *out) + { +- struct md5_ctx *mctx = ctx; ++ struct md5_ctx *mctx = crypto_tfm_ctx(tfm); + const unsigned int offset = mctx->byte_count & 0x3f; + char *p = (char *)mctx->block + offset; + int padding = 56 - (offset + 1); +Index: linux-2.6.16.50/crypto/michael_mic.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/michael_mic.c 2006-07-14 18:09:26.375438250 +1200 ++++ linux-2.6.16.50/crypto/michael_mic.c 2006-07-14 18:10:31.203489750 +1200 +@@ -45,16 +45,17 @@ + } while (0) + + +-static void michael_init(void *ctx) ++static void michael_init(struct crypto_tfm *tfm) + { +- struct michael_mic_ctx *mctx = ctx; ++ struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); + mctx->pending_len = 0; + } + + +-static void michael_update(void *ctx, const u8 *data, unsigned int len) ++static void michael_update(struct crypto_tfm *tfm, const u8 *data, ++ unsigned int len) + { +- struct michael_mic_ctx *mctx = ctx; ++ struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); + const __le32 *src; + + if (mctx->pending_len) { +@@ -90,9 +91,9 @@ + } + + +-static void michael_final(void *ctx, u8 *out) ++static void michael_final(struct crypto_tfm *tfm, u8 *out) + { 
+- struct michael_mic_ctx *mctx = ctx; ++ struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); + u8 *data = mctx->pending; + __le32 *dst = (__le32 *)out; + +@@ -121,10 +122,10 @@ + } + + +-static int michael_setkey(void *ctx, const u8 *key, unsigned int keylen, +- u32 *flags) ++static int michael_setkey(struct crypto_tfm *tfm, const u8 *key, ++ unsigned int keylen, u32 *flags) + { +- struct michael_mic_ctx *mctx = ctx; ++ struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); + const __le32 *data = (const __le32 *)key; + + if (keylen != 8) { +@@ -145,6 +146,7 @@ + .cra_blocksize = 8, + .cra_ctxsize = sizeof(struct michael_mic_ctx), + .cra_module = THIS_MODULE, ++ .cra_alignmask = 3, + .cra_list = LIST_HEAD_INIT(michael_mic_alg.cra_list), + .cra_u = { .digest = { + .dia_digestsize = 8, +Index: linux-2.6.16.50/crypto/sha1.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/sha1.c 2006-07-14 18:09:26.379438500 +1200 ++++ linux-2.6.16.50/crypto/sha1.c 2006-07-18 01:35:17.455238201 +1200 +@@ -34,9 +34,9 @@ + u8 buffer[64]; + }; + +-static void sha1_init(void *ctx) ++static void sha1_init(struct crypto_tfm *tfm) + { +- struct sha1_ctx *sctx = ctx; ++ struct sha1_ctx *sctx = crypto_tfm_ctx(tfm); + static const struct sha1_ctx initstate = { + 0, + { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 }, +@@ -46,9 +46,10 @@ + *sctx = initstate; + } + +-static void sha1_update(void *ctx, const u8 *data, unsigned int len) ++static void sha1_update(struct crypto_tfm *tfm, const u8 *data, ++ unsigned int len) + { +- struct sha1_ctx *sctx = ctx; ++ struct sha1_ctx *sctx = crypto_tfm_ctx(tfm); + unsigned int partial, done; + const u8 *src; + +@@ -80,9 +81,9 @@ + + + /* Add padding and return the message digest. */ +-static void sha1_final(void* ctx, u8 *out) ++static void sha1_final(struct crypto_tfm *tfm, u8 *out) + { +- struct sha1_ctx *sctx = ctx; ++ struct sha1_ctx *sctx = crypto_tfm_ctx(tfm); + __be32 *dst = (__be32 *)out; + u32 i, index, padlen; + __be64 bits; +@@ -93,10 +94,10 @@ + /* Pad out to 56 mod 64 */ + index = sctx->count & 0x3f; + padlen = (index < 56) ? 
(56 - index) : ((64+56) - index); +- sha1_update(sctx, padding, padlen); ++ sha1_update(tfm, padding, padlen); + + /* Append length */ +- sha1_update(sctx, (const u8 *)&bits, sizeof(bits)); ++ sha1_update(tfm, (const u8 *)&bits, sizeof(bits)); + + /* Store state in digest */ + for (i = 0; i < 5; i++) +@@ -112,6 +113,7 @@ + .cra_blocksize = SHA1_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sha1_ctx), + .cra_module = THIS_MODULE, ++ .cra_alignmask = 3, + .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_u = { .digest = { + .dia_digestsize = SHA1_DIGEST_SIZE, +Index: linux-2.6.16.50/crypto/sha256.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/sha256.c 2006-07-14 18:09:26.379438500 +1200 ++++ linux-2.6.16.50/crypto/sha256.c 2006-07-18 01:35:17.455238201 +1200 +@@ -230,9 +230,9 @@ + memset(W, 0, 64 * sizeof(u32)); + } + +-static void sha256_init(void *ctx) ++static void sha256_init(struct crypto_tfm *tfm) + { +- struct sha256_ctx *sctx = ctx; ++ struct sha256_ctx *sctx = crypto_tfm_ctx(tfm); + sctx->state[0] = H0; + sctx->state[1] = H1; + sctx->state[2] = H2; +@@ -242,12 +242,12 @@ + sctx->state[6] = H6; + sctx->state[7] = H7; + sctx->count[0] = sctx->count[1] = 0; +- memset(sctx->buf, 0, sizeof(sctx->buf)); + } + +-static void sha256_update(void *ctx, const u8 *data, unsigned int len) ++static void sha256_update(struct crypto_tfm *tfm, const u8 *data, ++ unsigned int len) + { +- struct sha256_ctx *sctx = ctx; ++ struct sha256_ctx *sctx = crypto_tfm_ctx(tfm); + unsigned int i, index, part_len; + + /* Compute number of bytes mod 128 */ +@@ -277,9 +277,9 @@ + memcpy(&sctx->buf[index], &data[i], len-i); + } + +-static void sha256_final(void* ctx, u8 *out) ++static void sha256_final(struct crypto_tfm *tfm, u8 *out) + { +- struct sha256_ctx *sctx = ctx; ++ struct sha256_ctx *sctx = crypto_tfm_ctx(tfm); + __be32 *dst = (__be32 *)out; + __be32 bits[2]; + unsigned int index, pad_len; +@@ -293,10 +293,10 @@ + /* Pad out to 56 mod 64. */ + index = (sctx->count[0] >> 3) & 0x3f; + pad_len = (index < 56) ? 
(56 - index) : ((64+56) - index); +- sha256_update(sctx, padding, pad_len); ++ sha256_update(tfm, padding, pad_len); + + /* Append length (before padding) */ +- sha256_update(sctx, (const u8 *)bits, sizeof(bits)); ++ sha256_update(tfm, (const u8 *)bits, sizeof(bits)); + + /* Store state in digest */ + for (i = 0; i < 8; i++) +@@ -313,6 +313,7 @@ + .cra_blocksize = SHA256_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sha256_ctx), + .cra_module = THIS_MODULE, ++ .cra_alignmask = 3, + .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_u = { .digest = { + .dia_digestsize = SHA256_DIGEST_SIZE, +Index: linux-2.6.16.50/crypto/sha512.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/sha512.c 2006-07-14 18:09:26.379438500 +1200 ++++ linux-2.6.16.50/crypto/sha512.c 2006-07-14 18:10:31.211490250 +1200 +@@ -161,9 +161,9 @@ + } + + static void +-sha512_init(void *ctx) ++sha512_init(struct crypto_tfm *tfm) + { +- struct sha512_ctx *sctx = ctx; ++ struct sha512_ctx *sctx = crypto_tfm_ctx(tfm); + sctx->state[0] = H0; + sctx->state[1] = H1; + sctx->state[2] = H2; +@@ -173,13 +173,12 @@ + sctx->state[6] = H6; + sctx->state[7] = H7; + sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; +- memset(sctx->buf, 0, sizeof(sctx->buf)); + } + + static void +-sha384_init(void *ctx) ++sha384_init(struct crypto_tfm *tfm) + { +- struct sha512_ctx *sctx = ctx; ++ struct sha512_ctx *sctx = crypto_tfm_ctx(tfm); + sctx->state[0] = HP0; + sctx->state[1] = HP1; + sctx->state[2] = HP2; +@@ -189,13 +188,12 @@ + sctx->state[6] = HP6; + sctx->state[7] = HP7; + sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; +- memset(sctx->buf, 0, sizeof(sctx->buf)); + } + + static void +-sha512_update(void *ctx, const u8 *data, unsigned int len) ++sha512_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) + { +- struct sha512_ctx *sctx = ctx; ++ struct sha512_ctx *sctx = crypto_tfm_ctx(tfm); + + unsigned int i, index, part_len; + +@@ -233,9 +231,9 @@ + } + + static void +-sha512_final(void *ctx, u8 *hash) ++sha512_final(struct crypto_tfm *tfm, u8 *hash) + { +- struct sha512_ctx *sctx = ctx; ++ struct sha512_ctx *sctx = crypto_tfm_ctx(tfm); + static u8 padding[128] = { 0x80, }; + __be64 *dst = (__be64 *)hash; + __be32 bits[4]; +@@ -251,10 +249,10 @@ + /* Pad out to 112 mod 128. */ + index = (sctx->count[0] >> 3) & 0x7f; + pad_len = (index < 112) ? 
(112 - index) : ((128+112) - index); +- sha512_update(sctx, padding, pad_len); ++ sha512_update(tfm, padding, pad_len); + + /* Append length (before padding) */ +- sha512_update(sctx, (const u8 *)bits, sizeof(bits)); ++ sha512_update(tfm, (const u8 *)bits, sizeof(bits)); + + /* Store state in digest */ + for (i = 0; i < 8; i++) +@@ -264,12 +262,11 @@ + memset(sctx, 0, sizeof(struct sha512_ctx)); + } + +-static void sha384_final(void *ctx, u8 *hash) ++static void sha384_final(struct crypto_tfm *tfm, u8 *hash) + { +- struct sha512_ctx *sctx = ctx; + u8 D[64]; + +- sha512_final(sctx, D); ++ sha512_final(tfm, D); + + memcpy(hash, D, 48); + memset(D, 0, 64); +@@ -281,6 +278,7 @@ + .cra_blocksize = SHA512_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sha512_ctx), + .cra_module = THIS_MODULE, ++ .cra_alignmask = 3, + .cra_list = LIST_HEAD_INIT(sha512.cra_list), + .cra_u = { .digest = { + .dia_digestsize = SHA512_DIGEST_SIZE, +@@ -295,6 +293,7 @@ + .cra_flags = CRYPTO_ALG_TYPE_DIGEST, + .cra_blocksize = SHA384_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sha512_ctx), ++ .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(sha384.cra_list), + .cra_u = { .digest = { +Index: linux-2.6.16.50/crypto/tcrypt.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/tcrypt.c 2006-07-14 18:09:26.379438500 +1200 ++++ linux-2.6.16.50/crypto/tcrypt.c 2006-07-18 01:36:18.591058951 +1200 +@@ -570,6 +570,122 @@ + crypto_free_tfm(tfm); + } + ++static void test_digest_jiffies(struct crypto_tfm *tfm, char *p, int blen, ++ int plen, char *out, int sec) ++{ ++ struct scatterlist sg[1]; ++ unsigned long start, end; ++ int bcount, pcount; ++ ++ for (start = jiffies, end = start + sec * HZ, bcount = 0; ++ time_before(jiffies, end); bcount++) { ++ crypto_digest_init(tfm); ++ for (pcount = 0; pcount < blen; pcount += plen) { ++ sg_set_buf(sg, p + pcount, plen); ++ crypto_digest_update(tfm, sg, 1); ++ } ++ /* we assume there is enough space in 'out' for the result */ ++ crypto_digest_final(tfm, out); ++ } ++ ++ printk("%6u opers/sec, %9lu bytes/sec\n", ++ bcount / sec, ((long)bcount * blen) / sec); ++ ++ return; ++} ++ ++static void test_digest_cycles(struct crypto_tfm *tfm, char *p, int blen, ++ int plen, char *out) ++{ ++ struct scatterlist sg[1]; ++ unsigned long cycles = 0; ++ int i, pcount; ++ ++ local_bh_disable(); ++ local_irq_disable(); ++ ++ /* Warm-up run. */ ++ for (i = 0; i < 4; i++) { ++ crypto_digest_init(tfm); ++ for (pcount = 0; pcount < blen; pcount += plen) { ++ sg_set_buf(sg, p + pcount, plen); ++ crypto_digest_update(tfm, sg, 1); ++ } ++ crypto_digest_final(tfm, out); ++ } ++ ++ /* The real thing. 
*/ ++ for (i = 0; i < 8; i++) { ++ cycles_t start, end; ++ ++ crypto_digest_init(tfm); ++ ++ start = get_cycles(); ++ ++ for (pcount = 0; pcount < blen; pcount += plen) { ++ sg_set_buf(sg, p + pcount, plen); ++ crypto_digest_update(tfm, sg, 1); ++ } ++ crypto_digest_final(tfm, out); ++ ++ end = get_cycles(); ++ ++ cycles += end - start; ++ } ++ ++ local_irq_enable(); ++ local_bh_enable(); ++ ++ printk("%6lu cycles/operation, %4lu cycles/byte\n", ++ cycles / 8, cycles / (8 * blen)); ++ ++ return; ++} ++ ++static void test_digest_speed(char *algo, unsigned int sec, ++ struct digest_speed *speed) ++{ ++ struct crypto_tfm *tfm; ++ char output[1024]; ++ int i; ++ ++ printk("\ntesting speed of %s\n", algo); ++ ++ tfm = crypto_alloc_tfm(algo, 0); ++ ++ if (tfm == NULL) { ++ printk("failed to load transform for %s\n", algo); ++ return; ++ } ++ ++ if (crypto_tfm_alg_digestsize(tfm) > sizeof(output)) { ++ printk("digestsize(%u) > outputbuffer(%zu)\n", ++ crypto_tfm_alg_digestsize(tfm), sizeof(output)); ++ goto out; ++ } ++ ++ for (i = 0; speed[i].blen != 0; i++) { ++ if (speed[i].blen > TVMEMSIZE) { ++ printk("template (%u) too big for tvmem (%u)\n", ++ speed[i].blen, TVMEMSIZE); ++ goto out; ++ } ++ ++ printk("test%3u (%5u byte blocks,%5u bytes per update,%4u updates): ", ++ i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); ++ ++ memset(tvmem, 0xff, speed[i].blen); ++ ++ if (sec) ++ test_digest_jiffies(tfm, tvmem, speed[i].blen, speed[i].plen, output, sec); ++ else ++ test_digest_cycles(tfm, tvmem, speed[i].blen, speed[i].plen, output); ++ } ++ ++out: ++ crypto_free_tfm(tfm); ++} ++ + static void test_deflate(void) + { + unsigned int i; +@@ -1086,6 +1202,60 @@ + des_speed_template); + break; + ++ case 300: ++ /* fall through */ ++ ++ case 301: ++ test_digest_speed("md4", sec, generic_digest_speed_template); ++ if (mode > 300 && mode < 400) break; ++ ++ case 302: ++ test_digest_speed("md5", sec, generic_digest_speed_template); ++ if (mode > 300 && mode < 400) break; ++ ++ case 303: ++ test_digest_speed("sha1", sec, generic_digest_speed_template); ++ if (mode > 300 && mode < 400) break; ++ ++ case 304: ++ test_digest_speed("sha256", sec, generic_digest_speed_template); ++ if (mode > 300 && mode < 400) break; ++ ++ case 305: ++ test_digest_speed("sha384", sec, generic_digest_speed_template); ++ if (mode > 300 && mode < 400) break; ++ ++ case 306: ++ test_digest_speed("sha512", sec, generic_digest_speed_template); ++ if (mode > 300 && mode < 400) break; ++ ++ case 307: ++ test_digest_speed("wp256", sec, generic_digest_speed_template); ++ if (mode > 300 && mode < 400) break; ++ ++ case 308: ++ test_digest_speed("wp384", sec, generic_digest_speed_template); ++ if (mode > 300 && mode < 400) break; ++ ++ case 309: ++ test_digest_speed("wp512", sec, generic_digest_speed_template); ++ if (mode > 300 && mode < 400) break; ++ ++ case 310: ++ test_digest_speed("tgr128", sec, generic_digest_speed_template); ++ if (mode > 300 && mode < 400) break; ++ ++ case 311: ++ test_digest_speed("tgr160", sec, generic_digest_speed_template); ++ if (mode > 300 && mode < 400) break; ++ ++ case 312: ++ test_digest_speed("tgr192", sec, generic_digest_speed_template); ++ if (mode > 300 && mode < 400) break; ++ ++ case 399: ++ break; ++ + case 1000: + test_available(); + break; +@@ -1113,7 +1283,14 @@ + + kfree(xbuf); + kfree(tvmem); +- return 0; ++ ++ /* We intentionaly return -EAGAIN to prevent keeping ++ * the module. 
It does all its work from init() ++ * and doesn't offer any runtime functionality ++ * => we don't need it in the memory, do we? ++ * -- mludvig ++ */ ++ return -EAGAIN; + } + + /* +Index: linux-2.6.16.50/crypto/tea.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/tea.c 2006-07-14 18:09:26.383438750 +1200 ++++ linux-2.6.16.50/crypto/tea.c 2006-07-14 18:10:31.223491000 +1200 +@@ -45,10 +45,10 @@ + u32 KEY[4]; + }; + +-static int tea_setkey(void *ctx_arg, const u8 *in_key, +- unsigned int key_len, u32 *flags) +-{ +- struct tea_ctx *ctx = ctx_arg; ++static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key, ++ unsigned int key_len, u32 *flags) ++{ ++ struct tea_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *key = (const __le32 *)in_key; + + if (key_len != 16) +@@ -66,12 +66,11 @@ + + } + +-static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) +-{ ++static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) ++{ + u32 y, z, n, sum = 0; + u32 k0, k1, k2, k3; +- +- struct tea_ctx *ctx = ctx_arg; ++ struct tea_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *in = (const __le32 *)src; + __le32 *out = (__le32 *)dst; + +@@ -95,11 +94,11 @@ + out[1] = cpu_to_le32(z); + } + +-static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) +-{ ++static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) ++{ + u32 y, z, n, sum; + u32 k0, k1, k2, k3; +- struct tea_ctx *ctx = ctx_arg; ++ struct tea_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *in = (const __le32 *)src; + __le32 *out = (__le32 *)dst; + +@@ -125,10 +124,10 @@ + out[1] = cpu_to_le32(z); + } + +-static int xtea_setkey(void *ctx_arg, const u8 *in_key, +- unsigned int key_len, u32 *flags) +-{ +- struct xtea_ctx *ctx = ctx_arg; ++static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key, ++ unsigned int key_len, u32 *flags) ++{ ++ struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *key = (const __le32 *)in_key; + + if (key_len != 16) +@@ -146,12 +145,11 @@ + + } + +-static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) +-{ ++static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) ++{ + u32 y, z, sum = 0; + u32 limit = XTEA_DELTA * XTEA_ROUNDS; +- +- struct xtea_ctx *ctx = ctx_arg; ++ struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *in = (const __le32 *)src; + __le32 *out = (__le32 *)dst; + +@@ -168,10 +166,10 @@ + out[1] = cpu_to_le32(z); + } + +-static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) +-{ ++static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) ++{ + u32 y, z, sum; +- struct tea_ctx *ctx = ctx_arg; ++ struct tea_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *in = (const __le32 *)src; + __le32 *out = (__le32 *)dst; + +@@ -191,12 +189,11 @@ + } + + +-static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src) +-{ ++static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) ++{ + u32 y, z, sum = 0; + u32 limit = XTEA_DELTA * XTEA_ROUNDS; +- +- struct xtea_ctx *ctx = ctx_arg; ++ struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *in = (const __le32 *)src; + __le32 *out = (__le32 *)dst; + +@@ -213,10 +210,10 @@ + out[1] = cpu_to_le32(z); + } + +-static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src) +-{ ++static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) ++{ + u32 y, z, sum; +- struct tea_ctx *ctx = ctx_arg; ++ struct tea_ctx *ctx = crypto_tfm_ctx(tfm); + const __le32 *in = (const __le32 *)src; + __le32 
*out = (__le32 *)dst; + +Index: linux-2.6.16.50/crypto/tgr192.c +=================================================================== +--- linux-2.6.16.50.orig/crypto/tgr192.c 2006-07-14 18:09:26.383438750 +1200 ++++ linux-2.6.16.50/crypto/tgr192.c 2006-07-14 18:10:31.227491250 +1200 +@@ -496,11 +496,10 @@ + tctx->c = c; + } + +-static void tgr192_init(void *ctx) ++static void tgr192_init(struct crypto_tfm *tfm) + { +- struct tgr192_ctx *tctx = ctx; ++ struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm); + +- memset (tctx->hash, 0, 64); + tctx->a = 0x0123456789abcdefULL; + tctx->b = 0xfedcba9876543210ULL; + tctx->c = 0xf096a5b4c3b2e187ULL; +@@ -511,9 +510,10 @@ + + /* Update the message digest with the contents + * of INBUF with length INLEN. */ +-static void tgr192_update(void *ctx, const u8 * inbuf, unsigned int len) ++static void tgr192_update(struct crypto_tfm *tfm, const u8 *inbuf, ++ unsigned int len) + { +- struct tgr192_ctx *tctx = ctx; ++ struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm); + + if (tctx->count == 64) { /* flush the buffer */ + tgr192_transform(tctx, tctx->hash); +@@ -527,7 +527,7 @@ + for (; len && tctx->count < 64; len--) { + tctx->hash[tctx->count++] = *inbuf++; + } +- tgr192_update(tctx, NULL, 0); ++ tgr192_update(tfm, NULL, 0); + if (!len) { + return; + } +@@ -549,15 +549,15 @@ + + + /* The routine terminates the computation */ +-static void tgr192_final(void *ctx, u8 * out) ++static void tgr192_final(struct crypto_tfm *tfm, u8 * out) + { +- struct tgr192_ctx *tctx = ctx; ++ struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm); + __be64 *dst = (__be64 *)out; + __be64 *be64p; + __le32 *le32p; + u32 t, msb, lsb; + +- tgr192_update(tctx, NULL, 0); /* flush */ ; ++ tgr192_update(tfm, NULL, 0); /* flush */ ; + + msb = 0; + t = tctx->nblocks; +@@ -585,7 +585,7 @@ + while (tctx->count < 64) { + tctx->hash[tctx->count++] = 0; + } +- tgr192_update(tctx, NULL, 0); /* flush */ ; ++ tgr192_update(tfm, NULL, 0); /* flush */ ; + memset(tctx->hash, 0, 56); /* fill next block with zeroes */ + } + /* append the 64 bit count */ +@@ -601,22 +601,20 @@ + dst[2] = be64p[2] = cpu_to_be64(tctx->c); + } + +-static void tgr160_final(void *ctx, u8 * out) ++static void tgr160_final(struct crypto_tfm *tfm, u8 * out) + { +- struct tgr192_ctx *wctx = ctx; + u8 D[64]; + +- tgr192_final(wctx, D); ++ tgr192_final(tfm, D); + memcpy(out, D, TGR160_DIGEST_SIZE); + memset(D, 0, TGR192_DIGEST_SIZE); + } + +-static void tgr128_final(void *ctx, u8 * out) ++static void tgr128_final(struct crypto_tfm *tfm, u8 * out) + { +- struct tgr192_ctx *wctx = ctx; + u8 D[64]; + +- tgr192_final(wctx, D); ++ tgr192_final(tfm, D); + memcpy(out, D, TGR128_DIGEST_SIZE); + memset(D, 0, TGR192_DIGEST_SIZE); + } +@@ -627,6 +625,7 @@ + .cra_blocksize = TGR192_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tgr192_ctx), + .cra_module = THIS_MODULE, ++ .cra_alignmask = 7, + .cra_list = LIST_HEAD_INIT(tgr192.cra_list), + .cra_u = {.digest = { + .dia_digestsize = TGR192_DIGEST_SIZE, +@@ -641,6 +640,7 @@ + .cra_blocksize = TGR192_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tgr192_ctx), + .cra_module = THIS_MODULE, ++ .cra_alignmask = 7, + .cra_list = LIST_HEAD_INIT(tgr160.cra_list), + .cra_u = {.digest = { + .dia_digestsize = TGR160_DIGEST_SIZE, +@@ -655,6 +655,7 @@ + .cra_blocksize = TGR192_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tgr192_ctx), + .cra_module = THIS_MODULE, ++ .cra_alignmask = 7, + .cra_list = LIST_HEAD_INIT(tgr128.cra_list), + .cra_u = {.digest = { + .dia_digestsize = TGR128_DIGEST_SIZE, +Index: linux-2.6.16.50/crypto/wp512.c 
+=================================================================== +--- linux-2.6.16.50.orig/crypto/wp512.c 2006-07-14 18:09:26.383438750 +1200 ++++ linux-2.6.16.50/crypto/wp512.c 2006-07-14 18:10:31.235491750 +1200 +@@ -981,9 +981,9 @@ + + } + +-static void wp512_init (void *ctx) { ++static void wp512_init(struct crypto_tfm *tfm) { ++ struct wp512_ctx *wctx = crypto_tfm_ctx(tfm); + int i; +- struct wp512_ctx *wctx = ctx; + + memset(wctx->bitLength, 0, 32); + wctx->bufferBits = wctx->bufferPos = 0; +@@ -993,10 +993,10 @@ + } + } + +-static void wp512_update(void *ctx, const u8 *source, unsigned int len) ++static void wp512_update(struct crypto_tfm *tfm, const u8 *source, ++ unsigned int len) + { +- +- struct wp512_ctx *wctx = ctx; ++ struct wp512_ctx *wctx = crypto_tfm_ctx(tfm); + int sourcePos = 0; + unsigned int bits_len = len * 8; // convert to number of bits + int sourceGap = (8 - ((int)bits_len & 7)) & 7; +@@ -1054,9 +1054,9 @@ + + } + +-static void wp512_final(void *ctx, u8 *out) ++static void wp512_final(struct crypto_tfm *tfm, u8 *out) + { +- struct wp512_ctx *wctx = ctx; ++ struct wp512_ctx *wctx = crypto_tfm_ctx(tfm); + int i; + u8 *buffer = wctx->buffer; + u8 *bitLength = wctx->bitLength; +@@ -1087,22 +1087,20 @@ + wctx->bufferPos = bufferPos; + } + +-static void wp384_final(void *ctx, u8 *out) ++static void wp384_final(struct crypto_tfm *tfm, u8 *out) + { +- struct wp512_ctx *wctx = ctx; + u8 D[64]; + +- wp512_final (wctx, D); ++ wp512_final(tfm, D); + memcpy (out, D, WP384_DIGEST_SIZE); + memset (D, 0, WP512_DIGEST_SIZE); + } + +-static void wp256_final(void *ctx, u8 *out) ++static void wp256_final(struct crypto_tfm *tfm, u8 *out) + { +- struct wp512_ctx *wctx = ctx; + u8 D[64]; + +- wp512_final (wctx, D); ++ wp512_final(tfm, D); + memcpy (out, D, WP256_DIGEST_SIZE); + memset (D, 0, WP512_DIGEST_SIZE); + } +Index: linux-2.6.16.50/arch/i386/kernel/cpu/proc.c +=================================================================== +--- linux-2.6.16.50.orig/arch/i386/kernel/cpu/proc.c 2006-07-15 00:03:51.220033250 +1200 ++++ linux-2.6.16.50/arch/i386/kernel/cpu/proc.c 2006-07-15 00:04:02.552741500 +1200 +@@ -52,7 +52,7 @@ + + /* VIA/Cyrix/Centaur-defined */ + NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en", +- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, ++ "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + +Index: linux-2.6.16.50/include/asm-i386/cpufeature.h +=================================================================== +--- linux-2.6.16.50.orig/include/asm-i386/cpufeature.h 2006-07-15 00:03:51.648060000 +1200 ++++ linux-2.6.16.50/include/asm-i386/cpufeature.h 2006-07-15 00:04:02.552741500 +1200 +@@ -86,6 +86,12 @@ + #define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ + #define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ + #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ ++#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ ++#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ ++#define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */ ++#define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */ ++#define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */ ++#define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */ + + /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ + #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ +@@ 
-119,6 +125,12 @@ + #define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) + #define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) + #define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) ++#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) ++#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) ++#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) ++#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) ++#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) ++#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) + + #endif /* __ASM_I386_CPUFEATURE_H */ + diff --git a/src/scripts/vpn-watch b/src/scripts/vpn-watch index 22107d02a..8bd752157 100755 --- a/src/scripts/vpn-watch +++ b/src/scripts/vpn-watch @@ -1,171 +1,239 @@ -#!/bin/sh -# -# IPFire script - vpn-watch -# -# This code is distributed under the terms of the GPL -# -# (c) Daniel Berlin - Check for -# remote peer with dynamic IPs and restart when change -# is detected. Works with DPD which is not perfect! -# -# 2006: Franck - adapted original script to fit in IPCop 1.4 -# 2007: Michael Tremer - mitch@ipfire.org - Merged into IPFire -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2, or (at your option) -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# - -# -# Configuration -# - -VPN_CONFIG='/var/ipfire/vpn/config' # Location of IPFire's vpn configuration file -SETTINGS='/var/ipfire/vpn/settings' # and settings - -CHECK_INTERVAL='60' # Check this often (in seconds) -DNS_RESOLVE_TRIES='4' # Try to resolve IPs this often (each try takes max. 2 seconds) -NICENESS='+5' # Adjust niceness of child processes: '-20' ... '+19'; '0' is default -case "$1" in - 'start' | '--start') - eval $(/usr/local/bin/readhash $SETTINGS) - test "${VPN_WATCH}" != "on" && exit 1 # not activated, cannot start! - - if test ! -r "$VPN_CONFIG"; then - echo 'Error: cannot read IPFire VPN configuration file; exit.' >&2 - exit 1 - fi - - if /usr/bin/test -p /var/run/$(basename $0); then - if ps --no-heading axw | grep -v 'grep' | grep -q "$(basename $0) conn: "; then - echo "Error: use '$(basename $0) stop' please; exit." >&2 - exit 1 - else - rm /var/run/$(basename $0) # pipe was left alone, correct error condition - fi - fi - - # the pipe serves for "-status" but is not used yet - /bin/mknod -m 0660 "/var/run/$(basename $0)" p >/dev/null 2>&1 # Create pipe for status-information - - # - # Read VPN configuration and fork a child process for each VPN connection active, net-to-net & RED - # - while read line; do - VPN=($(echo $line | cut --delimiter=',' --output-delimiter=' ' -f2,3,5,12,28 )) # Activated, Name, Host/Net-to-net, Remote, ITF. - test "${VPN[0]}" != "on" && continue # Ignore: deactivated connections - test "${VPN[2]}" = "host" && continue # Ignore: roadwarriors - ## test "${VPN[4]}" != "RED" && continue # Ignore: local vpns needed or not ? - echo -n "${VPN[3]}" | grep -q '^[[:digit:]\.]\+$' && continue #If fixed remote IP, no need to watch! - $0 'conn:' "${VPN[1]}" "${VPN[3]}">/dev/null 2>&1 & #Fork child process (parameters: "conn: NAME RIGHT") - done < "$VPN_CONFIG" - exit 0 # Parent dies here... 
RIP - ;; - - 'stop' | '--stop') - # Terminate processes - for proc in $(pidof -x -o %PPID $(basename $0)); do - kill -s SIGTERM -- "$proc" - done - sleep 1 - - # Kill remaining processes - for proc in $(/bin/pidof -x -o %PPID $(basename $0)); do - kill -s SIGKILL -- "$proc" - done - rm -f "/var/run/$(basename $0)" # Remove pipe - exit 0 - ;; - - #'status' | '--status') - # echo "VPN-Watch" - # if ps --no-heading axw | grep -v 'grep' | grep -q "$(basename $0) conn: "; then - # trap '' USR1 - # killall -q -g -s USR1 -- $(basename $0) - # sleep 1 - # cat "/var/run/$(basename $0)" | sort # Read children's info from pipe - # else - # echo ' no instance running.' - # fi - # exit 0 - # ;; - - 'conn:') - # Children proceed here... - renice ${NICENESS:-0} -p $$ >/dev/null 2>&1 # Adjust niceness - shift # Remove the first positional parameter ("conn:"), as we don't need it anymore - ;; - *) - /bin/echo "Usage: $0 { start | stop }" >&2 - exit 1 - ;; -esac - -# Logging, signal handlers -alias log="logger -t vpn-watch \'${1}\':" - -trap 'log "terminated after ${RESTART_COUNT} restarts."' EXIT -#trap 'echo "connection \"${1}\" restarted ${RESTART_COUNT} times" >>/var/run/$(basename $0)' USR1 - -# -# Get IP of a FQDN... using 'host' command. Everything is ok when dns server responds. -# If no response, -# -maybe RED is down. The script can terminate. It will restart with rc.updatered. -# or -# -the dns server is down. In this case, terminate the script is not a good idea... -# Thus 4 retries before returning response 'stop' -# -function get_ip () { - local RESULT='' - # delay divided by two for each loop - delay=8 - for ((i=1; ${i} <= ${DNS_RESOLVE_TRIES}; i++)); do - - # extract IP address - RESULT=$(/usr/bin/host "$1" 2>/dev/null| awk '{ print $4 }') - if echo -n $RESULT | /bin/grep -q '^[[:digit:]\.]\+$' ; then - echo -n $RESULT - return - fi - - sleep $delay - delay=$((delay>>1)) - done - # Change 'stop' to something else to let the script running - echo -n "stop" # stop: the script will terminate - -} - -# Infinite loop; checks, whether the IP of FQDN has changed. -# If so, the affected connection gets restarted. -# -RESTART_COUNT=0 -REMOTE_IP_OLD=$(get_ip $2) -log "start watching $REMOTE_IP_OLD" - -while [ $REMOTE_IP_OLD != 'stop' ] ; do - sleep $CHECK_INTERVAL - # Skip check until IPSec is running. Update IP_OLD while our ipsec is down - /usr/sbin/ipsec auto --status >/dev/null 2>&1 || { - REMOTE_IP_OLD=$(get_ip $2) - continue - } - - REMOTE_IP_NEW=$(get_ip $2) - - if test "${REMOTE_IP_OLD}" != "${REMOTE_IP_NEW}"; then - /usr/sbin/ipsec auto --down $1 - /usr/sbin/ipsec auto --replace $1 - /usr/sbin/ipsec auto --rereadsecrets - /usr/sbin/ipsec auto --up $1 - let RESTART_COUNT++ - log "Remote IP has changed from $REMOTE_IP_OLD to $REMOTE_IP_NEW. Connection restarted (#$RESTART_COUNT times)." - REMOTE_IP_OLD=$REMOTE_IP_NEW - fi -done +#!/bin/sh +################################################## +##### VPN-Watch.sh Version 1.6.3 ##### +################################################## + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# Written by: Daniel Berlin . 
+# Download: http://www.itechnology.de/front_content.php?idcat=87
+#
+
+# changed by: Rüdiger Sobeck
+# last changed: 31-01-2006
+
+# Configuration
+#
+CHECK_INTERVAL='120'      # Check this often (in seconds)
+DNS_RESOLVE_TRIES='3'     # Try to resolve IPs this often (each try takes max. 2 seconds)
+NICENESS='+5'             # Adjust niceness of child processes: '-20' ... '+19'; '0' is default
+ipfire_VPN_CONFIG='/var/ipfire/vpn/config'     # Location of ipfire's vpn configuration file
+ipfire_VPN_SETTINGS='/var/ipfire/vpn/settings' # Location of ipfire's vpn settings file
+VERSION='1.6.3'
+
+# Workaround for nonexistent "nl" command on ipfire 1.4.x
+nl --help >/dev/null 2>&1
+if test $? -ne 0; then
+    alias nl='cat'
+fi
+
+MyHost=`grep VPN_IP /var/ipfire/vpn/settings | cut --delimiter='=' --output-delimiter=' ' -f2`
+MyIP=`cat /var/ipfire/red/local-ipaddress`
+MyDynDnsIP=`ping -c 1 "$1" 2>/dev/null | head -n1 | awk '{print $3}' | tr -d '()' | tr -d ':'`
+
+case "$1" in
+    'start' | '--start')
+        if test ! -r "$ipfire_VPN_CONFIG"; then
+            echo 'Error: cannot read ipfire VPN configuration file; exit.' >&2
+            exit 1
+        fi
+
+        mknod -m 0660 "/var/run/$(basename $0)" p >/dev/null 2>&1 # Create pipe for status-information
+
+        # Read VPN configuration and fork a child process for each VPN connection
+        #
+        while read line; do
+            VPN=($(echo $line | cut --delimiter=',' --output-delimiter=' ' -f1,2,3,5,6,12)) #
+            CONNR=${VPN[0]}      # connection number
+            CONACTIVE=${VPN[1]}  # active (on|off)
+            CONNAME=${VPN[2]}    # connection name
+            CONTYPE=${VPN[3]}    # connection type (host|net)
+            CONCERTPSK=${VPN[4]} # key type (cert|psk)
+            CONDNSNAME=${VPN[5]} # FQDN name of other side
+
+            echo -n "${CONACTIVE}" | grep -qi '^off$' && continue  # Ignore: deactivated connections
+            echo -n "${CONTYPE}" | grep -qi '^host$' && continue   # Ignore: Roadwarriors (->DPD)
+# echo -n "${VPN[1]}${MyHost}" | grep -q '^[[:digit:]\.]\+$' && continue # Ignore: "left" and "right" side set to an IP
+
+            $0 'conn:' "${CONNAME}" "${MyHost}" "${CONDNSNAME}" "${CONNR}" >/dev/null 2>&1 & # Fork child process (parameters: "conn: NAME LEFT RIGHT NUMBER")
+            echo -n 'S'
+        done < "$ipfire_VPN_CONFIG"
+        echo "Starting VPN-Watch"
+        exit 0 # Parent dies here... RIP
+        ;;
+    'stop' | '--stop')
+        # Terminate processes
+        for proc in $(pidof -x -o %PPID $(basename $0)); do
+            kill -15 $proc
+            echo -n 'T'
+        done
+        sleep 1
+        # Kill remaining processes
+        for proc in $(pidof -x -o %PPID $(basename $0)); do
+            kill -9 $proc
+            echo -n 'K'
+        done
+        rm -f "/var/run/$(basename $0)" # Remove pipe
+        echo "Stopping VPN-Watch"
+        exit 0
+        ;;
+    'restart' | '--restart')
+        $0 stop
+        $0 start
+        exit 0
+        ;;
+    'status' | '--status')
+        echo "VPN-Watch ${VERSION} (mail: daniel@itechnology.de, web: www.itechnology.de/vpn-watch)"
+        if ps --no-heading axw | grep -v 'grep' | grep -q "$(basename $0) conn: "; then
+            trap '' USR1
+            killall -q -g -s USR1 -- $(basename $0)
+            sleep 1
+            cat "/var/run/$(basename $0)" | sort | nl # Read children's info from pipe
+        else
+            echo ' no instances running.'
+        fi
+        exit 0
+        ;;
+    'conn:')
+        # Children proceed here...
+        renice ${NICENESS:-0} -p $$ >/dev/null 2>&1 # Adjust niceness
+        shift # Remove the first positional parameter ("conn:"), as we don't need it anymore
+        ;;
+    *)
+        echo "Usage: $0 { start | stop | restart | status }" >&2
+        exit 1
+        ;;
+esac
+
+# Logging, signal handlers
+#
+alias log="logger -t '$(basename $0 | cut -d '.' -f 1) ${VERSION}' \(${1}\)"
+trap 'log "terminated after ${RESTART_COUNT} restarts."' EXIT
+trap 'echo "connection \"${1}\" restarted ${RESTART_COUNT} times" >>/var/run/$(basename $0)' USR1
+
+log "started"
+
+# Child parameters after the shift above:
+#   $1 = connection name, $2 = left host (MyHost), $3 = right FQDN, $4 = connection number
+
+# Get IP of a FQDN... using 'arp', 'traceroute' or 'ping',
+# because ipfire has no 'nslookup', 'host' or 'dig' command.
+#
+function get_ip () {
+    local RESULT=''
+    for ((i=1; ${i} <= ${DNS_RESOLVE_TRIES}; i++)); do
+        if which arp >/dev/null 2>&1; then
+            RESULT=$(arp "$1" 2>/dev/null | awk '{ print $2 }' | tr -d '()')
+        elif which traceroute >/dev/null 2>&1; then
+            RESULT=$(traceroute -m1 -q1 "$1" 2>/dev/null | head -n1 | awk '{ print $4 }' | tr -d '(),')
+        else
+            RESULT=$(ping -c 1 "$1" 2>/dev/null | head -n1 | awk '{print $3}' | tr -d '()' | tr -d ':')
+        fi
+        test -n "$RESULT" && break
+    done
+    test -z "$RESULT" && log "Warning: could not resolve ${1} after ${DNS_RESOLVE_TRIES} tries..."
+    echo -n "$RESULT"
+}
+
+function get_tunnelip () {
+    file=/var/tmp/$1.remoteip
+    local TRESULT=''
+    TVPN=`grep "$1" /var/ipfire/vpn/config| awk 'BEGIN{FS=","}{print $2}'`
+    DYNHOST=`grep "$1" /var/ipfire/vpn/config| awk 'BEGIN{FS=","}{print $12}'`
+    CONNR=`grep "$1" /var/ipfire/vpn/config| awk 'BEGIN{FS=","}{print $1}'`
+    REMOTEIP=`/usr/bin/ping -c 1 "$DYNHOST" 2>/dev/null | head -n1 | awk '{print $3}' | tr -d '()' | tr -d ':'`
+    if ! test -f $file; then
+        echo $REMOTEIP > $file
+    fi
+    OLDIP=`cat $file`
+    TUNIP=`ipsec whack --status | grep "$1"`
+    if [ "$TUNIP" != "" ]; then
+        TUNIP=`ipsec whack --status | grep "$1" | awk 'BEGIN{FS="["}{print $2}' | awk 'BEGIN{FS="---"}{print $3}'`
+        log "currently used tunnel IP = $TUNIP, current remote IP = $REMOTEIP"
+        echo $REMOTEIP > $file
+        TRESULT=${TUNIP}
+    fi
+
+    test -z "$TRESULT" && log "Warning: could not retrieve last used VPN tunnel IP..."
+    echo -n "$TRESULT"
+}
+
+# Restarts a VPN connection
+#
+function restart_vpn () {
+    if test -x /usr/local/bin/ipsecctrl; then
+        /usr/local/bin/ipsecctrl D "$1" # This works for ipfire 1.4.x
+        /usr/local/bin/ipsecctrl R      # re-read secrets
+        /usr/local/bin/ipsecctrl S "$1" # start tunnel
+    else
+        ipsec auto --down "$1"          # This works for ipfire 1.3.x
+        ipsec auto --unroute "$1"
+        ipsec auto --delete "$1"
+        ipsec auto --rereadall
+        ipsec auto --add "$1"
+        ipsec auto --route "$1"
+        ipsec auto --up "$1"
+    fi
+}
+
+# Get left and right IP
+#
+LEFT_IP_OLD=$MyIP
+RIGHT_IP_OLD=$(get_ip $3)
+
+# Infinite loop; checks whether the IP of the left or right FQDN has changed.
+# If so, the affected connection gets restarted; this is logged to syslog.
+#
+RESTART_COUNT=0
+while :; do
+    sleep $CHECK_INTERVAL
+
+    # Skip check until IPSec is running
+    ipsec auto --status >/dev/null 2>&1 || continue
+
+    # get own IP (may have changed)
+    ThisHostIP=`cat /var/ipfire/red/local-ipaddress`
+
+    # this is our own IP as reported in /var/ipfire/red/local-ipaddress
+    LEFT_IP_NEW=$ThisHostIP
+    # check our own DYNDNS IP
+    LEFT_IP_DYN=$(get_ip $MyHost)
+    # this is the DYNDNS IP of the other side
+    RIGHT_IP_NEW=$(get_ip $3)
+    # this is the last used (right) IP for the VPN tunnel
+    RIGHT_TUN_IP_OLD=$(get_tunnelip $1)
+
+# for whatever reason, ipsec did not notice that our own IP has changed for this connection
+    if [ "${LEFT_IP_NEW}" != "${LEFT_IP_DYN}" ]; then
+        restart_vpn "$4"
+        let RESTART_COUNT++
+        log "Red IP = $LEFT_IP_NEW, IP by DynDNS = $LEFT_IP_DYN"
+        log 'incorrect dynamic IP in tunnel used: restarting connection...'
+    fi
+
+# left or right IP has changed...
+ if test "${LEFT_IP_OLD} ${RIGHT_IP_OLD}" != "${LEFT_IP_NEW} ${RIGHT_IP_NEW}"; then + restart_vpn "$4" + let RESTART_COUNT++ + log 'left or right IP has changed: restarting connection...' + fi + +# right IP / IP of tunnel endpoint has changed... + if [ "$RIGHT_TUN_IP_OLD" != "" ]; then + if test "${RIGHT_TUN_IP_OLD}" != "${RIGHT_IP_NEW}"; then + restart_vpn "$4" + let RESTART_COUNT++ + log 'VPN tunnel IP has changed: restarting connection...' + fi + fi + + LEFT_IP_OLD=$LEFT_IP_NEW + RIGHT_IP_OLD=$RIGHT_IP_NEW +done + -- 2.39.2