From: Yangyu Chen
Date: Mon, 22 Apr 2024 02:40:25 +0000 (+0800)
Subject: chacha-riscv64-v-zbb.pl: better format
X-Git-Tag: openssl-3.4.0-alpha1~583
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=f6ce48f5b8ad4d8d748ea87d2490cbed08db9936;p=thirdparty%2Fopenssl.git

chacha-riscv64-v-zbb.pl: better format

This patch merges the `add` and `xor` parts of chacha_sub_round, which
are the same in the RISC-V Vector-only and Zvkb implementations. There
is no change to the generated ASM code except for the indentation.

Signed-off-by: Yangyu Chen
Reviewed-by: Paul Dale
Reviewed-by: Tomas Mraz
(Merged from https://github.com/openssl/openssl/pull/24069)
---

diff --git a/crypto/chacha/asm/chacha-riscv64-v-zbb.pl b/crypto/chacha/asm/chacha-riscv64-v-zbb.pl
index 8bc7fbc4ada..6518e3fc244 100755
--- a/crypto/chacha/asm/chacha-riscv64-v-zbb.pl
+++ b/crypto/chacha/asm/chacha-riscv64-v-zbb.pl
@@ -111,26 +111,29 @@ sub chacha_sub_round {
         $V_T0, $V_T1, $V_T2, $V_T3,
     ) = @_;
 
-    # a += b; c ^= a; c <<<= $ROL_SHIFT;
+    # a += b; c ^= a;
+    my $code = <<___;
+    @{[vadd_vv $A0, $A0, $B0]}
+    add $S_A0, $S_A0, $S_B0
+    @{[vadd_vv $A1, $A1, $B1]}
+    add $S_A1, $S_A1, $S_B1
+    @{[vadd_vv $A2, $A2, $B2]}
+    add $S_A2, $S_A2, $S_B2
+    @{[vadd_vv $A3, $A3, $B3]}
+    add $S_A3, $S_A3, $S_B3
+    @{[vxor_vv $C0, $C0, $A0]}
+    xor $S_C0, $S_C0, $S_A0
+    @{[vxor_vv $C1, $C1, $A1]}
+    xor $S_C1, $S_C1, $S_A1
+    @{[vxor_vv $C2, $C2, $A2]}
+    xor $S_C2, $S_C2, $S_A2
+    @{[vxor_vv $C3, $C3, $A3]}
+    xor $S_C3, $S_C3, $S_A3
+___
+    # c <<<= $ROL_SHIFT;
     if ($use_zvkb) {
-        my $code = <<___;
-        @{[vadd_vv $A0, $A0, $B0]}
-        add $S_A0, $S_A0, $S_B0
-        @{[vadd_vv $A1, $A1, $B1]}
-        add $S_A1, $S_A1, $S_B1
-        @{[vadd_vv $A2, $A2, $B2]}
-        add $S_A2, $S_A2, $S_B2
-        @{[vadd_vv $A3, $A3, $B3]}
-        add $S_A3, $S_A3, $S_B3
-        @{[vxor_vv $C0, $C0, $A0]}
-        xor $S_C0, $S_C0, $S_A0
-        @{[vxor_vv $C1, $C1, $A1]}
-        xor $S_C1, $S_C1, $S_A1
-        @{[vxor_vv $C2, $C2, $A2]}
-        xor $S_C2, $S_C2, $S_A2
-        @{[vxor_vv $C3, $C3, $A3]}
-        xor $S_C3, $S_C3, $S_A3
+        my $ror_part = <<___;
         @{[vror_vi $C0, $C0, 32 - $ROL_SHIFT]}
         @{[roriw $S_C0, $S_C0, 32 - $ROL_SHIFT]}
         @{[vror_vi $C1, $C1, 32 - $ROL_SHIFT]}
@@ -140,25 +143,10 @@ sub chacha_sub_round {
         @{[vror_vi $C3, $C3, 32 - $ROL_SHIFT]}
         @{[roriw $S_C3, $S_C3, 32 - $ROL_SHIFT]}
 ___
-        return $code;
+
+        $code .= $ror_part;
     } else {
-        my $code = <<___;
-        @{[vadd_vv $A0, $A0, $B0]}
-        add $S_A0, $S_A0, $S_B0
-        @{[vadd_vv $A1, $A1, $B1]}
-        add $S_A1, $S_A1, $S_B1
-        @{[vadd_vv $A2, $A2, $B2]}
-        add $S_A2, $S_A2, $S_B2
-        @{[vadd_vv $A3, $A3, $B3]}
-        add $S_A3, $S_A3, $S_B3
-        @{[vxor_vv $C0, $C0, $A0]}
-        xor $S_C0, $S_C0, $S_A0
-        @{[vxor_vv $C1, $C1, $A1]}
-        xor $S_C1, $S_C1, $S_A1
-        @{[vxor_vv $C2, $C2, $A2]}
-        xor $S_C2, $S_C2, $S_A2
-        @{[vxor_vv $C3, $C3, $A3]}
-        xor $S_C3, $S_C3, $S_A3
+        my $ror_part = <<___;
         @{[vsll_vi $V_T0, $C0, $ROL_SHIFT]}
         @{[vsll_vi $V_T1, $C1, $ROL_SHIFT]}
         @{[vsll_vi $V_T2, $C2, $ROL_SHIFT]}
@@ -176,8 +164,11 @@ ___
         @{[vor_vv $C3, $C3, $V_T3]}
         @{[roriw $S_C3, $S_C3, 32 - $ROL_SHIFT]}
 ___
-        return $code;
+
+        $code .= $ror_part;
     }
+
+    return $code;
 }
 
 sub chacha_quad_round_group {
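
After this change, chacha_sub_round builds the shared "a += b; c ^= a" code once and then appends only the rotate sequence, which is the part that differs between the Zvkb and plain vector targets. The Perl sketch below is a minimal illustration of that structure only, assuming simplified placeholders: the register names, the rotate amount of 12, and the literal instruction strings are stand-ins, not the script's real perlasm helpers or arguments.

#!/usr/bin/env perl
use strict;
use warnings;

# Illustration of the pattern: emit the common add/xor code once, then
# append the rotate part that depends on whether Zvkb is available.
sub sub_round_sketch {
    my ($use_zvkb) = @_;

    # Shared by both code paths (placeholder registers v0/v4/v8).
    my $code = <<___;
vadd.vv v0, v0, v4
vxor.vv v8, v8, v0
___

    # c <<<= 12, expressed as a right-rotate by 32 - 12 = 20.
    my $ror_part;
    if ($use_zvkb) {
        # Zvkb provides a vector rotate instruction.
        $ror_part = "vror.vi v8, v8, 20\n";
    } else {
        # Base vector extension: shift-left, shift-right, then OR.
        $ror_part = "vsll.vi v12, v8, 12\n"
                  . "vsrl.vi v8, v8, 20\n"
                  . "vor.vv v8, v8, v12\n";
    }

    $code .= $ror_part;
    return $code;
}

print sub_round_sketch(1);    # Zvkb variant
print sub_round_sketch(0);    # plain RVV variant

Only $ror_part varies between the two branches, which is why the sixteen add/xor lines no longer need to be duplicated in both code paths.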