Update powerpc64 files to use the default m4 quote characters `...' in place of <...>.
C powerpc64/fat/aes-decrypt-internal-2.asm
-ifelse(<
+ifelse(`
Copyright (C) 2020 Mamone Tarsha
This file is part of GNU Nettle.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
->)
+')
dnl PROLOGUE(_nettle_aes_decrypt) picked up by configure
-define(<fat_transform>, <$1_ppc64>)
-include_src(<powerpc64/p8/aes-decrypt-internal.asm>)
+define(`fat_transform', `$1_ppc64')
+include_src(`powerpc64/p8/aes-decrypt-internal.asm')
C powerpc64/fat/aes-encrypt-internal-2.asm
-ifelse(<
+ifelse(`
Copyright (C) 2020 Mamone Tarsha
This file is part of GNU Nettle.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
->)
+')
dnl PROLOGUE(_nettle_aes_encrypt) picked up by configure
-define(<fat_transform>, <$1_ppc64>)
-include_src(<powerpc64/p8/aes-encrypt-internal.asm>)
+define(`fat_transform', `$1_ppc64')
+include_src(`powerpc64/p8/aes-encrypt-internal.asm')
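In the fat build, these two wrapper files redefine fat_transform before including the real source, so every symbol the included file defines gets a _ppc64 suffix, while the dnl lines keep the plain names visible to configure's symbol scan. A minimal sketch of the effect, assuming (as in Nettle's asm.m4) that C_NAME applies fat_transform:

define(`fat_transform', `$1_ppc64')
C After this, PROLOGUE(_nettle_aes_encrypt) defines the symbol
C _nettle_aes_encrypt_ppc64, which the fat runtime dispatch can
C select when the POWER crypto extensions are available.

C powerpc64/machine.m4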
-define(<PROLOGUE>,
-<.globl C_NAME($1)
+define(`PROLOGUE',
+`.globl C_NAME($1)
DECLARE_FUNC(C_NAME($1))
ifelse(WORDS_BIGENDIAN,no,
-<ifdef(<FUNC_ALIGN>,<.align FUNC_ALIGN>)
+`ifdef(`FUNC_ALIGN',`.align FUNC_ALIGN')
C_NAME($1):
addis 2,12,(.TOC.-C_NAME($1))@ha
addi 2,2,(.TOC.-C_NAME($1))@l
-.localentry C_NAME($1), .-C_NAME($1)>,
-<.section ".opd","aw"
+.localentry C_NAME($1), .-C_NAME($1)',
+`.section ".opd","aw"
.align 3
C_NAME($1):
.quad .C_NAME($1),.TOC.@tocbase,0
.previous
-ifdef(<FUNC_ALIGN>,<.align FUNC_ALIGN>)
-.C_NAME($1):>)
-undefine(<FUNC_ALIGN>)>)
+ifdef(`FUNC_ALIGN',`.align FUNC_ALIGN')
+.C_NAME($1):')
+undefine(`FUNC_ALIGN')')
-define(<EPILOGUE>,
-<ifelse(WORDS_BIGENDIAN,no,
-<.size C_NAME($1), . - C_NAME($1)>,
-<.size .C_NAME($1), . - .C_NAME($1)
-.size C_NAME($1), . - .C_NAME($1)>)>)
+define(`EPILOGUE',
+`ifelse(WORDS_BIGENDIAN,no,
+`.size C_NAME($1), . - C_NAME($1)',
+`.size .C_NAME($1), . - .C_NAME($1)
+.size C_NAME($1), . - .C_NAME($1)')')
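For orientation, here is roughly what PROLOGUE(_nettle_aes_decrypt) expands to on little-endian ELFv2, assuming an empty symbol prefix and FUNC_ALIGN set to 5 as in the files below (the DECLARE_FUNC line is configure-dependent and omitted):

.globl _nettle_aes_decrypt
.align 5
_nettle_aes_decrypt:
addis 2,12,(.TOC.-_nettle_aes_decrypt)@ha
addi 2,2,(.TOC.-_nettle_aes_decrypt)@l
.localentry _nettle_aes_decrypt, .-_nettle_aes_decrypt

The addis/addi pair rebuilds the TOC pointer in r2 from the entry address in r12, and .localentry marks the secondary entry point for local calls that already have r2 set up. On big-endian ELFv1 the macro instead emits a function descriptor in the .opd section.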
C Get the vector-scalar register that aliases a vector register
C VSR(VR)
-define(<VSR>,<32+$1>)
+define(`VSR',`32+$1')
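VSR works because vector registers v0-v31 alias vector-scalar registers vs32-vs63, so the macro just adds 32. With the register names defined further down, for example:

lxvd2x VSR(S0),0,SRC
C i.e. lxvd2x 34,0,8: S0 is v2, which aliases vs34; SRC is r8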
C Load the quadword in DATA_SRC storage into
C VEC_DST. GPR is a general-purpose register
C used to obtain the effective address of
C DATA_SRC storage.
C DATA_LOAD_VEC(VEC_DST, DATA_SRC, GPR)
-define(<DATA_LOAD_VEC>,
-<ld $3,$2@got(2)
-lvx $1,0,$3>)
+define(`DATA_LOAD_VEC',
+`ld $3,$2@got(2)
+lvx $1,0,$3')
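As a concrete expansion, the call DATA_LOAD_VEC(swap_mask,.swap_mask,5) used in the encrypt code below becomes:

ld 5,.swap_mask@got(2)
lvx 0,0,5

r5 picks up the address of .swap_mask from the GOT entry addressed via the TOC pointer in r2, and lvx then loads the 16-byte mask into v0 (swap_mask).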
C powerpc64/p8/aes-decrypt-internal.asm
-ifelse(<
+ifelse(`
Copyright (C) 2020 Mamone Tarsha
This file is part of GNU Nettle.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
->)
+')
C Register usage:
-define(<SP>, <1>)
-define(<TOCP>, <2>)
+define(`SP', `1')
+define(`TOCP', `2')
-define(<ROUNDS>, <3>)
-define(<KEYS>, <4>)
-define(<LENGTH>, <6>)
-define(<DST>, <7>)
-define(<SRC>, <8>)
+define(`ROUNDS', `3')
+define(`KEYS', `4')
+define(`LENGTH', `6')
+define(`DST', `7')
+define(`SRC', `8')
-define(<swap_mask>, <0>)
+define(`swap_mask', `0')
-define(<K>, <1>)
-define(<S0>, <2>)
-define(<S1>, <3>)
-define(<S2>, <4>)
-define(<S3>, <5>)
-define(<S4>, <6>)
-define(<S5>, <7>)
-define(<S6>, <8>)
-define(<S7>, <9>)
+define(`K', `1')
+define(`S0', `2')
+define(`S1', `3')
+define(`S2', `4')
+define(`S3', `5')
+define(`S4', `6')
+define(`S5', `7')
+define(`S6', `8')
+define(`S7', `9')
C The ZERO vector register is used in place of the RoundKey
C for the vncipher instruction because that instruction applies
C InvMixColumns and the RoundKey xor in the reverse order.
C The xor with the RoundKey is therefore executed separately afterward.
-define(<ZERO>, <10>)
+define(`ZERO', `10')
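A sketch of the resulting inner-round pattern for a single block (the real code interleaves eight states):

C one inverse round against an all-zero RoundKey ...
vncipher S0,S0,ZERO
C ... then xor the real RoundKey separately
vxor S0,S0,K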
.file "aes-decrypt-internal.asm"
C _nettle_aes_decrypt(unsigned rounds, const uint32_t *keys,
C                     const struct aes_table *T,
C                     size_t length, uint8_t *dst,
C                     uint8_t *src)
-define(<FUNC_ALIGN>, <5>)
+define(`FUNC_ALIGN', `5')
PROLOGUE(_nettle_aes_decrypt)
vxor ZERO,ZERO,ZERO
lxvd2x VSR(S6),30,SRC
lxvd2x VSR(S7),31,SRC
-IF_LE(<vperm S0,S0,S0,swap_mask
+IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
vperm S3,S3,S3,swap_mask
vperm S4,S4,S4,swap_mask
vperm S5,S5,S5,swap_mask
vperm S6,S6,S6,swap_mask
- vperm S7,S7,S7,swap_mask>)
+ vperm S7,S7,S7,swap_mask')
vxor S0,S0,K
vxor S1,S1,K
vncipherlast S6,S6,K
vncipherlast S7,S7,K
-IF_LE(<vperm S0,S0,S0,swap_mask
+IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
vperm S3,S3,S3,swap_mask
vperm S4,S4,S4,swap_mask
vperm S5,S5,S5,swap_mask
vperm S6,S6,S6,swap_mask
- vperm S7,S7,S7,swap_mask>)
+ vperm S7,S7,S7,swap_mask')
stxvd2x VSR(S0),0,DST
stxvd2x VSR(S1),25,DST
addi 9,9,0x10
lxvd2x VSR(S3),9,SRC
-IF_LE(<vperm S0,S0,S0,swap_mask
+IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
- vperm S3,S3,S3,swap_mask>)
+ vperm S3,S3,S3,swap_mask')
vxor S0,S0,K
vxor S1,S1,K
vncipherlast S2,S2,K
vncipherlast S3,S3,K
-IF_LE(<vperm S0,S0,S0,swap_mask
+IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
- vperm S3,S3,S3,swap_mask>)
+ vperm S3,S3,S3,swap_mask')
stxvd2x VSR(S0),0,DST
li 9,0x10
lxvd2x VSR(S1),9,SRC
-IF_LE(<vperm S0,S0,S0,swap_mask
- vperm S1,S1,S1,swap_mask>)
+IF_LE(`vperm S0,S0,S0,swap_mask
+ vperm S1,S1,S1,swap_mask')
vxor S0,S0,K
vxor S1,S1,K
vncipherlast S0,S0,K
vncipherlast S1,S1,K
-IF_LE(<vperm S0,S0,S0,swap_mask
- vperm S1,S1,S1,swap_mask>)
+IF_LE(`vperm S0,S0,S0,swap_mask
+ vperm S1,S1,S1,swap_mask')
stxvd2x VSR(S0),0,DST
li 9,0x10
lxvd2x VSR(S0),0,SRC
-IF_LE(<vperm S0,S0,S0,swap_mask>)
+IF_LE(`vperm S0,S0,S0,swap_mask')
vxor S0,S0,K
vperm K,K,K,swap_mask
vncipherlast S0,S0,K
-IF_LE(<vperm S0,S0,S0,swap_mask>)
+IF_LE(`vperm S0,S0,S0,swap_mask')
stxvd2x VSR(S0),0,DST
.data
.align 4
.swap_mask:
-IF_LE(<.byte 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7>)
-IF_BE(<.byte 3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12>)
+IF_LE(`.byte 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7')
+IF_BE(`.byte 3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12')
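On little endian, lxvd2x and stxvd2x transfer the two 8-byte doublewords in swapped positions, which is why each IF_LE block above wraps the loads and stores in a vperm with this mask; byte indices 8-15,0-7 simply swap the doublewords back. A minimal sketch:

lxvd2x VSR(S0),0,SRC
C on LE the two doubleword halves arrive swapped
IF_LE(`vperm S0,S0,S0,swap_mask')
C S0 now holds the block in memory byte order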
C powerpc64/p8/aes-encrypt-internal.asm
-ifelse(<
+ifelse(`
Copyright (C) 2020 Mamone Tarsha
This file is part of GNU Nettle.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
->)
+')
C Register usage:
-define(<SP>, <1>)
-define(<TOCP>, <2>)
+define(`SP', `1')
+define(`TOCP', `2')
-define(<ROUNDS>, <3>)
-define(<KEYS>, <4>)
-define(<LENGTH>, <6>)
-define(<DST>, <7>)
-define(<SRC>, <8>)
+define(`ROUNDS', `3')
+define(`KEYS', `4')
+define(`LENGTH', `6')
+define(`DST', `7')
+define(`SRC', `8')
-define(<swap_mask>, <0>)
+define(`swap_mask', `0')
-define(<K>, <1>)
-define(<S0>, <2>)
-define(<S1>, <3>)
-define(<S2>, <4>)
-define(<S3>, <5>)
-define(<S4>, <6>)
-define(<S5>, <7>)
-define(<S6>, <8>)
-define(<S7>, <9>)
+define(`K', `1')
+define(`S0', `2')
+define(`S1', `3')
+define(`S2', `4')
+define(`S3', `5')
+define(`S4', `6')
+define(`S5', `7')
+define(`S6', `8')
+define(`S7', `9')
.file "aes-encrypt-internal.asm"
C _nettle_aes_encrypt(unsigned rounds, const uint32_t *keys,
C                     const struct aes_table *T,
C                     size_t length, uint8_t *dst,
C                     uint8_t *src)
-define(<FUNC_ALIGN>, <5>)
+define(`FUNC_ALIGN', `5')
PROLOGUE(_nettle_aes_encrypt)
DATA_LOAD_VEC(swap_mask,.swap_mask,5)
lxvd2x VSR(S6),30,SRC
lxvd2x VSR(S7),31,SRC
-IF_LE(<vperm S0,S0,S0,swap_mask
+IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
vperm S3,S3,S3,swap_mask
vperm S4,S4,S4,swap_mask
vperm S5,S5,S5,swap_mask
vperm S6,S6,S6,swap_mask
- vperm S7,S7,S7,swap_mask>)
+ vperm S7,S7,S7,swap_mask')
vxor S0,S0,K
vxor S1,S1,K
vcipherlast S6,S6,K
vcipherlast S7,S7,K
-IF_LE(<vperm S0,S0,S0,swap_mask
+IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
vperm S3,S3,S3,swap_mask
vperm S4,S4,S4,swap_mask
vperm S5,S5,S5,swap_mask
vperm S6,S6,S6,swap_mask
- vperm S7,S7,S7,swap_mask>)
+ vperm S7,S7,S7,swap_mask')
stxvd2x VSR(S0),0,DST
stxvd2x VSR(S1),25,DST
addi 9,9,0x10
lxvd2x VSR(S3),9,SRC
-IF_LE(<vperm S0,S0,S0,swap_mask
+IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
- vperm S3,S3,S3,swap_mask>)
+ vperm S3,S3,S3,swap_mask')
vxor S0,S0,K
vxor S1,S1,K
vcipherlast S2,S2,K
vcipherlast S3,S3,K
-IF_LE(<vperm S0,S0,S0,swap_mask
+IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
- vperm S3,S3,S3,swap_mask>)
+ vperm S3,S3,S3,swap_mask')
stxvd2x VSR(S0),0,DST
li 9,0x10
lxvd2x VSR(S1),9,SRC
-IF_LE(<vperm S0,S0,S0,swap_mask
- vperm S1,S1,S1,swap_mask>)
+IF_LE(`vperm S0,S0,S0,swap_mask
+ vperm S1,S1,S1,swap_mask')
vxor S0,S0,K
vxor S1,S1,K
vcipherlast S0,S0,K
vcipherlast S1,S1,K
-IF_LE(<vperm S0,S0,S0,swap_mask
- vperm S1,S1,S1,swap_mask>)
+IF_LE(`vperm S0,S0,S0,swap_mask
+ vperm S1,S1,S1,swap_mask')
stxvd2x VSR(S0),0,DST
li 9,0x10
lxvd2x VSR(S0),0,SRC
-IF_LE(<vperm S0,S0,S0,swap_mask>)
+IF_LE(`vperm S0,S0,S0,swap_mask')
vxor S0,S0,K
vperm K,K,K,swap_mask
vcipherlast S0,S0,K
-IF_LE(<vperm S0,S0,S0,swap_mask>)
+IF_LE(`vperm S0,S0,S0,swap_mask')
stxvd2x VSR(S0),0,DST
.data
.align 4
.swap_mask:
-IF_LE(<.byte 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7>)
-IF_BE(<.byte 3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12>)
+IF_LE(`.byte 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7')
+IF_BE(`.byte 3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12')