VAR1 (BINOPP, crypto_pmull, 0, NONE, di)
VAR1 (BINOPP, crypto_pmull, 0, NONE, v2di)
- /* Implemented by aarch64_tbl3<mode>. */
- VAR1 (BINOP, tbl3, 0, NONE, v8qi)
- VAR1 (BINOP, tbl3, 0, NONE, v16qi)
+ /* Implemented by aarch64_qtbl1<mode>. */
+ VAR2 (BINOP, qtbl1, 0, NONE, v8qi, v16qi)
+ VAR2 (BINOPU, qtbl1, 0, NONE, v8qi, v16qi)
- /* Implemented by aarch64_tbl1<mode>. */
- VAR2 (BINOP, tbl1, 0, NONE, v8qi, v16qi)
- VAR2 (BINOPU, tbl1, 0, NONE, v8qi, v16qi)
+ /* Implemented by aarch64_qtbl2<mode>. */
+ VAR2 (BINOP, qtbl2, 0, NONE, v8qi, v16qi)
/* Implemented by aarch64_qtbl3<mode>. */
- VAR1 (BINOP, qtbl3, 0, NONE, v8qi)
- VAR1 (BINOP, qtbl3, 0, NONE, v16qi)
+ VAR2 (BINOP, qtbl3, 0, NONE, v8qi, v16qi)
/* Implemented by aarch64_qtbl4<mode>. */
- VAR1 (BINOP, qtbl4, 0, NONE, v8qi)
- VAR1 (BINOP, qtbl4, 0, NONE, v16qi)
+ VAR2 (BINOP, qtbl4, 0, NONE, v8qi, v16qi)
- /* Implemented by aarch64_tbx1<mode>. */
- VAR2 (TERNOP, tbx1, 0, NONE, v8qi, v16qi)
- VAR2 (TERNOPU, tbx1, 0, NONE, v8qi, v16qi)
+ /* Implemented by aarch64_qtbx1<mode>. */
+ VAR2 (TERNOP, qtbx1, 0, NONE, v8qi, v16qi)
+ VAR2 (TERNOPU, qtbx1, 0, NONE, v8qi, v16qi)
- /* Implemented by aarch64_tbx4<mode>. */
- VAR1 (TERNOP, tbx4, 0, NONE, v8qi)
- VAR1 (TERNOP, tbx4, 0, NONE, v16qi)
+ /* Implemented by aarch64_qtbx2<mode>. */
+ VAR2 (TERNOP, qtbx2, 0, NONE, v8qi, v16qi)
/* Implemented by aarch64_qtbx3<mode>. */
- VAR1 (TERNOP, qtbx3, 0, NONE, v8qi)
- VAR1 (TERNOP, qtbx3, 0, NONE, v16qi)
+ VAR2 (TERNOP, qtbx3, 0, NONE, v8qi, v16qi)
/* Implemented by aarch64_qtbx4<mode>. */
- VAR1 (TERNOP, qtbx4, 0, NONE, v8qi)
- VAR1 (TERNOP, qtbx4, 0, NONE, v16qi)
+ VAR2 (TERNOP, qtbx4, 0, NONE, v8qi, v16qi)
/* Builtins for ARMv8.1-A Adv.SIMD instructions. */
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1_p8 (poly8x16_t __tab, uint8x8_t __idx)
{
- return (poly8x8_t) __builtin_aarch64_tbl1v8qi ((int8x16_t) __tab,
- (int8x8_t) __idx);
+ return (poly8x8_t) __builtin_aarch64_qtbl1v8qi ((int8x16_t) __tab,
+ (int8x8_t) __idx);
}
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1_s8 (int8x16_t __tab, uint8x8_t __idx)
{
- return __builtin_aarch64_tbl1v8qi (__tab, (int8x8_t) __idx);
+ return __builtin_aarch64_qtbl1v8qi (__tab, (int8x8_t) __idx);
}
__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1_u8 (uint8x16_t __tab, uint8x8_t __idx)
{
- return __builtin_aarch64_tbl1v8qi_uuu (__tab, __idx);
+ return __builtin_aarch64_qtbl1v8qi_uuu (__tab, __idx);
}
__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1q_p8 (poly8x16_t __tab, uint8x16_t __idx)
{
- return (poly8x16_t) __builtin_aarch64_tbl1v16qi ((int8x16_t) __tab,
- (int8x16_t) __idx);
+ return (poly8x16_t) __builtin_aarch64_qtbl1v16qi ((int8x16_t) __tab,
+ (int8x16_t) __idx);
}
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1q_s8 (int8x16_t __tab, uint8x16_t __idx)
{
- return __builtin_aarch64_tbl1v16qi (__tab, (int8x16_t) __idx);
+ return __builtin_aarch64_qtbl1v16qi (__tab, (int8x16_t) __idx);
}
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1q_u8 (uint8x16_t __tab, uint8x16_t __idx)
{
- return __builtin_aarch64_tbl1v16qi_uuu (__tab, __idx);
+ return __builtin_aarch64_qtbl1v16qi_uuu (__tab, __idx);
}
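
For reference, a minimal usage sketch of the vqtbl1 family these renamed builtins back (illustrative only, not part of the patch): TBL looks each index byte up in a single 16-byte table, and any index of 16 or more yields zero. The helper name below is hypothetical.

#include <arm_neon.h>

/* Illustrative only: reverse the 16 bytes of __data with one TBL lookup.  */
static inline uint8x16_t
example_reverse_bytes (uint8x16_t __data)
{
  static const uint8_t __rev_idx[16]
    = { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
  return vqtbl1q_u8 (__data, vld1q_u8 (__rev_idx));
}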
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1_s8 (int8x8_t __r, int8x16_t __tab, uint8x8_t __idx)
{
- return __builtin_aarch64_tbx1v8qi (__r, __tab, (int8x8_t) __idx);
+ return __builtin_aarch64_qtbx1v8qi (__r, __tab, (int8x8_t) __idx);
}
__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1_u8 (uint8x8_t __r, uint8x16_t __tab, uint8x8_t __idx)
{
- return __builtin_aarch64_tbx1v8qi_uuuu (__r, __tab, __idx);
+ return __builtin_aarch64_qtbx1v8qi_uuuu (__r, __tab, __idx);
}
__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1_p8 (poly8x8_t __r, poly8x16_t __tab, uint8x8_t __idx)
{
- return (poly8x8_t) __builtin_aarch64_tbx1v8qi ((int8x8_t) __r,
- (int8x16_t) __tab,
- (int8x8_t) __idx);
+ return (poly8x8_t) __builtin_aarch64_qtbx1v8qi ((int8x8_t) __r,
+ (int8x16_t) __tab,
+ (int8x8_t) __idx);
}
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1q_s8 (int8x16_t __r, int8x16_t __tab, uint8x16_t __idx)
{
- return __builtin_aarch64_tbx1v16qi (__r, __tab, (int8x16_t) __idx);
+ return __builtin_aarch64_qtbx1v16qi (__r, __tab, (int8x16_t) __idx);
}
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1q_u8 (uint8x16_t __r, uint8x16_t __tab, uint8x16_t __idx)
{
- return __builtin_aarch64_tbx1v16qi_uuuu (__r, __tab, __idx);
+ return __builtin_aarch64_qtbx1v16qi_uuuu (__r, __tab, __idx);
}
__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1q_p8 (poly8x16_t __r, poly8x16_t __tab, uint8x16_t __idx)
{
- return (poly8x16_t) __builtin_aarch64_tbx1v16qi ((int8x16_t) __r,
- (int8x16_t) __tab,
- (int8x16_t) __idx);
+ return (poly8x16_t) __builtin_aarch64_qtbx1v16qi ((int8x16_t) __r,
+ (int8x16_t) __tab,
+ (int8x16_t) __idx);
}
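
For contrast with TBL, a minimal sketch of the vqtbx1 forms updated above (illustrative only, not part of the patch): TBX leaves a result byte untouched when its index is out of range, so the first operand acts as a fallback. The helper name is hypothetical.

#include <arm_neon.h>

/* Illustrative only: index bytes >= 16 keep the corresponding byte of
   __fallback; in-range bytes are read from __tab.  */
static inline uint8x16_t
example_lookup_with_fallback (uint8x16_t __fallback, uint8x16_t __tab,
                              uint8x16_t __idx)
{
  return vqtbx1q_u8 (__fallback, __tab, __idx);
}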
/* V7 legacy table intrinsics. */
{
int8x16_t __temp = vcombine_s8 (__tab,
vcreate_s8 (__AARCH64_UINT64_C (0x0)));
- return __builtin_aarch64_tbl1v8qi (__temp, __idx);
+ return __builtin_aarch64_qtbl1v8qi (__temp, __idx);
}
__extension__ extern __inline uint8x8_t
{
uint8x16_t __temp = vcombine_u8 (__tab,
vcreate_u8 (__AARCH64_UINT64_C (0x0)));
- return __builtin_aarch64_tbl1v8qi_uuu (__temp, __idx);
+ return __builtin_aarch64_qtbl1v8qi_uuu (__temp, __idx);
}
__extension__ extern __inline poly8x8_t
{
poly8x16_t __temp = vcombine_p8 (__tab,
vcreate_p8 (__AARCH64_UINT64_C (0x0)));
- return (poly8x8_t) __builtin_aarch64_tbl1v8qi ((int8x16_t) __temp,
- (int8x8_t) __idx);
+ return (poly8x8_t) __builtin_aarch64_qtbl1v8qi ((int8x16_t) __temp,
+ (int8x8_t) __idx);
}
__extension__ extern __inline int8x8_t
vtbl2_s8 (int8x8x2_t __tab, int8x8_t __idx)
{
int8x16_t __temp = vcombine_s8 (__tab.val[0], __tab.val[1]);
- return __builtin_aarch64_tbl1v8qi (__temp, __idx);
+ return __builtin_aarch64_qtbl1v8qi (__temp, __idx);
}
__extension__ extern __inline uint8x8_t
vtbl2_u8 (uint8x8x2_t __tab, uint8x8_t __idx)
{
uint8x16_t __temp = vcombine_u8 (__tab.val[0], __tab.val[1]);
- return __builtin_aarch64_tbl1v8qi_uuu (__temp, __idx);
+ return __builtin_aarch64_qtbl1v8qi_uuu (__temp, __idx);
}
__extension__ extern __inline poly8x8_t
vtbl2_p8 (poly8x8x2_t __tab, uint8x8_t __idx)
{
poly8x16_t __temp = vcombine_p8 (__tab.val[0], __tab.val[1]);
- return (poly8x8_t) __builtin_aarch64_tbl1v8qi ((int8x16_t) __temp,
- (int8x8_t) __idx);
+ return (poly8x8_t) __builtin_aarch64_qtbl1v8qi ((int8x16_t) __temp,
+ (int8x8_t) __idx);
}
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl3_s8 (int8x8x3_t __tab, int8x8_t __idx)
{
- int8x8_t __result;
int8x16x2_t __temp;
__builtin_aarch64_simd_oi __o;
__temp.val[0] = vcombine_s8 (__tab.val[0], __tab.val[1]);
(int8x16_t) __temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o,
(int8x16_t) __temp.val[1], 1);
- __result = __builtin_aarch64_tbl3v8qi (__o, __idx);
- return __result;
+ return __builtin_aarch64_qtbl2v8qi (__o, __idx);
}
__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl3_u8 (uint8x8x3_t __tab, uint8x8_t __idx)
{
- uint8x8_t __result;
uint8x16x2_t __temp;
__builtin_aarch64_simd_oi __o;
__temp.val[0] = vcombine_u8 (__tab.val[0], __tab.val[1]);
(int8x16_t) __temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o,
(int8x16_t) __temp.val[1], 1);
- __result = (uint8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)__idx);
- return __result;
+ return (uint8x8_t)__builtin_aarch64_qtbl2v8qi (__o, (int8x8_t)__idx);
}
__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl3_p8 (poly8x8x3_t __tab, uint8x8_t __idx)
{
- poly8x8_t __result;
poly8x16x2_t __temp;
__builtin_aarch64_simd_oi __o;
__temp.val[0] = vcombine_p8 (__tab.val[0], __tab.val[1]);
(int8x16_t) __temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o,
(int8x16_t) __temp.val[1], 1);
- __result = (poly8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)__idx);
- return __result;
+ return (poly8x8_t)__builtin_aarch64_qtbl2v8qi (__o, (int8x8_t)__idx);
}
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl4_s8 (int8x8x4_t __tab, int8x8_t __idx)
{
- int8x8_t __result;
int8x16x2_t __temp;
__builtin_aarch64_simd_oi __o;
__temp.val[0] = vcombine_s8 (__tab.val[0], __tab.val[1]);
(int8x16_t) __temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o,
(int8x16_t) __temp.val[1], 1);
- __result = __builtin_aarch64_tbl3v8qi (__o, __idx);
- return __result;
+ return __builtin_aarch64_qtbl2v8qi (__o, __idx);
}
__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl4_u8 (uint8x8x4_t __tab, uint8x8_t __idx)
{
- uint8x8_t __result;
uint8x16x2_t __temp;
__builtin_aarch64_simd_oi __o;
__temp.val[0] = vcombine_u8 (__tab.val[0], __tab.val[1]);
(int8x16_t) __temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o,
(int8x16_t) __temp.val[1], 1);
- __result = (uint8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)__idx);
- return __result;
+ return (uint8x8_t)__builtin_aarch64_qtbl2v8qi (__o, (int8x8_t)__idx);
}
__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl4_p8 (poly8x8x4_t __tab, uint8x8_t __idx)
{
- poly8x8_t __result;
poly8x16x2_t __temp;
__builtin_aarch64_simd_oi __o;
__temp.val[0] = vcombine_p8 (__tab.val[0], __tab.val[1]);
(int8x16_t) __temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o,
(int8x16_t) __temp.val[1], 1);
- __result = (poly8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)__idx);
- return __result;
+ return (poly8x8_t)__builtin_aarch64_qtbl2v8qi (__o, (int8x8_t)__idx);
}
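
As a hedged aside on the legacy vtbl3/vtbl4 lookups rewired above (illustrative only, not part of the patch): the 64-bit table parts are packed into a pair of Q registers, padding as needed, and a single qtbl2 builtin is issued, so out-of-range index bytes still read as zero, matching the ARMv7 semantics. The helper name is hypothetical.

#include <arm_neon.h>

/* Illustrative only: indices 0-31 select from the 32-byte table held in
   __tab; 32 and above produce zero.  */
static inline uint8x8_t
example_vtbl4 (uint8x8x4_t __tab, uint8x8_t __idx)
{
  return vtbl4_u8 (__tab, __idx);
}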
__extension__ extern __inline int8x8_t
vtbx2_s8 (int8x8_t __r, int8x8x2_t __tab, int8x8_t __idx)
{
int8x16_t __temp = vcombine_s8 (__tab.val[0], __tab.val[1]);
- return __builtin_aarch64_tbx1v8qi (__r, __temp, __idx);
+ return __builtin_aarch64_qtbx1v8qi (__r, __temp, __idx);
}
__extension__ extern __inline uint8x8_t
vtbx2_u8 (uint8x8_t __r, uint8x8x2_t __tab, uint8x8_t __idx)
{
uint8x16_t __temp = vcombine_u8 (__tab.val[0], __tab.val[1]);
- return __builtin_aarch64_tbx1v8qi_uuuu (__r, __temp, __idx);
+ return __builtin_aarch64_qtbx1v8qi_uuuu (__r, __temp, __idx);
}
__extension__ extern __inline poly8x8_t
vtbx2_p8 (poly8x8_t __r, poly8x8x2_t __tab, uint8x8_t __idx)
{
poly8x16_t __temp = vcombine_p8 (__tab.val[0], __tab.val[1]);
- return (poly8x8_t) __builtin_aarch64_tbx1v8qi ((int8x8_t) __r,
- (int8x16_t) __temp,
- (int8x8_t) __idx);
+ return (poly8x8_t) __builtin_aarch64_qtbx1v8qi ((int8x8_t) __r,
+ (int8x16_t) __temp,
+ (int8x8_t) __idx);
}
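
A brief hedged note on vtbx2 (illustrative only, not part of the patch): vcombine packs the two 64-bit halves into exactly one 128-bit table, so the ARMv7 rule that indices of 16 or more leave the destination byte unchanged maps directly onto the single qtbx1 builtin used above. The helper name is hypothetical.

#include <arm_neon.h>

/* Illustrative only: lanes of __idx >= 16 keep the corresponding byte of
   __fallback; in-range lanes come from the combined 16-byte table.  */
static inline uint8x8_t
example_vtbx2 (uint8x8_t __fallback, uint8x8x2_t __tab, uint8x8_t __idx)
{
  return vtbx2_u8 (__fallback, __tab, __idx);
}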
/* End of temporary inline asm. */
__builtin_aarch64_simd_oi __o;
__o = __builtin_aarch64_set_qregoiv16qi (__o, __tab.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, __tab.val[1], 1);
- return __builtin_aarch64_tbl3v8qi (__o, (int8x8_t)__idx);
+ return __builtin_aarch64_qtbl2v8qi (__o, (int8x8_t)__idx);
}
__extension__ extern __inline uint8x8_t
__builtin_aarch64_simd_oi __o;
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
- return (uint8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)__idx);
+ return (uint8x8_t)__builtin_aarch64_qtbl2v8qi (__o, (int8x8_t)__idx);
}
__extension__ extern __inline poly8x8_t
__builtin_aarch64_simd_oi __o;
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
- return (poly8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)__idx);
+ return (poly8x8_t)__builtin_aarch64_qtbl2v8qi (__o, (int8x8_t)__idx);
}
__extension__ extern __inline int8x16_t
__builtin_aarch64_simd_oi __o;
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
- return __builtin_aarch64_tbl3v16qi (__o, (int8x16_t)__idx);
+ return __builtin_aarch64_qtbl2v16qi (__o, (int8x16_t)__idx);
}
__extension__ extern __inline uint8x16_t
__builtin_aarch64_simd_oi __o;
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
- return (uint8x16_t)__builtin_aarch64_tbl3v16qi (__o, (int8x16_t)__idx);
+ return (uint8x16_t)__builtin_aarch64_qtbl2v16qi (__o, (int8x16_t)__idx);
}
__extension__ extern __inline poly8x16_t
__builtin_aarch64_simd_oi __o;
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
- return (poly8x16_t)__builtin_aarch64_tbl3v16qi (__o, (int8x16_t)__idx);
+ return (poly8x16_t)__builtin_aarch64_qtbl2v16qi (__o, (int8x16_t)__idx);
}
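
A minimal usage sketch for the vqtbl2 forms above (illustrative only, not part of the patch): the lookup spans a 32-byte table held in a pair of Q registers, and index bytes of 32 or more yield zero. The helper name is hypothetical.

#include <arm_neon.h>

/* Illustrative only: one 32-byte table lookup per call.  */
static inline uint8x16_t
example_vqtbl2q (uint8x16x2_t __tab, uint8x16_t __idx)
{
  return vqtbl2q_u8 (__tab, __idx);
}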
/* vqtbl3 */
__builtin_aarch64_simd_oi __o;
__o = __builtin_aarch64_set_qregoiv16qi (__o, __tab.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, __tab.val[1], 1);
- return __builtin_aarch64_tbx4v8qi (__r, __o, (int8x8_t)__idx);
+ return __builtin_aarch64_qtbx2v8qi (__r, __o, (int8x8_t)__idx);
}
__extension__ extern __inline uint8x8_t
__builtin_aarch64_simd_oi __o;
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
- return (uint8x8_t)__builtin_aarch64_tbx4v8qi ((int8x8_t)__r, __o,
- (int8x8_t)__idx);
+ return (uint8x8_t)__builtin_aarch64_qtbx2v8qi ((int8x8_t)__r, __o,
+ (int8x8_t)__idx);
}
__extension__ extern __inline poly8x8_t
__builtin_aarch64_simd_oi __o;
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
- return (poly8x8_t)__builtin_aarch64_tbx4v8qi ((int8x8_t)__r, __o,
- (int8x8_t)__idx);
+ return (poly8x8_t)__builtin_aarch64_qtbx2v8qi ((int8x8_t)__r, __o,
+ (int8x8_t)__idx);
}
__extension__ extern __inline int8x16_t
__builtin_aarch64_simd_oi __o;
__o = __builtin_aarch64_set_qregoiv16qi (__o, __tab.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, __tab.val[1], 1);
- return __builtin_aarch64_tbx4v16qi (__r, __o, (int8x16_t)__idx);
+ return __builtin_aarch64_qtbx2v16qi (__r, __o, (int8x16_t)__idx);
}
__extension__ extern __inline uint8x16_t
__builtin_aarch64_simd_oi __o;
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
- return (uint8x16_t)__builtin_aarch64_tbx4v16qi ((int8x16_t)__r, __o,
+ return (uint8x16_t)__builtin_aarch64_qtbx2v16qi ((int8x16_t)__r, __o,
(int8x16_t)__idx);
}
__builtin_aarch64_simd_oi __o;
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
- return (poly8x16_t)__builtin_aarch64_tbx4v16qi ((int8x16_t)__r, __o,
- (int8x16_t)__idx);
+ return (poly8x16_t)__builtin_aarch64_qtbx2v16qi ((int8x16_t)__r, __o,
+ (int8x16_t)__idx);
}
/* vqtbx3 */
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx4_s8 (int8x8_t __r, int8x8x4_t __tab, int8x8_t __idx)
{
- int8x8_t __result;
int8x16x2_t __temp;
__builtin_aarch64_simd_oi __o;
__temp.val[0] = vcombine_s8 (__tab.val[0], __tab.val[1]);
(int8x16_t) __temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o,
(int8x16_t) __temp.val[1], 1);
- __result = __builtin_aarch64_tbx4v8qi (__r, __o, __idx);
- return __result;
+ return __builtin_aarch64_qtbx2v8qi (__r, __o, __idx);
}
__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx4_u8 (uint8x8_t __r, uint8x8x4_t __tab, uint8x8_t __idx)
{
- uint8x8_t __result;
uint8x16x2_t __temp;
__builtin_aarch64_simd_oi __o;
__temp.val[0] = vcombine_u8 (__tab.val[0], __tab.val[1]);
(int8x16_t) __temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o,
(int8x16_t) __temp.val[1], 1);
- __result = (uint8x8_t)__builtin_aarch64_tbx4v8qi ((int8x8_t)__r, __o,
- (int8x8_t)__idx);
- return __result;
+ return (uint8x8_t)__builtin_aarch64_qtbx2v8qi ((int8x8_t)__r, __o,
+ (int8x8_t)__idx);
}
__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx4_p8 (poly8x8_t __r, poly8x8x4_t __tab, uint8x8_t __idx)
{
- poly8x8_t __result;
poly8x16x2_t __temp;
__builtin_aarch64_simd_oi __o;
__temp.val[0] = vcombine_p8 (__tab.val[0], __tab.val[1]);
(int8x16_t) __temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o,
(int8x16_t) __temp.val[1], 1);
- __result = (poly8x8_t)__builtin_aarch64_tbx4v8qi ((int8x8_t)__r, __o,
- (int8x8_t)__idx);
- return __result;
+ return (poly8x8_t)__builtin_aarch64_qtbx2v8qi ((int8x8_t)__r, __o,
+ (int8x8_t)__idx);
}
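
Finally, a hedged sketch of the legacy vtbx4 behaviour preserved by the qtbx2 mapping above (illustrative only, not part of the patch): the four 64-bit table parts form a 32-byte table, and indices of 32 or more keep the destination byte, as on ARMv7. The helper name is hypothetical.

#include <arm_neon.h>

/* Illustrative only: indices 0-31 read the 32-byte table; 32 and above
   keep the corresponding byte of __fallback.  */
static inline uint8x8_t
example_vtbx4 (uint8x8_t __fallback, uint8x8x4_t __tab, uint8x8_t __idx)
{
  return vtbx4_u8 (__fallback, __tab, __idx);
}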
/* vtrn */