/** \brief Bitwise not for m128*/
static really_inline m128 not128(m128 a) {
- return (m128) vec_xor(a, a);
+ return (m128)vec_xor(a, ones128());
+ // or, equivalently: return (m128) vec_nor(a, a);
+ // (note: vec_xor(a, a) would yield zero, not the complement)
}
/** \brief Return 1 if a and b are different otherwise 0 */
}
static really_inline int isnonzero128(m128 a) {
- return diff128(a, zeroes128());
+ return !!diff128(a, zeroes128());
}
/**
static really_really_inline
m128 lshift_m128(m128 a, unsigned b) {
- return (m128) vshlq_n_s32((int64x2_t)a, b);
+ //return (m128) vshlq_n_s32((int64x2_t)a, b);
+ // per-32-bit-lane shift, as vshlq_n_s32 above; vec_sl takes its count
+ // from a splat vector (vec_sll instead shifts the entire 128-bit vector
+ // left by a bit count, which is not what is wanted here)
+ return (m128) vec_sl((uint32x4_t)a, vec_splats((uint32_t)b));
}
static really_really_inline
m128 rshift_m128(m128 a, unsigned b) {
- return (m128) vshrq_n_s32((int64x2_t)a, b);
+ //return (m128) vshrq_n_s32((int64x2_t)a, b);
+ // per-32-bit-lane logical shift (vec_srl would shift the whole vector)
+ return (m128) vec_sr((uint32x4_t)a, vec_splats((uint32_t)b));
}
static really_really_inline
m128 lshift64_m128(m128 a, unsigned b) {
- return (m128) vshlq_n_s64((int64x2_t)a, b);
+ // per-64-bit-lane shift, as vshlq_n_s64; vec_sldw shifts by whole words
+ // and takes a literal, which is not what is wanted here
+ return (m128) vec_sl((uint64x2_t)a, vec_splats((uint64_t)b));
}
static really_really_inline
m128 rshift64_m128(m128 a, unsigned b) {
- return (m128) vshrq_n_s64((int64x2_t)a, b);
+ //return (m128) vshrq_n_s64((int64x2_t)a, b);
+ // per-64-bit-lane logical right shift (hedged; vsrd needs POWER8)
+ return (m128) vec_sr((uint64x2_t)a, vec_splats((uint64_t)b));
}
static really_inline m128 eq128(m128 a, m128 b) {
- return (m128) vceqq_s8((int8x16_t)a, (int8x16_t)b);
+ // vec_all_eq returns a scalar predicate, not a lane mask; vceqq_s8 is a
+ // per-byte compare, so use vec_cmpeq on byte vectors
+ return (m128) vec_cmpeq((uint8x16_t)a, (uint8x16_t)b);
}
static really_inline m128 eq64_m128(m128 a, m128 b) {
- return (m128) vceqq_u64((int64x2_t)a, (int64x2_t)b);
+ //return (m128) vceqq_u64((int64x2_t)a, (int64x2_t)b);
+ // per-64-bit-lane compare (hedged; vcmpequd needs POWER8)
+ return (m128) vec_cmpeq((uint64x2_t)a, (uint64x2_t)b);
}
static really_inline u32 movemask128(m128 a) {
- static const uint8x16_t powers = { 1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4, 8, 16, 32, 64, 128 };
+ //static const uint8x16_t powers = { 1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4, 8, 16, 32, 64, 128 };
// Compute the mask from the input
- uint64x2_t mask = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vandq_u8((uint8x16_t)a, powers))));
- uint64x2_t mask1 = (m128)vextq_s8(mask, zeroes128(), 7);
- mask = vorrq_u8(mask, mask1);
+ //uint64x2_t mask = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vandq_u8((uint8x16_t)a, powers))));
+ //uint64x2_t mask1 = (m128)vextq_s8(mask, zeroes128(), 7);
+ //mask = vorrq_u8(mask, mask1);
// Get the resulting bytes
- uint16_t output;
- vst1q_lane_u16((uint16_t*)&output, (uint16x8_t)mask, 0);
- return output;
+ //uint16_t output;
+ //vst1q_lane_u16((uint16_t*)&output, (uint16x8_t)mask, 0);
+ //return output;
+ // Hedged sketch using POWER8 vbpermq: each index below picks the MSB of
+ // one input byte (bit indices are numbered big-endian), and the gathered
+ // 16-bit mask lands in doubleword 1 on little-endian.
+ static const uint8x16_t bitidx = { 120, 112, 104, 96, 88, 80, 72, 64,
+                                    56, 48, 40, 32, 24, 16, 8, 0 };
+ uint64x2_t gathered = (uint64x2_t) vec_vbpermq((uint8x16_t)a, bitidx);
+ return vec_extract(gathered, 1);
}
static really_inline m128 set1_16x8(u8 c) {
- return (m128) vdupq_n_u8(c);
+ //return (m128) vdupq_n_u8(c);
+ // vec_splat_u8 only accepts a 5-bit literal; vec_splats takes a variable
+ return (m128) vec_splats((uint8_t)c);
}
static really_inline m128 set1_4x32(u32 c) {
- return (m128) vdupq_n_u32(c);
+ //return (m128) vdupq_n_u32(c);
+ return (m128) vec_splats(c);
}
static really_inline m128 set1_2x64(u64a c) {
- return (m128) vdupq_n_u64(c);
+ //return (m128) vdupq_n_u64(c);
+ return (m128) vec_splats((uint64_t)c);
}
static really_inline u32 movd(const m128 in) {
- return vgetq_lane_u32((uint32x4_t) in, 0);
+ //return vgetq_lane_u32((uint32x4_t) in, 0);
+ return vec_extract((uint32x4_t)in, 0);
}
static really_inline u64a movq(const m128 in) {
- return vgetq_lane_u64((uint64x2_t) in, 0);
+ //return vgetq_lane_u64((uint64x2_t) in, 0);
+ return vec_extract((uint64x2_t)in, 0);
}
/* another form of movq */
static really_inline
m128 load_m128_from_u64a(const u64a *p) {
- return (m128) vsetq_lane_u64(*p, zeroes128(), 0);
+ //return (m128) vsetq_lane_u64(*p, zeroes128(), 0);
+ // hedged: insert *p into lane 0 of a zeroed vector
+ return (m128) vec_insert(*p, (uint64x2_t)zeroes128(), 0);
}
+
static really_inline u32 extract32from128(const m128 in, unsigned imm) {
+/*
#if defined(HS_OPTIMIZE)
return vgetq_lane_u32((uint32x4_t) in, imm);
#else
break;
}
#endif
+*/
+// Hedged: GCC's vec_extract accepts a runtime lane index on VSX, which
+// collapses the HS_OPTIMIZE and switch paths above into one statement.
+return vec_extract((uint32x4_t)in, imm);
}
static really_inline u64a extract64from128(const m128 in, unsigned imm) {
+/*
#if defined(HS_OPTIMIZE)
return vgetq_lane_u64((uint64x2_t) in, imm);
#else
break;
}
#endif
+*/
+// Hedged: as in extract32from128, relying on runtime-index vec_extract.
+return vec_extract((uint64x2_t)in, imm);
}
static really_inline m128 low64from128(const m128 in) {
- return vcombine_u64(vget_low_u64(in), vdup_n_u64(0));
+ //return vcombine_u64(vget_low_u64(in), vdup_n_u64(0));
+ // keep lane 0, clear lane 1 (hedged, assumes vec_insert)
+ return (m128) vec_insert((uint64_t)0, (uint64x2_t)in, 1);
}
static really_inline m128 high64from128(const m128 in) {
- return vcombine_u64(vget_high_u64(in), vdup_n_u64(0));
+ //return vcombine_u64(vget_high_u64(in), vdup_n_u64(0));
+ // move lane 1 down to lane 0 and clear lane 1 (hedged)
+ return (m128) vec_insert(vec_extract((uint64x2_t)in, 1), (uint64x2_t)zeroes128(), 0);
}
+
static really_inline m128 add128(m128 a, m128 b) {
- return (m128) vaddq_u64((uint64x2_t)a, (uint64x2_t)b);
+ return (m128) vec_add((uint64x2_t)a, (uint64x2_t)b);
}
static really_inline m128 and128(m128 a, m128 b) {
- return (m128) vandq_s8((int8x16_t)a, (int8x16_t)b);
+ return (m128) vec_and((int8x16_t)a, (int8x16_t)b);
}
static really_inline m128 xor128(m128 a, m128 b) {
- return (m128) veorq_s8((int8x16_t)a, (int8x16_t)b);
+ return (m128) vec_xor((int8x16_t)a, (int8x16_t)b);
}
static really_inline m128 or128(m128 a, m128 b) {
- return (m128) vorrq_s8((int8x16_t)a, (int8x16_t)b);
+ return (m128) vec_or((int8x16_t)a, (int8x16_t)b);
}
static really_inline m128 andnot128(m128 a, m128 b) {
- return (m128) (m128) vandq_s8( vmvnq_s8(a), b);
+ // andnot128 is (~a) & b, matching the NEON vandq_s8(vmvnq_s8(a), b),
+ // not ~(a & b); vec_andc(x, y) computes x & ~y
+ return (m128) vec_andc((int8x16_t)b, (int8x16_t)a);
}
// aligned load
static really_inline m128 load128(const void *ptr) {
assert(ISALIGNED_N(ptr, alignof(m128)));
- return (m128) vld1q_s32((const int32_t *)ptr);
+ //return (m128) vld1q_s32((const int32_t *)ptr);
+ //return *(int64x2_t *) (&ptr[0]);
+ // hedged: vec_xl also handles the (asserted) aligned case
+ return (m128) vec_xl(0, (const int32_t *)ptr);
}
// aligned store
static really_inline void store128(void *ptr, m128 a) {
- assert(ISALIGNED_N(ptr, alignof(m128)));
- vst1q_s32((int32_t *)ptr, a);
+ assert(ISALIGNED_N(ptr, alignof(m128)));
+ //vst1q_s32((int32_t *)ptr, a);
+ // hedged: vec_xst is the VSX store
+ vec_xst(a, 0, (int32_t *)ptr);
}
// unaligned load
static really_inline m128 loadu128(const void *ptr) {
- return (m128) vld1q_s32((const int32_t *)ptr);
+ //return (m128) vld1q_s32((const int32_t *)ptr);
+ // hedged: vec_xl is the VSX unaligned load
+ return (m128) vec_xl(0, (const int32_t *)ptr);
}
// unaligned store
static really_inline void storeu128(void *ptr, m128 a) {
- vst1q_s32((int32_t *)ptr, a);
+ //vst1q_s32((int32_t *)ptr, a);
+ // hedged: vec_xst is the VSX unaligned store
+ vec_xst(a, 0, (int32_t *)ptr);
}
// packed unaligned store of first N bytes
static really_really_inline
m128 palignr(m128 r, m128 l, int offset) {
+/*
#if defined(HS_OPTIMIZE)
return (m128)vextq_s8((int8x16_t)l, (int8x16_t)r, offset);
#else
return palignr_imm(r, l, offset);
#endif
+*/
+#warning FIXME
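+// Hedged note: the VSX analogue is vec_sld, but it needs a compile-time
+// shift count and is not endian-adjusted on little-endian targets, so the
+// NEON mapping above is not a drop-in one-liner.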
}
+
#undef CASE_ALIGN_VECTORS
static really_really_inline
m128 rshiftbyte_m128(m128 a, unsigned b) {
- return palignr(zeroes128(), a, b);
+ //return palignr(zeroes128(), a, b);
+ // hedged: vec_sro shifts the whole quadword right by octets, taking its
+ // count from a splat of (b << 3); matches _mm_srli_si128 on LE
+ return (m128) vec_sro((uint8x16_t)a, vec_splats((uint8_t)(b << 3)));
}
static really_really_inline
m128 lshiftbyte_m128(m128 a, unsigned b) {
- return palignr(a, zeroes128(), 16 - b);
+ //return palignr(a, zeroes128(), 16 - b);
+ // hedged: vec_slo is the matching left byte shift
+ return (m128) vec_slo((uint8x16_t)a, vec_splats((uint8_t)(b << 3)));
}
static really_inline
m128 variable_byte_shift_m128(m128 in, s32 amount) {
+/*
assert(amount >= -16 && amount <= 16);
static const uint8x16_t vbs_mask = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
const uint8x16_t outside_mask = set1_16x8(0xf0);
m128 shift_mask = palignr_imm(vbs_mask, outside_mask, 16 - amount);
return vqtbl1q_s8(in, shift_mask);
+*/
+#warning FIXME
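+// Hedged sketch: with the vec_slo/vec_sro byte shifts defined above, this
+// reduces to picking a direction from the sign of amount:
+//   return amount >= 0 ? lshiftbyte_m128(in, amount)
+//                      : rshiftbyte_m128(in, -amount);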
}
#ifdef __cplusplus
static really_inline
char testbit128(m128 val, unsigned int n) {
const m128 mask = mask1bit128(n);
-
return isnonzero128(and128(mask, val));
}
/* On Intel, if bit 0x80 is set, then result is zero, otherwise which the lane it is &0xf.
In NEON, if >=16, then the result is zero, otherwise it is that lane.
btranslated is the version that is converted from Intel to NEON. */
- int8x16_t btranslated = vandq_s8((int8x16_t)b,vdupq_n_s8(0x8f));
- return (m128)vqtbl1q_s8((int8x16_t)a, (uint8x16_t)btranslated);
+ //int8x16_t btranslated = vandq_s8((int8x16_t)b,vdupq_n_s8(0x8f));
+ //return (m128)vqtbl1q_s8((int8x16_t)a, (uint8x16_t)btranslated);
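+ // Hedged VSX sketch: vec_perm picks bytes by the control's low nibble
+ // (using a:a makes index bit 4 harmless); lanes whose control byte has
+ // bit 7 set are then zeroed to match the Intel semantics above:
+ //   uint8x16_t ctl  = (uint8x16_t)b;
+ //   uint8x16_t res  = vec_perm((uint8x16_t)a, (uint8x16_t)a, ctl);
+ //   uint8x16_t keep = (uint8x16_t)vec_cmplt(ctl, vec_splats((uint8_t)0x80));
+ //   return (m128) vec_and(res, keep);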
+ #warning FIXME
}
static really_inline
m128 max_u8_m128(m128 a, m128 b) {
- return (m128) vmaxq_u8((int8x16_t)a, (int8x16_t)b);
+ // unsigned byte max, so the casts must be unsigned
+ return (m128) vec_max((uint8x16_t)a, (uint8x16_t)b);
}
static really_inline
m128 min_u8_m128(m128 a, m128 b) {
- return (m128) vminq_u8((int8x16_t)a, (int8x16_t)b);
+ return (m128) vec_min((uint8x16_t)a, (uint8x16_t)b);
}
static really_inline
m128 sadd_u8_m128(m128 a, m128 b) {
- return (m128) vqaddq_u8((uint8x16_t)a, (uint8x16_t)b);
+ // vqaddq_u8 saturates; the AltiVec equivalent is vec_adds, not vec_add
+ return (m128) vec_adds((uint8x16_t)a, (uint8x16_t)b);
}
static really_inline
m128 sub_u8_m128(m128 a, m128 b) {
- return (m128) vsubq_u8((uint8x16_t)a, (uint8x16_t)b);
+ return (m128) vec_sub((uint8x16_t)a, (uint8x16_t)b);
}
static really_inline
m128 set4x32(u32 x3, u32 x2, u32 x1, u32 x0) {
- uint32_t ALIGN_ATTR(16) data[4] = { x0, x1, x2, x3 };
- return (m128) vld1q_u32((uint32_t *) data);
+ //uint32_t ALIGN_ATTR(16) data[4] = { x0, x1, x2, x3 };
+ //return (m128) vld1q_u32((uint32_t *) data);
+ // hedged: GCC vector types accept element-wise initializers
+ uint32x4_t v = { x0, x1, x2, x3 };
+ return (m128) v;
}
static really_inline
m128 set2x64(u64a hi, u64a lo) {
- uint64_t ALIGN_ATTR(16) data[2] = { lo, hi };
- return (m128) vld1q_u64((uint64_t *) data);
+ //uint64_t ALIGN_ATTR(16) data[2] = { lo, hi };
+ //return (m128) vld1q_u64((uint64_t *) data);
+ uint64x2_t v = { lo, hi };
+ return (m128) v;
}
-#endif // ARCH_ARM_SIMD_UTILS_H
+#endif // ARCH_PPC64EL_SIMD_UTILS_H
really_inline SuperVector<16>::SuperVector<int8_t>(int8_t const other)
{
//u.v128[0] = _mm_set1_epi8(other);
- u.v128[0] = vdupq_n_u8(other);
+ // vec_splat_s8 needs a 5-bit literal; vec_splats accepts a variable
+ u.v128[0] = (m128) vec_splats(other);
}
template<>
really_inline SuperVector<16>::SuperVector<uint8_t>(uint8_t const other)
{
//u.v128[0] = _mm_set1_epi8(static_cast<int8_t>(other));
- u.v128[0] = vdupq_n_u8(static_cast<int8_t>(other));
+ u.v128[0] = (m128) vec_splats(static_cast<int8_t>(other));
}
template<>
really_inline SuperVector<16>::SuperVector<int16_t>(int16_t const other)
{
//u.v128[0] = _mm_set1_epi16(other);
+ u.v128[0] = (m128) vec_splats(other);
}
template<>
really_inline SuperVector<16>::SuperVector<uint16_t>(uint16_t const other)
{
//u.v128[0] = _mm_set1_epi16(static_cast<int16_t>(other));
+ u.v128[0] = (m128) vec_splats(static_cast<int16_t>(other));
}
template<>
really_inline SuperVector<16>::SuperVector<int32_t>(int32_t const other)
{
//u.v128[0] = _mm_set1_epi32(other);
- u.v128[0] = vdupq_n_u32(other);
+ u.v128[0] = (m128) vec_splats(other);
}
template<>
really_inline SuperVector<16>::SuperVector<uint32_t>(uint32_t const other)
{
//u.v128[0] = _mm_set1_epi32(static_cast<int32_t>(other));
- u.v128[0] = vdupq_n_u32(static_cast<int32_t>(other));
+ u.v128[0] = (m128) vec_splats(static_cast<int32_t>(other));
}
template<>
really_inline SuperVector<16>::SuperVector<int64_t>(int64_t const other)
{
//u.v128[0] = _mm_set1_epi64x(other);
- u.v128[0] = vdupq_n_u64(other);
+ u.v128[0] = (m128) vec_splats(other);
}
template<>
really_inline SuperVector<16>::SuperVector<uint64_t>(uint64_t const other)
{
//u.v128[0] = _mm_set1_epi64x(static_cast<int64_t>(other));
- u.v128[0] = vdupq_n_u64(static_cast<int64_t>(other));
+ u.v128[0] = (m128) vec_splats(static_cast<int64_t>(other));
}
// Constants
really_inline SuperVector<16> SuperVector<16>::operator&(SuperVector<16> const &b) const
{
//return {_mm_and_si128(u.v128[0], b.u.v128[0])};
- return {vec_add(u.v128[0], b.u.v128[0])};
+ return {vec_and(u.v128[0], b.u.v128[0])};
}
template <>
really_inline SuperVector<16> SuperVector<16>::opandnot(SuperVector<16> const &b) const
{
//return {_mm_andnot_si128(u.v128[0], b.u.v128[0])};
- return 0;
+ // (~this) & b, as in _mm_andnot_si128; vec_andc(x, y) computes x & ~y
+ return {vec_andc(b.u.v128[0], u.v128[0])};
}
template <>
really_inline SuperVector<16> SuperVector<16>::eq(SuperVector<16> const &b) const
{
//return {_mm_cmpeq_epi8(u.v128[0], b.u.v128[0])};
- return {vec_cmpeq(u.v128[0], b.u.v128[0])};
+ // _mm_cmpeq_epi8 compares per byte, and vec_all_eq returns a scalar
+ // predicate rather than a mask, so use vec_cmpeq on byte vectors
+ return {(m128) vec_cmpeq((uint8x16_t)u.v128[0], (uint8x16_t)b.u.v128[0])};
}
template <>
{
//return _mm_movemask_epi8(u.v128[0]);
// Compute the mask from the input
- uint64x2_t mask = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vandq_u8((uint8x16_t)u.v128[0], 0))));
- uint64x2_t mask1 = (m128)vextq_s8(mask, Zeroes(), 7);
- mask = vorrq_u8(mask, mask1);
+ //uint64x2_t mask = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vandq_u8((uint8x16_t)u.v128[0], 0))));
+ //uint64x2_t mask1 = (m128)vextq_s8(mask, Zeroes(), 7);
+ //mask = vorrq_u8(mask, mask1);
// Get the resulting bytes
- uint16_t output;
- vst1q_lane_u16((uint16_t*)&output, (uint16x8_t)mask, 0);
- return output;
- return 0;
+ //uint16_t output;
+ //vst1q_lane_u16((uint16_t*)&output, (uint16x8_t)mask, 0);
+ //return output;
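+ // See movemask128 in simd_utils.h: the same vbpermq bit-gather sketch
+ // applies to u.v128[0] here.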
+ #warning FIXME
}
template <>
really_inline SuperVector<16> SuperVector<16>::rshift128_var(uint8_t const N) const
{
switch(N) {
- case 1: return {vshrq_n_s32(u.v128[0], 1)}; break;
- case 2: return {vshrq_n_s32(u.v128[0], 2)}; break;
- case 3: return {vshrq_n_s32(u.v128[0], 3)}; break;
- case 4: return {vshrq_n_s32(u.v128[0], 4)}; break;
- case 5: return {vshrq_n_s32(u.v128[0], 5)}; break;
- case 6: return {vshrq_n_s32(u.v128[0], 6)}; break;
- case 7: return {vshrq_n_s32(u.v128[0], 7)}; break;
- case 8: return {vshrq_n_s32(u.v128[0], 8)}; break;
- case 9: return {vshrq_n_s32(u.v128[0], 9)}; break;
- case 10: return {vshrq_n_s32(u.v128[0], 10)}; break;
- case 11: return {vshrq_n_s32(u.v128[0], 11)}; break;
- case 12: return {vshrq_n_s32(u.v128[0], 12)}; break;
- case 13: return {vshrq_n_s32(u.v128[0], 13)}; break;
- case 14: return {vshrq_n_s32(u.v128[0], 14)}; break;
- case 15: return {vshrq_n_s32(u.v128[0], 15)}; break;
+ // hedged: N counts bytes across the whole vector (case 16 yields
+ // Zeroes), so shift by octets with vec_sro, splatting (N << 3)
+ case 1: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(1 << 3)))}; break;
+ case 2: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(2 << 3)))}; break;
+ case 3: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(3 << 3)))}; break;
+ case 4: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(4 << 3)))}; break;
+ case 5: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(5 << 3)))}; break;
+ case 6: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(6 << 3)))}; break;
+ case 7: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(7 << 3)))}; break;
+ case 8: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(8 << 3)))}; break;
+ case 9: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(9 << 3)))}; break;
+ case 10: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(10 << 3)))}; break;
+ case 11: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(11 << 3)))}; break;
+ case 12: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(12 << 3)))}; break;
+ case 13: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(13 << 3)))}; break;
+ case 14: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(14 << 3)))}; break;
+ case 15: return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(15 << 3)))}; break;
case 16: return Zeroes(); break;
default: break;
}
template <>
really_inline SuperVector<16> SuperVector<16>::operator>>(uint8_t const N) const
{
- return {vshrq_n_s32(u.v128[0], N)};
+ return {(m128)vec_sro((uint8x16_t)u.v128[0], vec_splats((uint8_t)(N << 3)))};
}
#else
template <>
really_inline SuperVector<16> SuperVector<16>::lshift128_var(uint8_t const N) const
{
switch(N) {
- case 1: return {vshlq_n_s32(u.v128[0], 1)}; break;
- case 2: return {vshlq_n_s32(u.v128[0], 2)}; break;
- case 3: return {vshlq_n_s32(u.v128[0], 3)}; break;
- case 4: return {vshlq_n_s32(u.v128[0], 4)}; break;
- case 5: return {vshlq_n_s32(u.v128[0], 5)}; break;
- case 6: return {vshlq_n_s32(u.v128[0], 6)}; break;
- case 7: return {vshlq_n_s32(u.v128[0], 7)}; break;
- case 8: return {vshlq_n_s32(u.v128[0], 8)}; break;
- case 9: return {vshlq_n_s32(u.v128[0], 9)}; break;
- case 10: return {vshlq_n_s32(u.v128[0], 10)}; break;
- case 11: return {vshlq_n_s32(u.v128[0], 11)}; break;
- case 12: return {vshlq_n_s32(u.v128[0], 12)}; break;
- case 13: return {vshlq_n_s32(u.v128[0], 13)}; break;
- case 14: return {vshlq_n_s32(u.v128[0], 14)}; break;
- case 15: return {vshlq_n_s32(u.v128[0], 15)}; break;
+ // hedged: left byte shift of the whole vector via vec_slo
+ case 1: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(1 << 3)))}; break;
+ case 2: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(2 << 3)))}; break;
+ case 3: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(3 << 3)))}; break;
+ case 4: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(4 << 3)))}; break;
+ case 5: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(5 << 3)))}; break;
+ case 6: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(6 << 3)))}; break;
+ case 7: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(7 << 3)))}; break;
+ case 8: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(8 << 3)))}; break;
+ case 9: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(9 << 3)))}; break;
+ case 10: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(10 << 3)))}; break;
+ case 11: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(11 << 3)))}; break;
+ case 12: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(12 << 3)))}; break;
+ case 13: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(13 << 3)))}; break;
+ case 14: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(14 << 3)))}; break;
+ case 15: return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(15 << 3)))}; break;
case 16: return Zeroes(); break;
default: break;
}
template <>
really_inline SuperVector<16> SuperVector<16>::operator<<(uint8_t const N) const
{
- return {vshlq_n_s32(u.v128[0], N)};
+ return {(m128)vec_slo((uint8x16_t)u.v128[0], vec_splats((uint8_t)(N << 3)))};
}
#else
template <>
really_inline SuperVector<16> SuperVector<16>::loadu(void const *ptr)
{
//return _mm_loadu_si128((const m128 *)ptr);
- return vld1q_s32((const int32_t *)ptr)
+ // hedged: vec_xl is the VSX unaligned load
+ return {(m128) vec_xl(0, (const int32_t *)ptr)};
}
template <>
//assert(ISALIGNED_N(ptr, alignof(SuperVector::size)));
//ptr = assume_aligned(ptr, SuperVector::size);
//return _mm_load_si128((const m128 *)ptr);
- assert(ISALIGNED_N(ptr, alignof(m128)));
- return vld1q_s32((const int32_t *)ptr);
-
+ assert(ISALIGNED_N(ptr, alignof(m128)));
+ //return vld1q_s32((const int32_t *)ptr);
+ // hedged: vec_xl also serves the aligned case
+ return {(m128) vec_xl(0, (const int32_t *)ptr)};
}
template <>
really_inline SuperVector<16> SuperVector<16>::loadu_maskz(void const *ptr, uint8_t const len)
{
- SuperVector<16> mask = Ones().rshift128_var(16 -len);
- mask.print8("mask");
- SuperVector<16> v = vld1q_s32((const int32_t *)ptr);
- v.print8("v");
- return mask & v;
+ //SuperVector<16> mask = Ones().rshift128_var(16 -len);
+ //mask.print8("mask");
+ //SuperVector<16> v = vld1q_s32((const int32_t *)ptr);
+ //v.print8("v");
+ //return mask & v;
+ // hedged: same logic as the NEON version, through this class's loadu
+ SuperVector<16> mask = Ones().rshift128_var(16 - len);
+ SuperVector<16> v = loadu(ptr);
+ return mask & v;
}
#ifdef HS_OPTIMIZE
template<>
really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, int8_t offset)
{
- return {vextq_s8(u.v128[0], other.u.v128[0], offset)};
+ //return {vextq_s8(u.v128[0], other.u.v128[0], offset)};
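+ // Hedged note: vec_sld needs a literal shift count and keeps big-endian
+ // byte numbering, so the mapping from vextq_s8 is not direct; see
+ // palignr in simd_utils.h.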
+ #warning FIXME
}
#else
template<>
really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, int8_t offset)
-{
+{
+ /*
switch(offset) {
case 0: return other; break;
case 1: return {vextq_s8(u.v128[0], other.u.v128[0], 1)}; break;
default: break;
}
return *this;
+ */
+ #warning FIXME
}
#endif
really_inline SuperVector<16> SuperVector<16>::pshufb(SuperVector<16> b)
{
//return {_mm_shuffle_epi8(u.v128[0], b.u.v128[0])};
- int8x16_t btranslated = vandq_s8((int8x16_t)b.u.v128[0],vdupq_n_s8(0x8f));
- return (m128)vqtbl1q_s8((int8x16_t)u.v128[0], (uint8x16_t)btranslated);
+ //int8x16_t btranslated = vandq_s8((int8x16_t)b.u.v128[0],vdupq_n_s8(0x8f));
+ //return (m128)vqtbl1q_s8((int8x16_t)u.v128[0], (uint8x16_t)btranslated);
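+ // Hedged: the vec_perm + high-bit masking sketch for pshufb_m128 in
+ // simd_utils.h applies here, with b.u.v128[0] as the control vector.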
+ #warning FIXME
}
template<>
template<>
really_inline SuperVector<16> SuperVector<16>::lshift64(uint8_t const N)
{
- return {vshlq_n_s64(u.v128[0], N)};
+ //return {vshlq_n_s64(u.v128[0], N)};
+ // vec_sldw shifts by whole words and needs a literal; what vshlq_n_s64
+ // does is a per-64-bit-lane bit shift
+ return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)N))};
}
#else
template<>
{
switch(N) {
case 0: return *this; break;
- case 1: return {vshlq_n_s64(u.v128[0], 1)}; break;
- case 2: return {vshlq_n_s64(u.v128[0], 2)}; break;
- case 3: return {vshlq_n_s64(u.v128[0], 3)}; break;
- case 4: return {vshlq_n_s64(u.v128[0], 4)}; break;
- case 5: return {vshlq_n_s64(u.v128[0], 5)}; break;
- case 6: return {vshlq_n_s64(u.v128[0], 6)}; break;
- case 7: return {vshlq_n_s64(u.v128[0], 7)}; break;
- case 8: return {vshlq_n_s64(u.v128[0], 8)}; break;
- case 9: return {vshlq_n_s64(u.v128[0], 9)}; break;
- case 10: return {vshlq_n_s64(u.v128[0], 10)}; break;
- case 11: return {vshlq_n_s64(u.v128[0], 11)}; break;
- case 12: return {vshlq_n_s64(u.v128[0], 12)}; break;
- case 13: return {vshlq_n_s64(u.v128[0], 13)}; break;
- case 14: return {vshlq_n_s64(u.v128[0], 14)}; break;
- case 15: return {vshlq_n_s64(u.v128[0], 15)}; break;
+ // per-lane 64-bit shifts: vec_sl takes a splatted count (vec_sldw is a
+ // word-granularity double-vector shift and does not apply here)
+ case 1: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)1))}; break;
+ case 2: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)2))}; break;
+ case 3: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)3))}; break;
+ case 4: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)4))}; break;
+ case 5: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)5))}; break;
+ case 6: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)6))}; break;
+ case 7: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)7))}; break;
+ case 8: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)8))}; break;
+ case 9: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)9))}; break;
+ case 10: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)10))}; break;
+ case 11: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)11))}; break;
+ case 12: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)12))}; break;
+ case 13: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)13))}; break;
+ case 14: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)14))}; break;
+ case 15: return {(m128) vec_sl((uint64x2_t)u.v128[0], vec_splats((uint64_t)15))}; break;
case 16: return Zeroes();
default: break;
}
template<>
really_inline SuperVector<16> SuperVector<16>::rshift64(uint8_t const N)
{
- return {vshrq_n_s64(u.v128[0], N)};
+ //return {vshrq_n_s64(u.v128[0], N)};
+ return {(m128) vec_sr((uint64x2_t)u.v128[0], vec_splats((uint64_t)N))};
}
#else
template<>
really_inline SuperVector<16> SuperVector<16>::rshift64(uint8_t const N)
-{
+{
+ /*
switch(N) {
case 0: return {vshrq_n_s64(u.v128[0], 0)}; break;
case 1: return {vshrq_n_s64(u.v128[0], 1)}; break;
default: break;
}
return *this;
+ */
+ // hedged: vec_sr takes a vector count, so no per-case switch is needed
+ return {(m128) vec_sr((uint64x2_t)u.v128[0], vec_splats((uint64_t)N))};
}
#endif