/* Copyright (C) 2007-2024 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 10.0.  */
27 #ifndef _SMMINTRIN_H_INCLUDED
28 #define _SMMINTRIN_H_INCLUDED
30 /* We need definitions from the SSSE3, SSE3, SSE2 and SSE header
32 #include <tmmintrin.h>
35 #pragma GCC push_options
36 #pragma GCC target("sse4.1")
37 #define __DISABLE_SSE4_1__
38 #endif /* __SSE4_1__ */
/* Rounding mode macros.  Bits [2:0] select the rounding mode
   (or defer to MXCSR.RC), bit 3 suppresses precision exceptions.  */
#define _MM_FROUND_TO_NEAREST_INT	0x00
#define _MM_FROUND_TO_NEG_INF		0x01
#define _MM_FROUND_TO_POS_INF		0x02
#define _MM_FROUND_TO_ZERO		0x03
#define _MM_FROUND_CUR_DIRECTION	0x04

#define _MM_FROUND_RAISE_EXC		0x00
#define _MM_FROUND_NO_EXC		0x08

/* Convenience combinations mirroring the C math functions of the
   same names.  */
#define _MM_FROUND_NINT		\
  (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_FLOOR	\
  (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_CEIL		\
  (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_TRUNC	\
  (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_RINT		\
  (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_NEARBYINT	\
  (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)
63 /* Test Instruction */
64 /* Packed integer 128-bit bitwise comparison. Return 1 if
66 extern __inline
int __attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
67 _mm_testz_si128 (__m128i __M
, __m128i __V
)
69 return __builtin_ia32_ptestz128 ((__v2di
)__M
, (__v2di
)__V
);
72 /* Packed integer 128-bit bitwise comparison. Return 1 if
74 extern __inline
int __attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
75 _mm_testc_si128 (__m128i __M
, __m128i __V
)
77 return __builtin_ia32_ptestc128 ((__v2di
)__M
, (__v2di
)__V
);
80 /* Packed integer 128-bit bitwise comparison. Return 1 if
81 (__V & __M) != 0 && (__V & ~__M) != 0. */
82 extern __inline
int __attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
83 _mm_testnzc_si128 (__m128i __M
, __m128i __V
)
85 return __builtin_ia32_ptestnzc128 ((__v2di
)__M
, (__v2di
)__V
);
/* Macros for packed integer 128-bit comparison intrinsics.  */
#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))

#define _mm_test_all_ones(V) \
  _mm_testc_si128 ((V), _mm_cmpeq_epi32 ((V), (V)))

#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128 ((M), (V))
/* Packed/scalar double precision floating point rounding.  */

/* The rounding-mode operand must be an immediate, so the inline
   functions are only usable when constants propagate (__OPTIMIZE__);
   otherwise the macro forms pass the literal straight through.  */
#ifdef __OPTIMIZE__
extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_round_pd (__m128d __V, const int __M)
{
  return (__m128d) __builtin_ia32_roundpd ((__v2df)__V, __M);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_round_sd (__m128d __D, __m128d __V, const int __M)
{
  return (__m128d) __builtin_ia32_roundsd ((__v2df)__D,
					   (__v2df)__V, __M);
}
#else
#define _mm_round_pd(V, M) \
  ((__m128d) __builtin_ia32_roundpd ((__v2df)(__m128d)(V), (int)(M)))

#define _mm_round_sd(D, V, M) \
  ((__m128d) __builtin_ia32_roundsd ((__v2df)(__m128d)(D), \
				     (__v2df)(__m128d)(V), (int)(M)))
#endif
/* Packed/scalar single precision floating point rounding.  */

/* See _mm_round_pd for why the inline forms are __OPTIMIZE__-only.  */
#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_round_ps (__m128 __V, const int __M)
{
  return (__m128) __builtin_ia32_roundps ((__v4sf)__V, __M);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_round_ss (__m128 __D, __m128 __V, const int __M)
{
  return (__m128) __builtin_ia32_roundss ((__v4sf)__D,
					  (__v4sf)__V, __M);
}
#else
#define _mm_round_ps(V, M) \
  ((__m128) __builtin_ia32_roundps ((__v4sf)(__m128)(V), (int)(M)))

#define _mm_round_ss(D, V, M) \
  ((__m128) __builtin_ia32_roundss ((__v4sf)(__m128)(D), \
				    (__v4sf)(__m128)(V), (int)(M)))
#endif
/* Macros for ceil/floor intrinsics.  */
#define _mm_ceil_pd(V)	   _mm_round_pd ((V), _MM_FROUND_CEIL)
#define _mm_ceil_sd(D, V)  _mm_round_sd ((D), (V), _MM_FROUND_CEIL)

#define _mm_floor_pd(V)	   _mm_round_pd((V), _MM_FROUND_FLOOR)
#define _mm_floor_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_FLOOR)

#define _mm_ceil_ps(V)	   _mm_round_ps ((V), _MM_FROUND_CEIL)
#define _mm_ceil_ss(D, V)  _mm_round_ss ((D), (V), _MM_FROUND_CEIL)

#define _mm_floor_ps(V)	   _mm_round_ps ((V), _MM_FROUND_FLOOR)
#define _mm_floor_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_FLOOR)
/* Integer blend instructions - select data from 2 sources using
   constant/variable mask.  */

/* The mask must be an immediate; inline form requires __OPTIMIZE__.  */
#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__X,
					      (__v8hi)__Y,
					      __M);
}
#else
#define _mm_blend_epi16(X, Y, M) \
  ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(X), \
					(__v8hi)(__m128i)(Y), (int)(M)))
#endif
178 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
179 _mm_blendv_epi8 (__m128i __X
, __m128i __Y
, __m128i __M
)
181 return (__m128i
) __builtin_ia32_pblendvb128 ((__v16qi
)__X
,
/* Single precision floating point blend instructions - select data
   from 2 sources using constant/variable mask.  */

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_blendps ((__v4sf)__X,
					  (__v4sf)__Y,
					  __M);
}
#else
#define _mm_blend_ps(X, Y, M) \
  ((__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(X), \
				    (__v4sf)(__m128)(Y), (int)(M)))
#endif
203 extern __inline __m128
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
204 _mm_blendv_ps (__m128 __X
, __m128 __Y
, __m128 __M
)
206 return (__m128
) __builtin_ia32_blendvps ((__v4sf
)__X
,
/* Double precision floating point blend instructions - select data
   from 2 sources using constant/variable mask.  */

#ifdef __OPTIMIZE__
extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_blendpd ((__v2df)__X,
					   (__v2df)__Y,
					   __M);
}
#else
#define _mm_blend_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(X), \
				     (__v2df)(__m128d)(Y), (int)(M)))
#endif
228 extern __inline __m128d
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
229 _mm_blendv_pd (__m128d __X
, __m128d __Y
, __m128d __M
)
231 return (__m128d
) __builtin_ia32_blendvpd ((__v2df
)__X
,
/* Dot product instructions with mask-defined summing and zeroing parts
   of result.  */

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_dp_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_dpps ((__v4sf)__X,
				       (__v4sf)__Y,
				       __M);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_dp_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_dppd ((__v2df)__X,
					(__v2df)__Y,
					__M);
}
#else
#define _mm_dp_ps(X, Y, M) \
  ((__m128) __builtin_ia32_dpps ((__v4sf)(__m128)(X), \
				 (__v4sf)(__m128)(Y), (int)(M)))

#define _mm_dp_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_dppd ((__v2df)(__m128d)(X), \
				  (__v2df)(__m128d)(Y), (int)(M)))
#endif
265 /* Packed integer 64-bit comparison, zeroing or filling with ones
266 corresponding parts of result. */
267 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
268 _mm_cmpeq_epi64 (__m128i __X
, __m128i __Y
)
270 return (__m128i
) ((__v2di
)__X
== (__v2di
)__Y
);
273 /* Min/max packed integer instructions. */
275 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
276 _mm_min_epi8 (__m128i __X
, __m128i __Y
)
278 return (__m128i
) __builtin_ia32_pminsb128 ((__v16qi
)__X
, (__v16qi
)__Y
);
281 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
282 _mm_max_epi8 (__m128i __X
, __m128i __Y
)
284 return (__m128i
) __builtin_ia32_pmaxsb128 ((__v16qi
)__X
, (__v16qi
)__Y
);
287 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
288 _mm_min_epu16 (__m128i __X
, __m128i __Y
)
290 return (__m128i
) __builtin_ia32_pminuw128 ((__v8hi
)__X
, (__v8hi
)__Y
);
293 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
294 _mm_max_epu16 (__m128i __X
, __m128i __Y
)
296 return (__m128i
) __builtin_ia32_pmaxuw128 ((__v8hi
)__X
, (__v8hi
)__Y
);
299 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
300 _mm_min_epi32 (__m128i __X
, __m128i __Y
)
302 return (__m128i
) __builtin_ia32_pminsd128 ((__v4si
)__X
, (__v4si
)__Y
);
305 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
306 _mm_max_epi32 (__m128i __X
, __m128i __Y
)
308 return (__m128i
) __builtin_ia32_pmaxsd128 ((__v4si
)__X
, (__v4si
)__Y
);
311 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
312 _mm_min_epu32 (__m128i __X
, __m128i __Y
)
314 return (__m128i
) __builtin_ia32_pminud128 ((__v4si
)__X
, (__v4si
)__Y
);
317 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
318 _mm_max_epu32 (__m128i __X
, __m128i __Y
)
320 return (__m128i
) __builtin_ia32_pmaxud128 ((__v4si
)__X
, (__v4si
)__Y
);
323 /* Packed integer 32-bit multiplication with truncation of upper
324 halves of results. */
325 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
326 _mm_mullo_epi32 (__m128i __X
, __m128i __Y
)
328 return (__m128i
) ((__v4su
)__X
* (__v4su
)__Y
);
331 /* Packed integer 32-bit multiplication of 2 pairs of operands
332 with two 64-bit results. */
333 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
334 _mm_mul_epi32 (__m128i __X
, __m128i __Y
)
336 return (__m128i
) __builtin_ia32_pmuldq128 ((__v4si
)__X
, (__v4si
)__Y
);
/* Insert single precision float into packed single precision array
   element selected by index N.  The bits [7-6] of N define S
   index, the bits [5-4] define D index, and bits [3-0] define
   zeroing mask for D.  */

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_ps (__m128 __D, __m128 __S, const int __N)
{
  return (__m128) __builtin_ia32_insertps128 ((__v4sf)__D,
					      (__v4sf)__S,
					      __N);
}
#else
#define _mm_insert_ps(D, S, N) \
  ((__m128) __builtin_ia32_insertps128 ((__v4sf)(__m128)(D), \
					(__v4sf)(__m128)(S), (int)(N)))
#endif

/* Helper macro to create the N value for _mm_insert_ps.  */
#define _MM_MK_INSERTPS_NDX(S, D, M) (((S) << 6) | ((D) << 4) | (M))
/* Extract binary representation of single precision float from packed
   single precision array element of X selected by index N.  */

#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_ps (__m128 __X, const int __N)
{
  /* Type-pun the float lane through a union to get its bit pattern
     without violating strict aliasing.  */
  union { int __i; float __f; } __tmp;
  __tmp.__f = __builtin_ia32_vec_ext_v4sf ((__v4sf)__X, __N);
  return __tmp.__i;
}
#else
#define _mm_extract_ps(X, N) \
  (__extension__ \
   ({ \
     union { int __i; float __f; } __tmp; \
     __tmp.__f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(__m128)(X), \
					      (int)(N)); \
     __tmp.__i; \
   }))
#endif

/* Extract binary representation of single precision float into
   D from packed single precision array element of S selected
   by index N.  */
#define _MM_EXTRACT_FLOAT(D, S, N) \
  { (D) = __builtin_ia32_vec_ext_v4sf ((__v4sf)(S), (N)); }

/* Extract specified single precision float element into the lower
   part of __m128.  */
#define _MM_PICK_OUT_PS(X, N) \
  _mm_insert_ps (_mm_setzero_ps (), (X), \
		 _MM_MK_INSERTPS_NDX ((N), 0, 0x0e))
/* Insert integer, S, into packed integer array element of D
   selected by index N.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_epi8 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)__D,
						 __S, __N);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_epi32 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v4si ((__v4si)__D,
						__S, __N);
}

/* The 64-bit element form needs PINSRQ, available only in 64-bit mode.  */
#ifdef __x86_64__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_epi64 (__m128i __D, long long __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v2di ((__v2di)__D,
						__S, __N);
}
#endif
#else
#define _mm_insert_epi8(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(__m128i)(D), \
					   (int)(S), (int)(N)))

#define _mm_insert_epi32(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(__m128i)(D), \
					  (int)(S), (int)(N)))

#ifdef __x86_64__
#define _mm_insert_epi64(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(__m128i)(D), \
					  (long long)(S), (int)(N)))
#endif
#endif
/* Extract integer from packed integer array element of X selected by
   index N.  */

#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi8 (__m128i __X, const int __N)
{
  /* Cast through unsigned char so the byte is zero-extended.  */
  return (unsigned char) __builtin_ia32_vec_ext_v16qi ((__v16qi)__X, __N);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi32 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v4si ((__v4si)__X, __N);
}

/* The 64-bit element form needs PEXTRQ, available only in 64-bit mode.  */
#ifdef __x86_64__
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi64 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v2di ((__v2di)__X, __N);
}
#endif
#else
#define _mm_extract_epi8(X, N) \
  ((int) (unsigned char) __builtin_ia32_vec_ext_v16qi ((__v16qi)(__m128i)(X), (int)(N)))
#define _mm_extract_epi32(X, N) \
  ((int) __builtin_ia32_vec_ext_v4si ((__v4si)(__m128i)(X), (int)(N)))

#ifdef __x86_64__
#define _mm_extract_epi64(X, N) \
  ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(__m128i)(X), (int)(N)))
#endif
#endif
472 /* Return horizontal packed word minimum and its index in bits [15:0]
473 and bits [18:16] respectively. */
474 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
475 _mm_minpos_epu16 (__m128i __X
)
477 return (__m128i
) __builtin_ia32_phminposuw128 ((__v8hi
)__X
);
480 /* Packed integer sign-extension. */
482 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
483 _mm_cvtepi8_epi32 (__m128i __X
)
485 return (__m128i
) __builtin_ia32_pmovsxbd128 ((__v16qi
)__X
);
488 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
489 _mm_cvtepi16_epi32 (__m128i __X
)
491 return (__m128i
) __builtin_ia32_pmovsxwd128 ((__v8hi
)__X
);
494 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
495 _mm_cvtepi8_epi64 (__m128i __X
)
497 return (__m128i
) __builtin_ia32_pmovsxbq128 ((__v16qi
)__X
);
500 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
501 _mm_cvtepi32_epi64 (__m128i __X
)
503 return (__m128i
) __builtin_ia32_pmovsxdq128 ((__v4si
)__X
);
506 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
507 _mm_cvtepi16_epi64 (__m128i __X
)
509 return (__m128i
) __builtin_ia32_pmovsxwq128 ((__v8hi
)__X
);
512 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
513 _mm_cvtepi8_epi16 (__m128i __X
)
515 return (__m128i
) __builtin_ia32_pmovsxbw128 ((__v16qi
)__X
);
518 /* Packed integer zero-extension. */
520 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
521 _mm_cvtepu8_epi32 (__m128i __X
)
523 return (__m128i
) __builtin_ia32_pmovzxbd128 ((__v16qi
)__X
);
526 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
527 _mm_cvtepu16_epi32 (__m128i __X
)
529 return (__m128i
) __builtin_ia32_pmovzxwd128 ((__v8hi
)__X
);
532 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
533 _mm_cvtepu8_epi64 (__m128i __X
)
535 return (__m128i
) __builtin_ia32_pmovzxbq128 ((__v16qi
)__X
);
538 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
539 _mm_cvtepu32_epi64 (__m128i __X
)
541 return (__m128i
) __builtin_ia32_pmovzxdq128 ((__v4si
)__X
);
544 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
545 _mm_cvtepu16_epi64 (__m128i __X
)
547 return (__m128i
) __builtin_ia32_pmovzxwq128 ((__v8hi
)__X
);
550 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
551 _mm_cvtepu8_epi16 (__m128i __X
)
553 return (__m128i
) __builtin_ia32_pmovzxbw128 ((__v16qi
)__X
);
556 /* Pack 8 double words from 2 operands into 8 words of result with
557 unsigned saturation. */
558 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
559 _mm_packus_epi32 (__m128i __X
, __m128i __Y
)
561 return (__m128i
) __builtin_ia32_packusdw128 ((__v4si
)__X
, (__v4si
)__Y
);
/* Sum absolute 8-bit integer difference of adjacent groups of 4
   byte integers in the first 2 operands.  Starting offsets within
   operands are determined by the 3rd mask operand.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)__X,
					      (__v16qi)__Y, __M);
}
#else
#define _mm_mpsadbw_epu8(X, Y, M) \
  ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(__m128i)(X), \
					(__v16qi)(__m128i)(Y), (int)(M)))
#endif
581 /* Load double quadword using non-temporal aligned hint. */
582 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
583 _mm_stream_load_si128 (__m128i
*__X
)
585 return (__m128i
) __builtin_ia32_movntdqa ((__v2di
*) __X
);
/* Temporarily enable SSE4.2 code generation for the string-compare and
   64-bit compare intrinsics below.  */
#ifndef __SSE4_2__
#pragma GCC push_options
#pragma GCC target("sse4.2")
#define __DISABLE_SSE4_2__
#endif /* __SSE4_2__ */
/* These macros specify the source data format.  */
#define _SIDD_UBYTE_OPS			0x00
#define _SIDD_UWORD_OPS			0x01
#define _SIDD_SBYTE_OPS			0x02
#define _SIDD_SWORD_OPS			0x03

/* These macros specify the comparison operation.  */
#define _SIDD_CMP_EQUAL_ANY		0x00
#define _SIDD_CMP_RANGES		0x04
#define _SIDD_CMP_EQUAL_EACH		0x08
#define _SIDD_CMP_EQUAL_ORDERED		0x0c

/* These macros specify the polarity.  */
#define _SIDD_POSITIVE_POLARITY		0x00
#define _SIDD_NEGATIVE_POLARITY		0x10
#define _SIDD_MASKED_POSITIVE_POLARITY	0x20
#define _SIDD_MASKED_NEGATIVE_POLARITY	0x30

/* These macros specify the output selection in _mm_cmpXstri ().  */
#define _SIDD_LEAST_SIGNIFICANT		0x00
#define _SIDD_MOST_SIGNIFICANT		0x40

/* These macros specify the output selection in _mm_cmpXstrm ().  */
#define _SIDD_BIT_MASK			0x00
#define _SIDD_UNIT_MASK			0x40
/* Intrinsics for text/string processing.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrm (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)__X,
						(__v16qi)__Y,
						__M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistri (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistri128 ((__v16qi)__X,
				      (__v16qi)__Y,
				      __M);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrm (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return (__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)__X, __LX,
						(__v16qi)__Y, __LY,
						__M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestri128 ((__v16qi)__X, __LX,
				      (__v16qi)__Y, __LY,
				      __M);
}
#else
#define _mm_cmpistrm(X, Y, M) \
  ((__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)(__m128i)(X), \
					  (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistri(X, Y, M) \
  ((int) __builtin_ia32_pcmpistri128 ((__v16qi)(__m128i)(X), \
				      (__v16qi)(__m128i)(Y), (int)(M)))

#define _mm_cmpestrm(X, LX, Y, LY, M) \
  ((__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)(__m128i)(X), \
					  (int)(LX), (__v16qi)(__m128i)(Y), \
					  (int)(LY), (int)(M)))
#define _mm_cmpestri(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestri128 ((__v16qi)(__m128i)(X), (int)(LX), \
				      (__v16qi)(__m128i)(Y), (int)(LY), \
				      (int)(M)))
#endif
/* Intrinsics for text/string processing and reading values of
   EFlags.  */

#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistra (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistria128 ((__v16qi)__X,
				       (__v16qi)__Y,
				       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrc (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistric128 ((__v16qi)__X,
				       (__v16qi)__Y,
				       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistro (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistrio128 ((__v16qi)__X,
				       (__v16qi)__Y,
				       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrs (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistris128 ((__v16qi)__X,
				       (__v16qi)__Y,
				       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrz (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistriz128 ((__v16qi)__X,
				       (__v16qi)__Y,
				       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestra (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestria128 ((__v16qi)__X, __LX,
				       (__v16qi)__Y, __LY,
				       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrc (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestric128 ((__v16qi)__X, __LX,
				       (__v16qi)__Y, __LY,
				       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestro (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestrio128 ((__v16qi)__X, __LX,
				       (__v16qi)__Y, __LY,
				       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrs (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestris128 ((__v16qi)__X, __LX,
				       (__v16qi)__Y, __LY,
				       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrz (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestriz128 ((__v16qi)__X, __LX,
				       (__v16qi)__Y, __LY,
				       __M);
}
#else
#define _mm_cmpistra(X, Y, M) \
  ((int) __builtin_ia32_pcmpistria128 ((__v16qi)(__m128i)(X), \
				       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistrc(X, Y, M) \
  ((int) __builtin_ia32_pcmpistric128 ((__v16qi)(__m128i)(X), \
				       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistro(X, Y, M) \
  ((int) __builtin_ia32_pcmpistrio128 ((__v16qi)(__m128i)(X), \
				       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistrs(X, Y, M) \
  ((int) __builtin_ia32_pcmpistris128 ((__v16qi)(__m128i)(X), \
				       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistrz(X, Y, M) \
  ((int) __builtin_ia32_pcmpistriz128 ((__v16qi)(__m128i)(X), \
				       (__v16qi)(__m128i)(Y), (int)(M)))

#define _mm_cmpestra(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestria128 ((__v16qi)(__m128i)(X), (int)(LX), \
				       (__v16qi)(__m128i)(Y), (int)(LY), \
				       (int)(M)))
#define _mm_cmpestrc(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestric128 ((__v16qi)(__m128i)(X), (int)(LX), \
				       (__v16qi)(__m128i)(Y), (int)(LY), \
				       (int)(M)))
#define _mm_cmpestro(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestrio128 ((__v16qi)(__m128i)(X), (int)(LX), \
				       (__v16qi)(__m128i)(Y), (int)(LY), \
				       (int)(M)))
#define _mm_cmpestrs(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestris128 ((__v16qi)(__m128i)(X), (int)(LX), \
				       (__v16qi)(__m128i)(Y), (int)(LY), \
				       (int)(M)))
#define _mm_cmpestrz(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestriz128 ((__v16qi)(__m128i)(X), (int)(LX), \
				       (__v16qi)(__m128i)(Y), (int)(LY), \
				       (int)(M)))
#endif
794 /* Packed integer 64-bit comparison, zeroing or filling with ones
795 corresponding parts of result. */
796 extern __inline __m128i
__attribute__((__gnu_inline__
, __always_inline__
, __artificial__
))
797 _mm_cmpgt_epi64 (__m128i __X
, __m128i __Y
)
799 return (__m128i
) ((__v2di
)__X
> (__v2di
)__Y
);
#ifdef __DISABLE_SSE4_2__
#undef __DISABLE_SSE4_2__
#pragma GCC pop_options
#endif /* __DISABLE_SSE4_2__ */

#ifdef __DISABLE_SSE4_1__
#undef __DISABLE_SSE4_1__
#pragma GCC pop_options
#endif /* __DISABLE_SSE4_1__ */

#include <popcntintrin.h>

/* Temporarily enable CRC32 code generation for the accumulate-CRC32
   intrinsics below.  */
#ifndef __CRC32__
#pragma GCC push_options
#pragma GCC target("crc32")
#define __DISABLE_CRC32__
#endif /* __CRC32__ */
/* Accumulate CRC32 (polynomial 0x11EDC6F41) value.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u8 (unsigned int __C, unsigned char __V)
{
  return __builtin_ia32_crc32qi (__C, __V);
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u16 (unsigned int __C, unsigned short __V)
{
  return __builtin_ia32_crc32hi (__C, __V);
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u32 (unsigned int __C, unsigned int __V)
{
  return __builtin_ia32_crc32si (__C, __V);
}

/* The 64-bit accumulate form (CRC32 r64) exists only in 64-bit mode.  */
#ifdef __x86_64__
extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u64 (unsigned long long __C, unsigned long long __V)
{
  return __builtin_ia32_crc32di (__C, __V);
}
#endif
847 #ifdef __DISABLE_CRC32__
848 #undef __DISABLE_CRC32__
849 #pragma GCC pop_options
850 #endif /* __DISABLE_CRC32__ */
852 #endif /* _SMMINTRIN_H_INCLUDED */