/* Copyright (C) 2007-2023 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 10.0.  */

#ifndef _SMMINTRIN_H_INCLUDED
#define _SMMINTRIN_H_INCLUDED

/* We need definitions from the SSSE3, SSE3, SSE2 and SSE header
   files.  */
#include <tmmintrin.h>

#ifndef __SSE4_1__
#pragma GCC push_options
#pragma GCC target("sse4.1")
#define __DISABLE_SSE4_1__
#endif /* __SSE4_1__ */

/* Rounding mode macros.  */
#define _MM_FROUND_TO_NEAREST_INT  0x00
#define _MM_FROUND_TO_NEG_INF      0x01
#define _MM_FROUND_TO_POS_INF      0x02
#define _MM_FROUND_TO_ZERO         0x03
#define _MM_FROUND_CUR_DIRECTION   0x04

#define _MM_FROUND_RAISE_EXC       0x00
#define _MM_FROUND_NO_EXC          0x08

#define _MM_FROUND_NINT \
  (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_FLOOR \
  (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_CEIL \
  (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_TRUNC \
  (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_RINT \
  (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_NEARBYINT \
  (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)

/* Test Instruction */
/* Packed integer 128-bit bitwise comparison.  Return 1 if
   (__V & __M) == 0.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_testz_si128 (__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestz128 ((__v2di)__M, (__v2di)__V);
}

/* Packed integer 128-bit bitwise comparison.  Return 1 if
   (__V & ~__M) == 0.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_testc_si128 (__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestc128 ((__v2di)__M, (__v2di)__V);
}

/* Packed integer 128-bit bitwise comparison.  Return 1 if
   (__V & __M) != 0 && (__V & ~__M) != 0.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_testnzc_si128 (__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestnzc128 ((__v2di)__M, (__v2di)__V);
}

/* Macros for packed integer 128-bit comparison intrinsics.  */
#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))

#define _mm_test_all_ones(V) \
  _mm_testc_si128 ((V), _mm_cmpeq_epi32 ((V), (V)))

#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128 ((M), (V))

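/* Usage sketch (illustrative only; __v is a hypothetical __m128i
   value).  The PTEST-based helpers above return an int flag directly,
   so mask tests need no lane extraction:

     __m128i __m = _mm_set1_epi8 (0x0f);
     int __none = _mm_test_all_zeros (__m, __v);   1 iff (__v & __m) == 0
     int __ones = _mm_test_all_ones (__v);         1 iff __v is all-ones
   */
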
/* Packed/scalar double precision floating point rounding.  */

#ifdef __OPTIMIZE__
extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_round_pd (__m128d __V, const int __M)
{
  return (__m128d) __builtin_ia32_roundpd ((__v2df)__V, __M);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_round_sd (__m128d __D, __m128d __V, const int __M)
{
  return (__m128d) __builtin_ia32_roundsd ((__v2df)__D,
                                           (__v2df)__V,
                                           __M);
}
#else
#define _mm_round_pd(V, M) \
  ((__m128d) __builtin_ia32_roundpd ((__v2df)(__m128d)(V), (int)(M)))

#define _mm_round_sd(D, V, M) \
  ((__m128d) __builtin_ia32_roundsd ((__v2df)(__m128d)(D), \
                                     (__v2df)(__m128d)(V), (int)(M)))
#endif

/* Packed/scalar single precision floating point rounding.  */

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_round_ps (__m128 __V, const int __M)
{
  return (__m128) __builtin_ia32_roundps ((__v4sf)__V, __M);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_round_ss (__m128 __D, __m128 __V, const int __M)
{
  return (__m128) __builtin_ia32_roundss ((__v4sf)__D,
                                          (__v4sf)__V,
                                          __M);
}
#else
#define _mm_round_ps(V, M) \
  ((__m128) __builtin_ia32_roundps ((__v4sf)(__m128)(V), (int)(M)))

#define _mm_round_ss(D, V, M) \
  ((__m128) __builtin_ia32_roundss ((__v4sf)(__m128)(D), \
                                    (__v4sf)(__m128)(V), (int)(M)))
#endif

/* Macros for ceil/floor intrinsics.  */
#define _mm_ceil_pd(V)     _mm_round_pd ((V), _MM_FROUND_CEIL)
#define _mm_ceil_sd(D, V)  _mm_round_sd ((D), (V), _MM_FROUND_CEIL)

#define _mm_floor_pd(V)    _mm_round_pd ((V), _MM_FROUND_FLOOR)
#define _mm_floor_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_FLOOR)

#define _mm_ceil_ps(V)     _mm_round_ps ((V), _MM_FROUND_CEIL)
#define _mm_ceil_ss(D, V)  _mm_round_ss ((D), (V), _MM_FROUND_CEIL)

#define _mm_floor_ps(V)    _mm_round_ps ((V), _MM_FROUND_FLOOR)
#define _mm_floor_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_FLOOR)

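/* Usage sketch (illustrative only).  _MM_FROUND_FLOOR is just
   _MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC, so the two calls below
   are equivalent; both round {1.7, -1.2} to {1.0, -2.0}:

     __m128d __x  = _mm_set_pd (-1.2, 1.7);
     __m128d __r0 = _mm_floor_pd (__x);
     __m128d __r1 = _mm_round_pd (__x, _MM_FROUND_TO_NEG_INF
                                       | _MM_FROUND_RAISE_EXC);
   */
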
/* SSE4.1 */

/* Integer blend instructions - select data from 2 sources using
   constant/variable mask.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__X,
                                              (__v8hi)__Y,
                                              __M);
}
#else
#define _mm_blend_epi16(X, Y, M) \
  ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(X), \
                                        (__v8hi)(__m128i)(Y), (int)(M)))
#endif

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blendv_epi8 (__m128i __X, __m128i __Y, __m128i __M)
{
  return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__X,
                                               (__v16qi)__Y,
                                               (__v16qi)__M);
}

/* Single precision floating point blend instructions - select data
   from 2 sources using constant/variable mask.  */

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_blendps ((__v4sf)__X,
                                          (__v4sf)__Y,
                                          __M);
}
#else
#define _mm_blend_ps(X, Y, M) \
  ((__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(X), \
                                    (__v4sf)(__m128)(Y), (int)(M)))
#endif

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blendv_ps (__m128 __X, __m128 __Y, __m128 __M)
{
  return (__m128) __builtin_ia32_blendvps ((__v4sf)__X,
                                           (__v4sf)__Y,
                                           (__v4sf)__M);
}

/* Double precision floating point blend instructions - select data
   from 2 sources using constant/variable mask.  */

#ifdef __OPTIMIZE__
extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_blendpd ((__v2df)__X,
                                           (__v2df)__Y,
                                           __M);
}
#else
#define _mm_blend_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(X), \
                                     (__v2df)(__m128d)(Y), (int)(M)))
#endif

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blendv_pd (__m128d __X, __m128d __Y, __m128d __M)
{
  return (__m128d) __builtin_ia32_blendvpd ((__v2df)__X,
                                            (__v2df)__Y,
                                            (__v2df)__M);
}

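/* Usage sketch (illustrative only; __x, __y and __msk are
   hypothetical).  In _mm_blend_epi16 bit i of the immediate selects
   word i from __Y (bit set) or __X (bit clear); the variable-mask forms
   instead read the selector from the most significant bit of each mask
   element:

     __m128i __lo_x_hi_y = _mm_blend_epi16 (__x, __y, 0xf0);
     __m128i __bytewise  = _mm_blendv_epi8 (__x, __y, __msk);
   */
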
/* Dot product instructions with mask-defined summing and zeroing parts
   of result.  */

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_dp_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_dpps ((__v4sf)__X,
                                       (__v4sf)__Y,
                                       __M);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_dp_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_dppd ((__v2df)__X,
                                        (__v2df)__Y,
                                        __M);
}
#else
#define _mm_dp_ps(X, Y, M) \
  ((__m128) __builtin_ia32_dpps ((__v4sf)(__m128)(X), \
                                 (__v4sf)(__m128)(Y), (int)(M)))

#define _mm_dp_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_dppd ((__v2df)(__m128d)(X), \
                                  (__v2df)(__m128d)(Y), (int)(M)))
#endif

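/* Usage sketch (illustrative only; __a and __b are hypothetical).  For
   _mm_dp_ps the high four mask bits select which products enter the sum
   and the low four bits select which result lanes receive it; 0xf1
   therefore computes a full 4-element dot product and stores it in lane
   0, zeroing the rest:

     __m128 __d = _mm_dp_ps (__a, __b, 0xf1);
     float __dot = _mm_cvtss_f32 (__d);
   */
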
/* Packed integer 64-bit comparison, zeroing or filling with ones
   corresponding parts of result.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_epi64 (__m128i __X, __m128i __Y)
{
  return (__m128i) ((__v2di)__X == (__v2di)__Y);
}

/* Min/max packed integer instructions.  */

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_epi8 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi)__X, (__v16qi)__Y);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_epi8 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi)__X, (__v16qi)__Y);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_epu16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi)__X, (__v8hi)__Y);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_epu16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi)__X, (__v8hi)__Y);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminsd128 ((__v4si)__X, (__v4si)__Y);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si)__X, (__v4si)__Y);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_epu32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminud128 ((__v4si)__X, (__v4si)__Y);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_epu32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxud128 ((__v4si)__X, (__v4si)__Y);
}

/* Packed integer 32-bit multiplication with truncation of upper
   halves of results.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mullo_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) ((__v4su)__X * (__v4su)__Y);
}

/* Packed integer 32-bit multiplication of 2 pairs of operands
   with two 64-bit results.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__X, (__v4si)__Y);
}

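/* Usage sketch (illustrative only; __a and __b are hypothetical).
   _mm_mullo_epi32 keeps the low 32 bits of each of the four products,
   while _mm_mul_epi32 multiplies only lanes 0 and 2 and keeps the full
   signed 64-bit results:

     __m128i __lo   = _mm_mullo_epi32 (__a, __b);   four 32-bit products
     __m128i __wide = _mm_mul_epi32 (__a, __b);     two 64-bit products
   */
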
/* Insert single precision float into packed single precision array
   element selected by index N.  The bits [7-6] of N define S
   index, the bits [5-4] define D index, and bits [3-0] define
   zeroing mask for D.  */

#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_ps (__m128 __D, __m128 __S, const int __N)
{
  return (__m128) __builtin_ia32_insertps128 ((__v4sf)__D,
                                              (__v4sf)__S,
                                              __N);
}
#else
#define _mm_insert_ps(D, S, N) \
  ((__m128) __builtin_ia32_insertps128 ((__v4sf)(__m128)(D), \
                                        (__v4sf)(__m128)(S), (int)(N)))
#endif

/* Helper macro to create the N value for _mm_insert_ps.  */
#define _MM_MK_INSERTPS_NDX(S, D, M) (((S) << 6) | ((D) << 4) | (M))

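/* Usage sketch (illustrative only; __d and __s are hypothetical): copy
   element 2 of __s into element 0 of __d, zeroing element 3 of the
   result (zero-mask bit 0x8 selects element 3):

     __m128 __r = _mm_insert_ps (__d, __s,
                                 _MM_MK_INSERTPS_NDX (2, 0, 0x8));
   */
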
/* Extract binary representation of single precision float from packed
   single precision array element of X selected by index N.  */

#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_ps (__m128 __X, const int __N)
{
  union { int __i; float __f; } __tmp;
  __tmp.__f = __builtin_ia32_vec_ext_v4sf ((__v4sf)__X, __N);
  return __tmp.__i;
}
#else
#define _mm_extract_ps(X, N) \
  (__extension__ \
   ({ \
     union { int __i; float __f; } __tmp; \
     __tmp.__f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(__m128)(X), \
                                              (int)(N)); \
     __tmp.__i; \
   }))
#endif

/* Extract binary representation of single precision float into
   D from packed single precision array element of S selected
   by index N.  */
#define _MM_EXTRACT_FLOAT(D, S, N) \
  { (D) = __builtin_ia32_vec_ext_v4sf ((__v4sf)(S), (N)); }

/* Extract specified single precision float element into the lower
   part of __m128.  */
#define _MM_PICK_OUT_PS(X, N) \
  _mm_insert_ps (_mm_setzero_ps (), (X), \
                 _MM_MK_INSERTPS_NDX ((N), 0, 0x0e))

/* Insert integer, S, into packed integer array element of D
   selected by index N.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_epi8 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)__D,
                                                 __S, __N);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_epi32 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v4si ((__v4si)__D,
                                                __S, __N);
}

#ifdef __x86_64__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_epi64 (__m128i __D, long long __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v2di ((__v2di)__D,
                                                __S, __N);
}
#endif
#else
#define _mm_insert_epi8(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(__m128i)(D), \
                                           (int)(S), (int)(N)))

#define _mm_insert_epi32(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(__m128i)(D), \
                                          (int)(S), (int)(N)))

#ifdef __x86_64__
#define _mm_insert_epi64(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(__m128i)(D), \
                                          (long long)(S), (int)(N)))
#endif
#endif

/* Extract integer from packed integer array element of X selected by
   index N.  */

#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi8 (__m128i __X, const int __N)
{
  return (unsigned char) __builtin_ia32_vec_ext_v16qi ((__v16qi)__X, __N);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi32 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v4si ((__v4si)__X, __N);
}

#ifdef __x86_64__
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi64 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v2di ((__v2di)__X, __N);
}
#endif
#else
#define _mm_extract_epi8(X, N) \
  ((int) (unsigned char) __builtin_ia32_vec_ext_v16qi ((__v16qi)(__m128i)(X), (int)(N)))
#define _mm_extract_epi32(X, N) \
  ((int) __builtin_ia32_vec_ext_v4si ((__v4si)(__m128i)(X), (int)(N)))

#ifdef __x86_64__
#define _mm_extract_epi64(X, N) \
  ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(__m128i)(X), (int)(N)))
#endif
#endif

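/* Usage sketch (illustrative only; __v is hypothetical).  The index
   must be a compile-time constant, and extracting an element narrower
   than int returns it zero-extended:

     __m128i __v2 = _mm_insert_epi32 (__v, 42, 3);   lane 3 becomes 42
     int __b = _mm_extract_epi8 (__v2, 0);           always in 0..255
   */
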
/* Return horizontal packed word minimum and its index in bits [15:0]
   and bits [18:16] respectively.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_minpos_epu16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_phminposuw128 ((__v8hi)__X);
}

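/* Usage sketch (illustrative only; __v is hypothetical): splitting the
   result into the minimum word and its index:

     __m128i __r = _mm_minpos_epu16 (__v);
     int __min = _mm_extract_epi16 (__r, 0);         bits [15:0]
     int __idx = _mm_extract_epi16 (__r, 1) & 0x7;   bits [18:16]
   */
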
/* Packed integer sign-extension.  */

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepi8_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbd128 ((__v16qi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepi16_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxwd128 ((__v8hi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepi8_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbq128 ((__v16qi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepi32_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxdq128 ((__v4si)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepi16_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxwq128 ((__v8hi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepi8_epi16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbw128 ((__v16qi)__X);
}

/* Packed integer zero-extension.  */

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepu8_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbd128 ((__v16qi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepu16_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxwd128 ((__v8hi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepu8_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbq128 ((__v16qi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepu32_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxdq128 ((__v4si)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepu16_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxwq128 ((__v8hi)__X);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtepu8_epi16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbw128 ((__v16qi)__X);
}

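/* Usage sketch (illustrative only).  Both families widen the low
   elements of the source; the epu forms zero-extend while the epi forms
   sign-extend:

     __m128i __bytes = _mm_set1_epi8 (-1);
     __m128i __uw = _mm_cvtepu8_epi16 (__bytes);   each word is 0x00ff
     __m128i __sw = _mm_cvtepi8_epi16 (__bytes);   each word is 0xffff
   */
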
/* Pack 8 double words from 2 operands into 8 words of result with
   unsigned saturation.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_packus_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_packusdw128 ((__v4si)__X, (__v4si)__Y);
}

/* Sum absolute 8-bit integer difference of adjacent groups of 4
   byte integers in the first 2 operands.  Starting offsets within
   operands are determined by the 3rd mask operand.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)__X,
                                              (__v16qi)__Y, __M);
}
#else
#define _mm_mpsadbw_epu8(X, Y, M) \
  ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(__m128i)(X), \
                                        (__v16qi)(__m128i)(Y), (int)(M)))
#endif

/* Load double quadword using non-temporal aligned hint.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_load_si128 (__m128i *__X)
{
  return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __X);
}

#ifndef __SSE4_2__
#pragma GCC push_options
#pragma GCC target("sse4.2")
#define __DISABLE_SSE4_2__
#endif /* __SSE4_2__ */

/* These macros specify the source data format.  */
#define _SIDD_UBYTE_OPS                 0x00
#define _SIDD_UWORD_OPS                 0x01
#define _SIDD_SBYTE_OPS                 0x02
#define _SIDD_SWORD_OPS                 0x03

/* These macros specify the comparison operation.  */
#define _SIDD_CMP_EQUAL_ANY             0x00
#define _SIDD_CMP_RANGES                0x04
#define _SIDD_CMP_EQUAL_EACH            0x08
#define _SIDD_CMP_EQUAL_ORDERED         0x0c

/* These macros specify the polarity.  */
#define _SIDD_POSITIVE_POLARITY         0x00
#define _SIDD_NEGATIVE_POLARITY         0x10
#define _SIDD_MASKED_POSITIVE_POLARITY  0x20
#define _SIDD_MASKED_NEGATIVE_POLARITY  0x30

/* These macros specify the output selection in _mm_cmpXstri ().  */
#define _SIDD_LEAST_SIGNIFICANT         0x00
#define _SIDD_MOST_SIGNIFICANT          0x40

/* These macros specify the output selection in _mm_cmpXstrm ().  */
#define _SIDD_BIT_MASK                  0x00
#define _SIDD_UNIT_MASK                 0x40

/* Intrinsics for text/string processing.  */

#ifdef __OPTIMIZE__
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrm (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)__X,
                                                (__v16qi)__Y,
                                                __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistri (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistri128 ((__v16qi)__X,
                                      (__v16qi)__Y,
                                      __M);
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrm (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return (__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)__X, __LX,
                                                (__v16qi)__Y, __LY,
                                                __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestri128 ((__v16qi)__X, __LX,
                                      (__v16qi)__Y, __LY,
                                      __M);
}
#else
#define _mm_cmpistrm(X, Y, M) \
  ((__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)(__m128i)(X), \
                                          (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistri(X, Y, M) \
  ((int) __builtin_ia32_pcmpistri128 ((__v16qi)(__m128i)(X), \
                                      (__v16qi)(__m128i)(Y), (int)(M)))

#define _mm_cmpestrm(X, LX, Y, LY, M) \
  ((__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)(__m128i)(X), \
                                          (int)(LX), (__v16qi)(__m128i)(Y), \
                                          (int)(LY), (int)(M)))
#define _mm_cmpestri(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestri128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                      (__v16qi)(__m128i)(Y), (int)(LY), \
                                      (int)(M)))
#endif

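/* Usage sketch (illustrative only; __needle and __haystack are
   hypothetical 16-byte chunks).  A substring search composes the
   _SIDD_* macros above into the immediate; with unsigned-byte ordered
   comparison, PCMPISTRI returns the index of the first match, or 16
   when there is none:

     int __idx = _mm_cmpistri (__needle, __haystack,
                               _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED
                               | _SIDD_LEAST_SIGNIFICANT);
   */
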
/* Intrinsics for text/string processing and reading values of
   EFlags.  */

#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistra (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistria128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrc (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistric128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistro (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistrio128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrs (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistris128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpistrz (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistriz128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestra (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestria128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrc (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestric128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestro (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestrio128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrs (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestris128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpestrz (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestriz128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}
#else
#define _mm_cmpistra(X, Y, M) \
  ((int) __builtin_ia32_pcmpistria128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistrc(X, Y, M) \
  ((int) __builtin_ia32_pcmpistric128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistro(X, Y, M) \
  ((int) __builtin_ia32_pcmpistrio128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistrs(X, Y, M) \
  ((int) __builtin_ia32_pcmpistris128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))
#define _mm_cmpistrz(X, Y, M) \
  ((int) __builtin_ia32_pcmpistriz128 ((__v16qi)(__m128i)(X), \
                                       (__v16qi)(__m128i)(Y), (int)(M)))

#define _mm_cmpestra(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestria128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#define _mm_cmpestrc(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestric128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#define _mm_cmpestro(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestrio128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#define _mm_cmpestrs(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestris128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#define _mm_cmpestrz(X, LX, Y, LY, M) \
  ((int) __builtin_ia32_pcmpestriz128 ((__v16qi)(__m128i)(X), (int)(LX), \
                                       (__v16qi)(__m128i)(Y), (int)(LY), \
                                       (int)(M)))
#endif

/* Packed integer 64-bit comparison, zeroing or filling with ones
   corresponding parts of result.  */
extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_epi64 (__m128i __X, __m128i __Y)
{
  return (__m128i) ((__v2di)__X > (__v2di)__Y);
}

#ifdef __DISABLE_SSE4_2__
#undef __DISABLE_SSE4_2__
#pragma GCC pop_options
#endif /* __DISABLE_SSE4_2__ */

#ifdef __DISABLE_SSE4_1__
#undef __DISABLE_SSE4_1__
#pragma GCC pop_options
#endif /* __DISABLE_SSE4_1__ */

#include <popcntintrin.h>

#ifndef __CRC32__
#pragma GCC push_options
#pragma GCC target("crc32")
#define __DISABLE_CRC32__
#endif /* __CRC32__ */

/* Accumulate CRC32 (polynomial 0x11EDC6F41) value.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u8 (unsigned int __C, unsigned char __V)
{
  return __builtin_ia32_crc32qi (__C, __V);
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u16 (unsigned int __C, unsigned short __V)
{
  return __builtin_ia32_crc32hi (__C, __V);
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u32 (unsigned int __C, unsigned int __V)
{
  return __builtin_ia32_crc32si (__C, __V);
}

#ifdef __x86_64__
extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_crc32_u64 (unsigned long long __C, unsigned long long __V)
{
  return __builtin_ia32_crc32di (__C, __V);
}
#endif

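/* Usage sketch (illustrative only; __buf and __n are hypothetical).
   The CRC32 intrinsics accumulate, so a buffer is processed by feeding
   each result into the next call, conventionally seeded with all-ones:

     unsigned int __c = 0xffffffff;
     for (size_t __i = 0; __i < __n; __i++)
       __c = _mm_crc32_u8 (__c, __buf[__i]);
   */
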
#ifdef __DISABLE_CRC32__
#undef __DISABLE_CRC32__
#pragma GCC pop_options
#endif /* __DISABLE_CRC32__ */

#endif /* _SMMINTRIN_H_INCLUDED */