/* Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _EMMINTRIN_H_INCLUDED
#define _EMMINTRIN_H_INCLUDED

#ifndef __SSE2__
# error "SSE2 instruction set not enabled"
#else

/* We need definitions from the SSE header files.  */
#include <xmmintrin.h>

/* SSE2 */
typedef double __v2df __attribute__ ((__vector_size__ (16)));
typedef long long __v2di __attribute__ ((__vector_size__ (16)));
typedef int __v4si __attribute__ ((__vector_size__ (16)));
typedef short __v8hi __attribute__ ((__vector_size__ (16)));
typedef char __v16qi __attribute__ ((__vector_size__ (16)));

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));

/* Create a selector for use with the SHUFPD instruction.  */
#define _MM_SHUFFLE2(fp1,fp0) \
 (((fp1) << 1) | (fp0))

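/* For illustration: bit 0 of the selector picks the element taken from
   the first operand for the low half of the result, bit 1 the element
   taken from the second operand for the high half.  For example,
   _MM_SHUFFLE2 (0, 1) == 0x1, so shufpd (A, B, 0x1) yields
   { A[1], B[0] }.  */
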
/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_set_sd (double __F)
{
  return __extension__ (__m128d){ __F, 0 };
}

/* Create a vector with both elements equal to F.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_set1_pd (double __F)
{
  return __extension__ (__m128d){ __F, __F };
}

static __inline __m128d __attribute__((__always_inline__))
_mm_set_pd1 (double __F)
{
  return _mm_set1_pd (__F);
}

/* Create a vector with the lower value X and upper value W.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_set_pd (double __W, double __X)
{
  return __extension__ (__m128d){ __X, __W };
}

/* Create a vector with the lower value W and upper value X.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_setr_pd (double __W, double __X)
{
  return __extension__ (__m128d){ __W, __X };
}

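/* For illustration: _mm_set_pd takes its arguments high element first
   and _mm_setr_pd low element first, so _mm_set_pd (2.0, 1.0) and
   _mm_setr_pd (1.0, 2.0) both build the vector whose element 0 is 1.0
   and element 1 is 2.0.  */
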
/* Create a vector of zeros.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_setzero_pd (void)
{
  return __extension__ (__m128d){ 0.0, 0.0 };
}

/* Sets the low DPFP value of A from the low value of B.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_move_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
}

/* Load two DPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_load_pd (double const *__P)
{
  return *(__m128d *)__P;
}

/* Load two DPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_loadu_pd (double const *__P)
{
  return __builtin_ia32_loadupd (__P);
}

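/* Note: _mm_load_pd on an address that is not 16-byte aligned faults
   at run time (MOVAPD requires alignment), while _mm_loadu_pd accepts
   any address at some extra cost on older processors.  */
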
/* Create a vector with both elements equal to *P.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_load1_pd (double const *__P)
{
  return _mm_set1_pd (*__P);
}

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_load_sd (double const *__P)
{
  return _mm_set_sd (*__P);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_load_pd1 (double const *__P)
{
  return _mm_load1_pd (__P);
}

/* Load two DPFP values in reverse order.  The address must be aligned.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_loadr_pd (double const *__P)
{
  __m128d __tmp = _mm_load_pd (__P);
  return __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,1));
}

/* Store two DPFP values.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_store_pd (double *__P, __m128d __A)
{
  *(__m128d *)__P = __A;
}

/* Store two DPFP values.  The address need not be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storeu_pd (double *__P, __m128d __A)
{
  __builtin_ia32_storeupd (__P, __A);
}

/* Stores the lower DPFP value.  */
static __inline void __attribute__((__always_inline__))
_mm_store_sd (double *__P, __m128d __A)
{
  *__P = __builtin_ia32_vec_ext_v2df (__A, 0);
}

static __inline double __attribute__((__always_inline__))
_mm_cvtsd_f64 (__m128d __A)
{
  return __builtin_ia32_vec_ext_v2df (__A, 0);
}

static __inline void __attribute__((__always_inline__))
_mm_storel_pd (double *__P, __m128d __A)
{
  _mm_store_sd (__P, __A);
}

/* Stores the upper DPFP value.  */
static __inline void __attribute__((__always_inline__))
_mm_storeh_pd (double *__P, __m128d __A)
{
  *__P = __builtin_ia32_vec_ext_v2df (__A, 1);
}

/* Store the lower DPFP value across two words.
   The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_store1_pd (double *__P, __m128d __A)
{
  _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,0)));
}

static __inline void __attribute__((__always_inline__))
_mm_store_pd1 (double *__P, __m128d __A)
{
  _mm_store1_pd (__P, __A);
}

/* Store two DPFP values in reverse order.  The address must be aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storer_pd (double *__P, __m128d __A)
{
  _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,1)));
}

static __inline int __attribute__((__always_inline__))
_mm_cvtsi128_si32 (__m128i __A)
{
  return __builtin_ia32_vec_ext_v4si ((__v4si)__A, 0);
}

#ifdef __x86_64__
/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtsi128_si64 (__m128i __A)
{
  return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtsi128_si64x (__m128i __A)
{
  return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0);
}
#endif

static __inline __m128d __attribute__((__always_inline__))
_mm_add_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_addpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_add_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_addsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_sub_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_subpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_sub_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_subsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_mul_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_mulpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_mul_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_mulsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_div_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_divpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_div_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_divsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_sqrt_pd (__m128d __A)
{
  return (__m128d)__builtin_ia32_sqrtpd ((__v2df)__A);
}

/* Return pair {sqrt (B[0]), A[1]}.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_sqrt_sd (__m128d __A, __m128d __B)
{
  __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
  return (__m128d)__builtin_ia32_sqrtsd ((__v2df)__tmp);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_min_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_minpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_min_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_minsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_max_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_maxpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_max_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_maxsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_and_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_andpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_andnot_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_andnpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_or_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_orpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_xor_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_xorpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpeq_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpeqpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmplt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpltpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmple_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmplepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpgt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpgtpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpge_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpgepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpneq_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpneqpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnlt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnltpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnle_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnlepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpngt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpngtpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnge_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpngepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpord_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpordpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpunord_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpunordpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpeq_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpeqsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmplt_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpltsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmple_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmplesd ((__v2df)__A, (__v2df)__B);
}

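/* There are no CMPGTSD/CMPGESD instructions; the gt/ge forms below
   (and their ngt/nge negations) are synthesized by swapping the
   operands of the lt/le comparison and merging the result with MOVSD
   so the upper element of __A is preserved.  */
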
static __inline __m128d __attribute__((__always_inline__))
_mm_cmpgt_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
    (__v2df) __builtin_ia32_cmpltsd ((__v2df) __B, (__v2df) __A));
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpge_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
    (__v2df) __builtin_ia32_cmplesd ((__v2df) __B, (__v2df) __A));
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpneq_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpneqsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnlt_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnltsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnle_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnlesd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpngt_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
    (__v2df) __builtin_ia32_cmpnltsd ((__v2df) __B, (__v2df) __A));
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnge_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
    (__v2df) __builtin_ia32_cmpnlesd ((__v2df) __B, (__v2df) __A));
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpord_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpordsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpunord_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpunordsd ((__v2df)__A, (__v2df)__B);
}

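/* The COMISD-based tests below raise the invalid-operation exception
   for any NaN operand; the UCOMISD ("unordered") tests that follow
   them signal only for signaling NaNs.  Each returns 0 or 1.  */
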
static __inline int __attribute__((__always_inline__))
_mm_comieq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comilt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comile_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comigt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdgt ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comige_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdge ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comineq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdneq ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomieq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdeq ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomilt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdlt ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomile_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdle ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomigt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdgt ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomige_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdge ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomineq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B);
}

/* Create a vector of Qi, where i is the element number.  */

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi64x (long long __q1, long long __q0)
{
  return __extension__ (__m128i)(__v2di){ __q0, __q1 };
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi64 (__m64 __q1, __m64 __q0)
{
  return _mm_set_epi64x ((long long)__q1, (long long)__q0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
{
  return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 };
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
               short __q3, short __q2, short __q1, short __q0)
{
  return __extension__ (__m128i)(__v8hi){
    __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 };
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
              char __q11, char __q10, char __q09, char __q08,
              char __q07, char __q06, char __q05, char __q04,
              char __q03, char __q02, char __q01, char __q00)
{
  return __extension__ (__m128i)(__v16qi){
    __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
    __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15
  };
}

/* Set all of the elements of the vector to A.  */

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi64x (long long __A)
{
  return _mm_set_epi64x (__A, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi64 (__m64 __A)
{
  return _mm_set_epi64 (__A, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi32 (int __A)
{
  return _mm_set_epi32 (__A, __A, __A, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi16 (short __A)
{
  return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi8 (char __A)
{
  return _mm_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A,
                       __A, __A, __A, __A, __A, __A, __A, __A);
}

/* Create a vector of Qi, where i is the element number.
   The parameter order is reversed from the _mm_set_epi* functions.  */

static __inline __m128i __attribute__((__always_inline__))
_mm_setr_epi64 (__m64 __q0, __m64 __q1)
{
  return _mm_set_epi64 (__q1, __q0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
{
  return _mm_set_epi32 (__q3, __q2, __q1, __q0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
                short __q4, short __q5, short __q6, short __q7)
{
  return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
               char __q04, char __q05, char __q06, char __q07,
               char __q08, char __q09, char __q10, char __q11,
               char __q12, char __q13, char __q14, char __q15)
{
  return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
                       __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
}

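/* For illustration: as with the _pd forms above,
   _mm_set_epi32 (3, 2, 1, 0) and _mm_setr_epi32 (0, 1, 2, 3) both
   build the vector whose element i holds the value i.  */
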
/* Load 128 bits of integer data.  The address must be 16-byte aligned.  */

static __inline __m128i __attribute__((__always_inline__))
_mm_load_si128 (__m128i const *__P)
{
  return *__P;
}

/* Load 128 bits of integer data.  The address need not be aligned.  */
static __inline __m128i __attribute__((__always_inline__))
_mm_loadu_si128 (__m128i const *__P)
{
  return (__m128i) __builtin_ia32_loaddqu ((char const *)__P);
}

/* Load 64 bits from *P into element 0 and zero the rest.  */
static __inline __m128i __attribute__((__always_inline__))
_mm_loadl_epi64 (__m128i const *__P)
{
  return _mm_set_epi64 ((__m64)0LL, *(__m64 *)__P);
}

static __inline void __attribute__((__always_inline__))
_mm_store_si128 (__m128i *__P, __m128i __B)
{
  *__P = __B;
}

static __inline void __attribute__((__always_inline__))
_mm_storeu_si128 (__m128i *__P, __m128i __B)
{
  __builtin_ia32_storedqu ((char *)__P, (__v16qi)__B);
}

static __inline void __attribute__((__always_inline__))
_mm_storel_epi64 (__m128i *__P, __m128i __B)
{
  *(long long *)__P = __builtin_ia32_vec_ext_v2di ((__v2di)__B, 0);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_movepi64_pi64 (__m128i __B)
{
  return (__m64) __builtin_ia32_vec_ext_v2di ((__v2di)__B, 0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_movpi64_epi64 (__m64 __A)
{
  return _mm_set_epi64 ((__m64)0LL, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_move_epi64 (__m128i __A)
{
  return _mm_set_epi64 ((__m64)0LL, _mm_movepi64_pi64 (__A));
}

/* Create a vector of zeros.  */
static __inline __m128i __attribute__((__always_inline__))
_mm_setzero_si128 (void)
{
  return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 };
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtepi32_pd (__m128i __A)
{
  return (__m128d)__builtin_ia32_cvtdq2pd ((__v4si) __A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvtepi32_ps (__m128i __A)
{
  return (__m128)__builtin_ia32_cvtdq2ps ((__v4si) __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtpd_epi32 (__m128d __A)
{
  return (__m128i)__builtin_ia32_cvtpd2dq ((__v2df) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvtpd_pi32 (__m128d __A)
{
  return (__m64)__builtin_ia32_cvtpd2pi ((__v2df) __A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpd_ps (__m128d __A)
{
  return (__m128)__builtin_ia32_cvtpd2ps ((__v2df) __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvttpd_epi32 (__m128d __A)
{
  return (__m128i)__builtin_ia32_cvttpd2dq ((__v2df) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvttpd_pi32 (__m128d __A)
{
  return (__m64)__builtin_ia32_cvttpd2pi ((__v2df) __A);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtpi32_pd (__m64 __A)
{
  return (__m128d)__builtin_ia32_cvtpi2pd ((__v2si) __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtps_epi32 (__m128 __A)
{
  return (__m128i)__builtin_ia32_cvtps2dq ((__v4sf) __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvttps_epi32 (__m128 __A)
{
  return (__m128i)__builtin_ia32_cvttps2dq ((__v4sf) __A);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtps_pd (__m128 __A)
{
  return (__m128d)__builtin_ia32_cvtps2pd ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__))
_mm_cvtsd_si32 (__m128d __A)
{
  return __builtin_ia32_cvtsd2si ((__v2df) __A);
}

#ifdef __x86_64__
/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtsd_si64 (__m128d __A)
{
  return __builtin_ia32_cvtsd2si64 ((__v2df) __A);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtsd_si64x (__m128d __A)
{
  return __builtin_ia32_cvtsd2si64 ((__v2df) __A);
}
#endif

static __inline int __attribute__((__always_inline__))
_mm_cvttsd_si32 (__m128d __A)
{
  return __builtin_ia32_cvttsd2si ((__v2df) __A);
}

#ifdef __x86_64__
/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvttsd_si64 (__m128d __A)
{
  return __builtin_ia32_cvttsd2si64 ((__v2df) __A);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvttsd_si64x (__m128d __A)
{
  return __builtin_ia32_cvttsd2si64 ((__v2df) __A);
}
#endif

static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsd_ss (__m128 __A, __m128d __B)
{
  return (__m128)__builtin_ia32_cvtsd2ss ((__v4sf) __A, (__v2df) __B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtsi32_sd (__m128d __A, int __B)
{
  return (__m128d)__builtin_ia32_cvtsi2sd ((__v2df) __A, __B);
}

#ifdef __x86_64__
/* Intel intrinsic.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_cvtsi64_sd (__m128d __A, long long __B)
{
  return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
}

/* Microsoft intrinsic.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_cvtsi64x_sd (__m128d __A, long long __B)
{
  return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
}
#endif

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtss_sd (__m128d __A, __m128 __B)
{
  return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B);
}

#define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, (__C)))

static __inline __m128d __attribute__((__always_inline__))
_mm_unpackhi_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_unpckhpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_unpacklo_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_unpcklpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_loadh_pd (__m128d __A, double const *__B)
{
  return (__m128d)__builtin_ia32_loadhpd ((__v2df)__A, __B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_loadl_pd (__m128d __A, double const *__B)
{
  return (__m128d)__builtin_ia32_loadlpd ((__v2df)__A, __B);
}

static __inline int __attribute__((__always_inline__))
_mm_movemask_pd (__m128d __A)
{
  return __builtin_ia32_movmskpd ((__v2df)__A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_packs_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packsswb128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_packs_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packssdw128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_packus_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packuswb128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpackhi_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhbw128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpackhi_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpackhi_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhdq128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpackhi_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhqdq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpacklo_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklbw128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpacklo_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpacklo_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckldq128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpacklo_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklqdq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_add_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_add_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_add_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_add_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_adds_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddsb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_adds_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_adds_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddusb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_adds_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddusw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sub_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sub_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sub_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sub_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_subs_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubsb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_subs_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_subs_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubusb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_subs_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubusw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_madd_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaddwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_mulhi_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmulhw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_mullo_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmullw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_mul_su32 (__m64 __A, __m64 __B)
{
  return (__m64)__builtin_ia32_pmuludq ((__v2si)__A, (__v2si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_mul_epu32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmuludq128 ((__v4si)__A, (__v4si)__B);
}

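/* Note: PMULUDQ multiplies only the even-numbered 32-bit elements
   (0 and 2) of each operand, producing two unsigned 64-bit
   products.  */
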
static __inline __m128i __attribute__((__always_inline__))
_mm_slli_epi16 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_slli_epi32 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_slli_epi64 (__m128i __A, const int __B)
{
  return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srai_epi16 (__m128i __A, const int __B)
{
  return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srai_epi32 (__m128i __A, const int __B)
{
  return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B);
}

#if 0
static __m128i __attribute__((__always_inline__))
_mm_srli_si128 (__m128i __A, const int __B)
{
  return ((__m128i)__builtin_ia32_psrldqi128 (__A, __B * 8));
}

static __m128i __attribute__((__always_inline__))
_mm_slli_si128 (__m128i __A, const int __B)
{
  return ((__m128i)__builtin_ia32_pslldqi128 (__A, __B * 8));
}
#else
#define _mm_srli_si128(__A, __B) \
  ((__m128i)__builtin_ia32_psrldqi128 (__A, (__B) * 8))
#define _mm_slli_si128(__A, __B) \
  ((__m128i)__builtin_ia32_pslldqi128 (__A, (__B) * 8))
#endif

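/* These shift the whole 128-bit value by __B bytes; the "* 8" converts
   the byte count taken by the intrinsic into the bit count expected by
   the builtin.  Shift counts of 16 or more produce zero.  */
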
static __inline __m128i __attribute__((__always_inline__))
_mm_srli_epi16 (__m128i __A, const int __B)
{
  return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srli_epi32 (__m128i __A, const int __B)
{
  return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srli_epi64 (__m128i __A, const int __B)
{
  return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sll_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psllw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sll_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pslld128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sll_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psllq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sra_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psraw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sra_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psrad128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srl_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psrlw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srl_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psrld128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srl_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psrlq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_and_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pand128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_andnot_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pandn128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_or_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_por128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_xor_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pxor128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpeq_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpeq_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpeq_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmplt_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__B, (__v16qi)__A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmplt_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__B, (__v8hi)__A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmplt_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__B, (__v4si)__A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpgt_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpgt_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpgt_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B);
}

#if 0
static __inline int __attribute__((__always_inline__))
_mm_extract_epi16 (__m128i const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v8hi ((__v8hi)__A, __N);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
{
  return (__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)__A, __D, __N);
}
#else
#define _mm_extract_epi16(A, N) \
  ((int) __builtin_ia32_vec_ext_v8hi ((__v8hi)(A), (N)))
#define _mm_insert_epi16(A, D, N) \
  ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(A), (D), (N)))
#endif

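/* PEXTRW and PINSRW encode the element index in the instruction
   itself, so N must be a compile-time constant; the macro forms above
   enforce this where the disabled inline functions could not.  */
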
static __inline __m128i __attribute__((__always_inline__))
_mm_max_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaxsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_max_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaxub128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_min_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pminsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_min_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pminub128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_movemask_epi8 (__m128i __A)
{
  return __builtin_ia32_pmovmskb128 ((__v16qi)__A);
}

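/* For illustration: _mm_movemask_epi8 gathers the sign bit of each of
   the 16 bytes into the low 16 bits of the result, so a result of
   0xffff means every byte had its most significant bit set.  */
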
static __inline __m128i __attribute__((__always_inline__))
_mm_mulhi_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B);
}

#define _mm_shufflehi_epi16(__A, __B) ((__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __B))
#define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __B))
#define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)__A, __B))

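/* For illustration: these take the same selector encoding as
   _MM_SHUFFLE from xmmintrin.h; e.g.
   _mm_shuffle_epi32 (__A, _MM_SHUFFLE (0, 1, 2, 3)) reverses the four
   32-bit elements of __A.  */
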
static __inline void __attribute__((__always_inline__))
_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
{
  __builtin_ia32_maskmovdqu ((__v16qi)__A, (__v16qi)__B, __C);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_avg_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pavgb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_avg_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pavgw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sad_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psadbw128 ((__v16qi)__A, (__v16qi)__B);
}

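/* Note: PSADBW sums the absolute differences of the corresponding
   bytes in each 8-byte half, leaving one 16-bit total in the low word
   of each 64-bit element of the result.  */
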
static __inline void __attribute__((__always_inline__))
_mm_stream_si32 (int *__A, int __B)
{
  __builtin_ia32_movnti (__A, __B);
}

static __inline void __attribute__((__always_inline__))
_mm_stream_si128 (__m128i *__A, __m128i __B)
{
  __builtin_ia32_movntdq ((__v2di *)__A, (__v2di)__B);
}

static __inline void __attribute__((__always_inline__))
_mm_stream_pd (double *__A, __m128d __B)
{
  __builtin_ia32_movntpd (__A, (__v2df)__B);
}

static __inline void __attribute__((__always_inline__))
_mm_clflush (void const *__A)
{
  __builtin_ia32_clflush (__A);
}

static __inline void __attribute__((__always_inline__))
_mm_lfence (void)
{
  __builtin_ia32_lfence ();
}

static __inline void __attribute__((__always_inline__))
_mm_mfence (void)
{
  __builtin_ia32_mfence ();
}

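/* Note: the _mm_stream_* stores above are non-temporal and weakly
   ordered; follow them with _mm_sfence (from xmmintrin.h) or
   _mm_mfence when later loads and stores must observe them.  */
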
static __inline __m128i __attribute__((__always_inline__))
_mm_cvtsi32_si128 (int __A)
{
  return _mm_set_epi32 (0, 0, 0, __A);
}

#ifdef __x86_64__
/* Intel intrinsic.  */
static __inline __m128i __attribute__((__always_inline__))
_mm_cvtsi64_si128 (long long __A)
{
  return _mm_set_epi64x (0, __A);
}

/* Microsoft intrinsic.  */
static __inline __m128i __attribute__((__always_inline__))
_mm_cvtsi64x_si128 (long long __A)
{
  return _mm_set_epi64x (0, __A);
}
#endif

/* Casts between various SP, DP, INT vector types.  Note that these do no
   conversion of values, they just change the type.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_castpd_ps (__m128d __A)
{
  return (__m128) __A;
}

static __inline __m128i __attribute__((__always_inline__))
_mm_castpd_si128 (__m128d __A)
{
  return (__m128i) __A;
}

static __inline __m128d __attribute__((__always_inline__))
_mm_castps_pd (__m128 __A)
{
  return (__m128d) __A;
}

static __inline __m128i __attribute__((__always_inline__))
_mm_castps_si128 (__m128 __A)
{
  return (__m128i) __A;
}

static __inline __m128 __attribute__((__always_inline__))
_mm_castsi128_ps (__m128i __A)
{
  return (__m128) __A;
}

static __inline __m128d __attribute__((__always_inline__))
_mm_castsi128_pd (__m128i __A)
{
  return (__m128d) __A;
}

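/* For illustration: the casts compile to no instructions; e.g.
   _mm_castpd_si128 lets integer operations such as _mm_and_si128 be
   applied to double data, with _mm_castsi128_pd reinterpreting the
   result back.  */
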
#endif /* __SSE2__ */

#endif /* _EMMINTRIN_H_INCLUDED */