/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

#ifndef __SSE__
# error "SSE instruction set not enabled"
#else

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
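
/* For example, _MM_SHUFFLE (3, 2, 1, 0) packs the four two-bit indices
   into the single byte 0xE4, the identity selector; see the note after
   _mm_shuffle_ps below for how the selector picks elements.  */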

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Create a vector of zeros.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}
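
/* As a concrete illustration of the _ss forms: with __A = { 1, 2, 3, 4 }
   and __B = { 10, 20, 30, 40 } (element 0 listed first),
   _mm_add_ss (__A, __B) yields { 11, 2, 3, 4 }; only element 0 is
   computed, the upper three elements are copied from __A.  */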

/* Perform the respective operation on the four SPFP values in A and B.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
        (__v4sf) __builtin_ia32_cmpltss ((__v4sf) __B, (__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
        (__v4sf) __builtin_ia32_cmpless ((__v4sf) __B, (__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
        (__v4sf) __builtin_ia32_cmpnltss ((__v4sf) __B, (__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
        (__v4sf) __builtin_ia32_cmpnless ((__v4sf) __B, (__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}

/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 64-bit integer according to the
   current rounding mode.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 64-bit integer.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}

/* Convert B to a SPFP value and insert it as element zero in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __hisi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __losi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __hisi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __losi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu8_ps(__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__sfa, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
  ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A), \
                                   (__v4sf)(__m128)(B), (int)(MASK)))
#endif
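
/* The two low elements of the result are selected from A and the two high
   elements from B; for instance, with arbitrary __m128 values a and b,
   _mm_shuffle_ps (a, b, _MM_SHUFFLE (1, 0, 3, 2)) yields
   { a[2], a[3], b[0], b[1] }.  */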

/* Selects and interleaves the upper two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}

/* Return the contents of the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
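
/* A typical use of these wrappers is to change one MXCSR field around a
   block of SSE code and then restore it, e.g. (sketch only):

     unsigned int __saved_mode = _MM_GET_ROUNDING_MODE ();
     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
     ... SSE arithmetic that should truncate ...
     _MM_SET_ROUNDING_MODE (__saved_mode);

   The exception mask and flush-to-zero mode follow the same pattern.  */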

/* Create a vector with element 0 as F and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
}

/* Create a vector with all four elements equal to F.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps (float const *__P)
{
  return (__m128) *(__v4sf *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}

/* Stores the lower SPFP value.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_f32 (__m128 __A)
{
  return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__v4sf *)__P = (__v4sf)__A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
  ((int) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pextrw(A, N) _mm_extract_pi16(A, N)
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A), \
                                        (int)(D), (int)(N)))

#define _m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N)
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_pi16 (__m64 __A, int const __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pshufw (__m64 __A, int const __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pshufw(A, N) _mm_shuffle_pi16 (A, N)
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}
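
/* Usage sketch (with hypothetical __m64 values val and mask and a char
   pointer p to at least 8 writable bytes): _mm_maskmove_si64 (val, mask, p)
   writes only those bytes of val whose corresponding byte in mask has its
   high bit set, leaving the remaining bytes at p unchanged.  */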

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#ifdef __OPTIMIZE__
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_prefetch (const void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), 0, (I))
#endif
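
/* For example, to ask for the cache line containing a hypothetical
   pointer p to be fetched into all cache levels ahead of use:

     _mm_prefetch (p, _MM_HINT_T0);

   The hint is advisory; the processor is free to ignore it.  */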

/* Stores the data in A to the address P without polluting the caches.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}
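
/* Non-temporal stores such as _mm_stream_ps are weakly ordered, so a
   common pattern (sketch, with hypothetical dst and data) is:

     _mm_stream_ps (dst, data);
     _mm_sfence ();

   which makes the streamed data globally visible before any later
   stores.  */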

/* The execution of the next instruction is delayed by an implementation
   specific amount of time.  The instruction does not modify the
   architectural state.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_pause (void)
{
  __asm__ __volatile__ ("rep; nop" : : );
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1); \
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3); \
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1); \
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3); \
  (row0) = __builtin_ia32_movlhps (__t0, __t1); \
  (row1) = __builtin_ia32_movhlps (__t1, __t0); \
  (row2) = __builtin_ia32_movlhps (__t2, __t3); \
  (row3) = __builtin_ia32_movhlps (__t3, __t2); \
} while (0)
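
/* Usage sketch: with four __m128 variables r0, r1, r2, r3 holding the rows
   of a 4x4 matrix, _MM_TRANSPOSE4_PS (r0, r1, r2, r3); overwrites the four
   variables with the columns, i.e. the transposed matrix.  */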

/* For backward source compatibility.  */
#ifdef __SSE2__
# include <emmintrin.h>
#endif

#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */