f1717362 1/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
d65bfb46 2
af680fe2 3 This file is part of GCC.
d65bfb46 4
af680fe2 5 GCC is free software; you can redistribute it and/or modify
d65bfb46 6 it under the terms of the GNU General Public License as published by
6bc9506f 7 the Free Software Foundation; either version 3, or (at your option)
d65bfb46 8 any later version.
9
af680fe2 10 GCC is distributed in the hope that it will be useful,
d65bfb46 11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
6bc9506f 15 Under Section 7 of GPL version 3, you are granted additional
16 permissions described in the GCC Runtime Library Exception, version
17 3.1, as published by the Free Software Foundation.
18
19 You should have received a copy of the GNU General Public License and
20 a copy of the GCC Runtime Library Exception along with this program;
21 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
22 <http://www.gnu.org/licenses/>. */
d65bfb46 23
24/* Implemented from the specification included in the Intel C++ Compiler
52fdc46e 25 User Guide and Reference, version 9.0. */
d65bfb46 26
27#ifndef _XMMINTRIN_H_INCLUDED
28#define _XMMINTRIN_H_INCLUDED
29
30/* We need type definitions from the MMX header file. */
31#include <mmintrin.h>
32
561866ed 33/* Get _mm_malloc () and _mm_free (). */
34#include <mm_malloc.h>
35
25e01811 36/* Constants for use with _mm_prefetch. */
37enum _mm_hint
38{
 39  /* _MM_HINT_ET is _MM_HINT_T with the 3rd bit set.  */
40 _MM_HINT_ET0 = 7,
41 _MM_HINT_ET1 = 6,
42 _MM_HINT_T0 = 3,
43 _MM_HINT_T1 = 2,
44 _MM_HINT_T2 = 1,
45 _MM_HINT_NTA = 0
46};
47
48/* Loads one cache line from address P to a location "closer" to the
49 processor. The selector I specifies the type of prefetch operation. */
50#ifdef __OPTIMIZE__
51extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
52_mm_prefetch (const void *__P, enum _mm_hint __I)
53{
54 __builtin_prefetch (__P, (__I & 0x4) >> 2, __I & 0x3);
55}
56#else
57#define _mm_prefetch(P, I) \
58 __builtin_prefetch ((P), ((I & 0x4) >> 2), (I & 0x3))
59#endif
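/* Illustrative sketch (p is a hypothetical pointer): bits 0-1 of the hint
   select the locality level passed to __builtin_prefetch and bit 2 requests
   a prefetch in anticipation of a write, e.g.

     _mm_prefetch (p, _MM_HINT_T0);   // read prefetch, high temporal locality
     _mm_prefetch (p, _MM_HINT_NTA);  // read prefetch, minimal cache pollution
     _mm_prefetch (p, _MM_HINT_ET0);  // prefetch in anticipation of a write
*/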
60
ef21d40e 61#ifndef __SSE__
62#pragma GCC push_options
63#pragma GCC target("sse")
64#define __DISABLE_SSE__
65#endif /* __SSE__ */
66
0e960ba8 67/* The Intel API is flexible enough that we must allow aliasing with other
68 vector types, and their scalar components. */
69typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));
d65bfb46 70
fcbfedc7 71/* Internal data types for implementing the intrinsics. */
aff6787f 72typedef float __v4sf __attribute__ ((__vector_size__ (16)));
d65bfb46 73
74/* Create a selector for use with the SHUFPS instruction. */
75#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
76 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
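/* Illustrative values: each argument is a 2-bit element index, packed from
   the highest field down to the lowest, e.g.

     _MM_SHUFFLE (3, 2, 1, 0) == 0xE4   // identity selector
     _MM_SHUFFLE (0, 1, 2, 3) == 0x1B   // reverses the element order
     _MM_SHUFFLE (0, 0, 0, 0) == 0x00   // replicates element 0
*/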
77
d65bfb46 78/* Bits in the MXCSR. */
79#define _MM_EXCEPT_MASK 0x003f
80#define _MM_EXCEPT_INVALID 0x0001
81#define _MM_EXCEPT_DENORM 0x0002
82#define _MM_EXCEPT_DIV_ZERO 0x0004
83#define _MM_EXCEPT_OVERFLOW 0x0008
84#define _MM_EXCEPT_UNDERFLOW 0x0010
85#define _MM_EXCEPT_INEXACT 0x0020
86
87#define _MM_MASK_MASK 0x1f80
88#define _MM_MASK_INVALID 0x0080
89#define _MM_MASK_DENORM 0x0100
90#define _MM_MASK_DIV_ZERO 0x0200
91#define _MM_MASK_OVERFLOW 0x0400
92#define _MM_MASK_UNDERFLOW 0x0800
93#define _MM_MASK_INEXACT 0x1000
94
95#define _MM_ROUND_MASK 0x6000
96#define _MM_ROUND_NEAREST 0x0000
97#define _MM_ROUND_DOWN 0x2000
98#define _MM_ROUND_UP 0x4000
99#define _MM_ROUND_TOWARD_ZERO 0x6000
100
101#define _MM_FLUSH_ZERO_MASK 0x8000
102#define _MM_FLUSH_ZERO_ON 0x8000
103#define _MM_FLUSH_ZERO_OFF 0x0000
104
0fc245cd 105/* Create an undefined vector. */
106extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
107_mm_undefined_ps (void)
108{
109 __m128 __Y = __Y;
110 return __Y;
111}
112
ad2c46cf 113/* Create a vector of zeros. */
517b0286 114extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
ad2c46cf 115_mm_setzero_ps (void)
116{
882b157f 117 return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
ad2c46cf 118}
119
d65bfb46 120/* Perform the respective operation on the lower SPFP (single-precision
121 floating-point) values of A and B; the upper three SPFP values are
122 passed through from A. */
123
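/* Illustrative sketch (using _mm_setr_ps, defined later in this file):
   only element 0 is computed; the other three elements come from the
   first operand, e.g.

     __m128 a = _mm_setr_ps (1.0f, 2.0f, 3.0f, 4.0f);
     __m128 b = _mm_setr_ps (10.0f, 20.0f, 30.0f, 40.0f);
     __m128 r = _mm_add_ss (a, b);   // r is { 11.0f, 2.0f, 3.0f, 4.0f }
*/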
517b0286 124extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 125_mm_add_ss (__m128 __A, __m128 __B)
126{
127 return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
128}
129
517b0286 130extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 131_mm_sub_ss (__m128 __A, __m128 __B)
132{
133 return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
134}
135
517b0286 136extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 137_mm_mul_ss (__m128 __A, __m128 __B)
138{
139 return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
140}
141
517b0286 142extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 143_mm_div_ss (__m128 __A, __m128 __B)
144{
145 return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
146}
147
517b0286 148extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 149_mm_sqrt_ss (__m128 __A)
150{
151 return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
152}
153
517b0286 154extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 155_mm_rcp_ss (__m128 __A)
156{
157 return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
158}
159
517b0286 160extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 161_mm_rsqrt_ss (__m128 __A)
162{
163 return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
164}
165
517b0286 166extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 167_mm_min_ss (__m128 __A, __m128 __B)
168{
169 return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
170}
171
517b0286 172extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 173_mm_max_ss (__m128 __A, __m128 __B)
174{
175 return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
176}
177
178/* Perform the respective operation on the four SPFP values in A and B. */
179
517b0286 180extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 181_mm_add_ps (__m128 __A, __m128 __B)
182{
d521a5b2 183 return (__m128) ((__v4sf)__A + (__v4sf)__B);
d65bfb46 184}
185
517b0286 186extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 187_mm_sub_ps (__m128 __A, __m128 __B)
188{
d521a5b2 189 return (__m128) ((__v4sf)__A - (__v4sf)__B);
d65bfb46 190}
191
517b0286 192extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 193_mm_mul_ps (__m128 __A, __m128 __B)
194{
d521a5b2 195 return (__m128) ((__v4sf)__A * (__v4sf)__B);
d65bfb46 196}
197
517b0286 198extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 199_mm_div_ps (__m128 __A, __m128 __B)
200{
d521a5b2 201 return (__m128) ((__v4sf)__A / (__v4sf)__B);
d65bfb46 202}
203
517b0286 204extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 205_mm_sqrt_ps (__m128 __A)
206{
207 return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
208}
209
517b0286 210extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 211_mm_rcp_ps (__m128 __A)
212{
213 return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
214}
215
517b0286 216extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 217_mm_rsqrt_ps (__m128 __A)
218{
219 return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
220}
221
517b0286 222extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 223_mm_min_ps (__m128 __A, __m128 __B)
224{
225 return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
226}
227
517b0286 228extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 229_mm_max_ps (__m128 __A, __m128 __B)
230{
231 return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
232}
233
234/* Perform logical bit-wise operations on 128-bit values. */
235
517b0286 236extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 237_mm_and_ps (__m128 __A, __m128 __B)
238{
239 return __builtin_ia32_andps (__A, __B);
240}
241
517b0286 242extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 243_mm_andnot_ps (__m128 __A, __m128 __B)
244{
245 return __builtin_ia32_andnps (__A, __B);
246}
247
517b0286 248extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 249_mm_or_ps (__m128 __A, __m128 __B)
250{
251 return __builtin_ia32_orps (__A, __B);
252}
253
517b0286 254extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 255_mm_xor_ps (__m128 __A, __m128 __B)
256{
257 return __builtin_ia32_xorps (__A, __B);
258}
259
260/* Perform a comparison on the lower SPFP values of A and B. If the
261 comparison is true, place a mask of all ones in the result, otherwise a
262 mask of zeros. The upper three SPFP values are passed through from A. */
263
517b0286 264extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 265_mm_cmpeq_ss (__m128 __A, __m128 __B)
266{
267 return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
268}
269
517b0286 270extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 271_mm_cmplt_ss (__m128 __A, __m128 __B)
272{
273 return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
274}
275
517b0286 276extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 277_mm_cmple_ss (__m128 __A, __m128 __B)
278{
279 return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
280}
281
517b0286 282extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 283_mm_cmpgt_ss (__m128 __A, __m128 __B)
284{
c469025e 285 return (__m128) __builtin_ia32_movss ((__v4sf) __A,
286 (__v4sf)
287 __builtin_ia32_cmpltss ((__v4sf) __B,
288 (__v4sf)
289 __A));
d65bfb46 290}
291
517b0286 292extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 293_mm_cmpge_ss (__m128 __A, __m128 __B)
294{
c469025e 295 return (__m128) __builtin_ia32_movss ((__v4sf) __A,
296 (__v4sf)
297 __builtin_ia32_cmpless ((__v4sf) __B,
298 (__v4sf)
299 __A));
d65bfb46 300}
301
517b0286 302extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 303_mm_cmpneq_ss (__m128 __A, __m128 __B)
304{
305 return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
306}
307
517b0286 308extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 309_mm_cmpnlt_ss (__m128 __A, __m128 __B)
310{
311 return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
312}
313
517b0286 314extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 315_mm_cmpnle_ss (__m128 __A, __m128 __B)
316{
317 return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
318}
319
517b0286 320extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 321_mm_cmpngt_ss (__m128 __A, __m128 __B)
322{
c469025e 323 return (__m128) __builtin_ia32_movss ((__v4sf) __A,
324 (__v4sf)
325 __builtin_ia32_cmpnltss ((__v4sf) __B,
326 (__v4sf)
327 __A));
d65bfb46 328}
329
517b0286 330extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 331_mm_cmpnge_ss (__m128 __A, __m128 __B)
332{
c469025e 333 return (__m128) __builtin_ia32_movss ((__v4sf) __A,
334 (__v4sf)
335 __builtin_ia32_cmpnless ((__v4sf) __B,
336 (__v4sf)
337 __A));
d65bfb46 338}
339
517b0286 340extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 341_mm_cmpord_ss (__m128 __A, __m128 __B)
342{
343 return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
344}
345
517b0286 346extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 347_mm_cmpunord_ss (__m128 __A, __m128 __B)
348{
349 return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
350}
351
352/* Perform a comparison on the four SPFP values of A and B. For each
353 element, if the comparison is true, place a mask of all ones in the
354 result, otherwise a mask of zeros. */
355
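/* Illustrative sketch (a and b are hypothetical vectors): the all-ones /
   all-zeros masks combine with the logical operations above to give a
   branchless select, e.g. choosing the larger element of each lane:

     __m128 mask = _mm_cmpgt_ps (a, b);
     __m128 r    = _mm_or_ps (_mm_and_ps (mask, a),      // a where a > b
                              _mm_andnot_ps (mask, b));  // b elsewhere
*/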
517b0286 356extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 357_mm_cmpeq_ps (__m128 __A, __m128 __B)
358{
359 return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
360}
361
517b0286 362extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 363_mm_cmplt_ps (__m128 __A, __m128 __B)
364{
365 return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
366}
367
517b0286 368extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 369_mm_cmple_ps (__m128 __A, __m128 __B)
370{
371 return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
372}
373
517b0286 374extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 375_mm_cmpgt_ps (__m128 __A, __m128 __B)
376{
377 return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
378}
379
517b0286 380extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 381_mm_cmpge_ps (__m128 __A, __m128 __B)
382{
383 return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
384}
385
517b0286 386extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 387_mm_cmpneq_ps (__m128 __A, __m128 __B)
388{
389 return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
390}
391
517b0286 392extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 393_mm_cmpnlt_ps (__m128 __A, __m128 __B)
394{
395 return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
396}
397
517b0286 398extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 399_mm_cmpnle_ps (__m128 __A, __m128 __B)
400{
401 return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
402}
403
517b0286 404extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 405_mm_cmpngt_ps (__m128 __A, __m128 __B)
406{
407 return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
408}
409
517b0286 410extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 411_mm_cmpnge_ps (__m128 __A, __m128 __B)
412{
413 return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
414}
415
517b0286 416extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 417_mm_cmpord_ps (__m128 __A, __m128 __B)
418{
419 return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
420}
421
517b0286 422extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 423_mm_cmpunord_ps (__m128 __A, __m128 __B)
424{
425 return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
426}
427
428/* Compare the lower SPFP values of A and B and return 1 if true
429 and 0 if false. */
430
517b0286 431extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 432_mm_comieq_ss (__m128 __A, __m128 __B)
433{
434 return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
435}
436
517b0286 437extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 438_mm_comilt_ss (__m128 __A, __m128 __B)
439{
440 return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
441}
442
517b0286 443extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 444_mm_comile_ss (__m128 __A, __m128 __B)
445{
446 return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
447}
448
517b0286 449extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 450_mm_comigt_ss (__m128 __A, __m128 __B)
451{
452 return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
453}
454
517b0286 455extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 456_mm_comige_ss (__m128 __A, __m128 __B)
457{
458 return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
459}
460
517b0286 461extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 462_mm_comineq_ss (__m128 __A, __m128 __B)
463{
464 return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
465}
466
517b0286 467extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 468_mm_ucomieq_ss (__m128 __A, __m128 __B)
469{
470 return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
471}
472
517b0286 473extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 474_mm_ucomilt_ss (__m128 __A, __m128 __B)
475{
476 return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
477}
478
517b0286 479extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 480_mm_ucomile_ss (__m128 __A, __m128 __B)
481{
482 return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
483}
484
517b0286 485extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 486_mm_ucomigt_ss (__m128 __A, __m128 __B)
487{
488 return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
489}
490
517b0286 491extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 492_mm_ucomige_ss (__m128 __A, __m128 __B)
493{
494 return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
495}
496
517b0286 497extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 498_mm_ucomineq_ss (__m128 __A, __m128 __B)
499{
500 return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
501}
502
503/* Convert the lower SPFP value to a 32-bit integer according to the current
504 rounding mode. */
517b0286 505extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 506_mm_cvtss_si32 (__m128 __A)
507{
508 return __builtin_ia32_cvtss2si ((__v4sf) __A);
509}
510
517b0286 511extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 512_mm_cvt_ss2si (__m128 __A)
513{
514 return _mm_cvtss_si32 (__A);
515}
516
1f27494a 517#ifdef __x86_64__
52fdc46e 518/* Convert the lower SPFP value to a 32-bit integer according to the
519 current rounding mode. */
520
521/* Intel intrinsic. */
517b0286 522extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
52fdc46e 523_mm_cvtss_si64 (__m128 __A)
524{
525 return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
526}
527
528/* Microsoft intrinsic. */
517b0286 529extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
1f27494a 530_mm_cvtss_si64x (__m128 __A)
531{
532 return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
533}
534#endif
535
d65bfb46 536/* Convert the two lower SPFP values to 32-bit integers according to the
537 current rounding mode. Return the integers in packed form. */
517b0286 538extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 539_mm_cvtps_pi32 (__m128 __A)
540{
541 return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
542}
543
517b0286 544extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 545_mm_cvt_ps2pi (__m128 __A)
546{
547 return _mm_cvtps_pi32 (__A);
548}
549
d65bfb46 550/* Truncate the lower SPFP value to a 32-bit integer. */
517b0286 551extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 552_mm_cvttss_si32 (__m128 __A)
553{
554 return __builtin_ia32_cvttss2si ((__v4sf) __A);
555}
556
517b0286 557extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 558_mm_cvtt_ss2si (__m128 __A)
559{
560 return _mm_cvttss_si32 (__A);
561}
562
1f27494a 563#ifdef __x86_64__
564/* Truncate the lower SPFP value to a 32-bit integer. */
52fdc46e 565
566/* Intel intrinsic. */
517b0286 567extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
52fdc46e 568_mm_cvttss_si64 (__m128 __A)
569{
570 return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
571}
572
573/* Microsoft intrinsic. */
517b0286 574extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
1f27494a 575_mm_cvttss_si64x (__m128 __A)
576{
577 return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
578}
579#endif
580
d65bfb46 581/* Truncate the two lower SPFP values to 32-bit integers. Return the
582 integers in packed form. */
517b0286 583extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 584_mm_cvttps_pi32 (__m128 __A)
585{
586 return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
587}
588
517b0286 589extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 590_mm_cvtt_ps2pi (__m128 __A)
591{
592 return _mm_cvttps_pi32 (__A);
593}
594
d65bfb46 595/* Convert B to a SPFP value and insert it as element zero in A. */
517b0286 596extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 597_mm_cvtsi32_ss (__m128 __A, int __B)
598{
599 return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
600}
601
517b0286 602extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 603_mm_cvt_si2ss (__m128 __A, int __B)
604{
605 return _mm_cvtsi32_ss (__A, __B);
606}
607
1f27494a 608#ifdef __x86_64__
609/* Convert B to a SPFP value and insert it as element zero in A. */
52fdc46e 610
611/* Intel intrinsic. */
517b0286 612extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
52fdc46e 613_mm_cvtsi64_ss (__m128 __A, long long __B)
614{
615 return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
616}
617
618/* Microsoft intrinsic. */
517b0286 619extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
1f27494a 620_mm_cvtsi64x_ss (__m128 __A, long long __B)
621{
622 return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
623}
624#endif
625
d65bfb46 626/* Convert the two 32-bit values in B to SPFP form and insert them
627 as the two lower elements in A. */
517b0286 628extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 629_mm_cvtpi32_ps (__m128 __A, __m64 __B)
630{
631 return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
632}
633
517b0286 634extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 635_mm_cvt_pi2ps (__m128 __A, __m64 __B)
636{
637 return _mm_cvtpi32_ps (__A, __B);
638}
639
d65bfb46 640/* Convert the four signed 16-bit values in A to SPFP form. */
517b0286 641extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 642_mm_cvtpi16_ps (__m64 __A)
643{
644 __v4hi __sign;
645 __v2si __hisi, __losi;
6e22f3c7 646 __v4sf __zero, __ra, __rb;
d65bfb46 647
648 /* This comparison against zero gives us a mask that can be used to
649 fill in the missing sign bits in the unpack operations below, so
650 that we get signed values after unpacking. */
ad2c46cf 651 __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);
d65bfb46 652
653 /* Convert the four words to doublewords. */
d65bfb46 654 __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);
850c7736 655 __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
d65bfb46 656
657 /* Convert the doublewords to floating point two at a time. */
6e22f3c7 658 __zero = (__v4sf) _mm_setzero_ps ();
850c7736 659 __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
660 __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);
d65bfb46 661
6e22f3c7 662 return (__m128) __builtin_ia32_movlhps (__ra, __rb);
d65bfb46 663}
664
665/* Convert the four unsigned 16-bit values in A to SPFP form. */
517b0286 666extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 667_mm_cvtpu16_ps (__m64 __A)
668{
d65bfb46 669 __v2si __hisi, __losi;
6e22f3c7 670 __v4sf __zero, __ra, __rb;
d65bfb46 671
672 /* Convert the four words to doublewords. */
ad2c46cf 673 __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);
850c7736 674 __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);
d65bfb46 675
676 /* Convert the doublewords to floating point two at a time. */
6e22f3c7 677 __zero = (__v4sf) _mm_setzero_ps ();
850c7736 678 __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
679 __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);
d65bfb46 680
6e22f3c7 681 return (__m128) __builtin_ia32_movlhps (__ra, __rb);
d65bfb46 682}
683
684/* Convert the low four signed 8-bit values in A to SPFP form. */
517b0286 685extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 686_mm_cvtpi8_ps (__m64 __A)
687{
688 __v8qi __sign;
689
690 /* This comparison against zero gives us a mask that can be used to
691 fill in the missing sign bits in the unpack operations below, so
692 that we get signed values after unpacking. */
ad2c46cf 693 __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);
d65bfb46 694
695 /* Convert the four low bytes to words. */
696 __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);
697
698 return _mm_cvtpi16_ps(__A);
699}
700
701/* Convert the low four unsigned 8-bit values in A to SPFP form. */
517b0286 702extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 703_mm_cvtpu8_ps(__m64 __A)
704{
ad2c46cf 705 __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
d65bfb46 706 return _mm_cvtpu16_ps(__A);
707}
708
709/* Convert the four signed 32-bit values in A and B to SPFP form. */
517b0286 710extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 711_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
712{
ad2c46cf 713 __v4sf __zero = (__v4sf) _mm_setzero_ps ();
d65bfb46 714 __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
6e22f3c7 715 __v4sf __sfb = __builtin_ia32_cvtpi2ps (__sfa, (__v2si)__B);
d65bfb46 716 return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
717}
718
719/* Convert the four SPFP values in A to four signed 16-bit integers. */
517b0286 720extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 721_mm_cvtps_pi16(__m128 __A)
722{
723 __v4sf __hisf = (__v4sf)__A;
724 __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
725 __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
726 __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
717b9435 727 return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
d65bfb46 728}
729
730/* Convert the four SPFP values in A to four signed 8-bit integers. */
517b0286 731extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 732_mm_cvtps_pi8(__m128 __A)
733{
734 __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
ad2c46cf 735 return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
d65bfb46 736}
737
738/* Selects four specific SPFP values from A and B based on MASK. */
1a60bb06 739#ifdef __OPTIMIZE__
517b0286 740extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
4dfac92d 741_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
d65bfb46 742{
743 return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
744}
d61e5c1b 745#else
5f76c0f8 746#define _mm_shuffle_ps(A, B, MASK) \
747 ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A), \
748 (__v4sf)(__m128)(B), (int)(MASK)))
d61e5c1b 749#endif
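/* Illustrative sketch (a and b are hypothetical vectors): the two low
   selector fields index A and the two high fields index B, e.g.

     __m128 r = _mm_shuffle_ps (a, b, _MM_SHUFFLE (1, 0, 3, 2));
     // r is { a[2], a[3], b[0], b[1] }: the high half of A followed by
     // the low half of B.
*/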
d65bfb46 750
751/* Selects and interleaves the upper two SPFP values from A and B. */
517b0286 752extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 753_mm_unpackhi_ps (__m128 __A, __m128 __B)
754{
755 return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
756}
757
758/* Selects and interleaves the lower two SPFP values from A and B. */
517b0286 759extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 760_mm_unpacklo_ps (__m128 __A, __m128 __B)
761{
762 return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
763}
764
 765/* Sets the upper two SPFP values with 64 bits of data loaded from P;
766 the lower two values are passed through from A. */
517b0286 767extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a7fa1ad5 768_mm_loadh_pi (__m128 __A, __m64 const *__P)
d65bfb46 769{
875a66b2 770 return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (const __v2sf *)__P);
d65bfb46 771}
772
773/* Stores the upper two SPFP values of A into P. */
517b0286 774extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 775_mm_storeh_pi (__m64 *__P, __m128 __A)
776{
875a66b2 777 __builtin_ia32_storehps ((__v2sf *)__P, (__v4sf)__A);
d65bfb46 778}
779
780/* Moves the upper two values of B into the lower two values of A. */
517b0286 781extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 782_mm_movehl_ps (__m128 __A, __m128 __B)
783{
784 return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
785}
786
787/* Moves the lower two values of B into the upper two values of A. */
517b0286 788extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 789_mm_movelh_ps (__m128 __A, __m128 __B)
790{
791 return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
792}
793
 794/* Sets the lower two SPFP values with 64 bits of data loaded from P;
795 the upper two values are passed through from A. */
517b0286 796extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a7fa1ad5 797_mm_loadl_pi (__m128 __A, __m64 const *__P)
d65bfb46 798{
875a66b2 799 return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (const __v2sf *)__P);
d65bfb46 800}
801
802/* Stores the lower two SPFP values of A into P. */
517b0286 803extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 804_mm_storel_pi (__m64 *__P, __m128 __A)
805{
875a66b2 806 __builtin_ia32_storelps ((__v2sf *)__P, (__v4sf)__A);
d65bfb46 807}
808
809/* Creates a 4-bit mask from the most significant bits of the SPFP values. */
517b0286 810extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 811_mm_movemask_ps (__m128 __A)
812{
813 return __builtin_ia32_movmskps ((__v4sf)__A);
814}
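/* Illustrative sketch (v and limit are hypothetical vectors): bit I of the
   result is the sign bit of element I, which combines with the packed
   comparisons above to test a whole vector at once, e.g.

     int m = _mm_movemask_ps (_mm_cmplt_ps (v, limit));
     // m == 0x0: no element of v is below limit
     // m == 0xF: every element of v is below limit
*/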
815
816/* Return the contents of the control register. */
517b0286 817extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 818_mm_getcsr (void)
819{
3381a03a 820 return __builtin_ia32_stmxcsr ();
d65bfb46 821}
822
823/* Read exception bits from the control register. */
517b0286 824extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 825_MM_GET_EXCEPTION_STATE (void)
826{
827 return _mm_getcsr() & _MM_EXCEPT_MASK;
828}
829
517b0286 830extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 831_MM_GET_EXCEPTION_MASK (void)
832{
833 return _mm_getcsr() & _MM_MASK_MASK;
834}
835
517b0286 836extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 837_MM_GET_ROUNDING_MODE (void)
838{
839 return _mm_getcsr() & _MM_ROUND_MASK;
840}
841
517b0286 842extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 843_MM_GET_FLUSH_ZERO_MODE (void)
844{
845 return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
846}
847
848/* Set the control register to I. */
517b0286 849extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 850_mm_setcsr (unsigned int __I)
851{
3381a03a 852 __builtin_ia32_ldmxcsr (__I);
d65bfb46 853}
854
855/* Set exception bits in the control register. */
517b0286 856extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 857_MM_SET_EXCEPTION_STATE(unsigned int __mask)
858{
859 _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
860}
861
517b0286 862extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 863_MM_SET_EXCEPTION_MASK (unsigned int __mask)
864{
865 _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
866}
867
517b0286 868extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 869_MM_SET_ROUNDING_MODE (unsigned int __mode)
870{
871 _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
872}
873
517b0286 874extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 875_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
876{
877 _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
878}
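/* Illustrative sketch: the helpers above read-modify-write the MXCSR
   fields defined near the top of this file, e.g.

     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);  // round results toward zero
     _MM_SET_FLUSH_ZERO_MODE (_MM_FLUSH_ZERO_ON);    // flush denormal results to zero
     if (_MM_GET_EXCEPTION_STATE () & _MM_EXCEPT_DIV_ZERO)
       _MM_SET_EXCEPTION_STATE (0);                  // clear the sticky exception flags
*/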
879
ad2c46cf 880/* Create a vector with element 0 as F and the rest zero. */
517b0286 881extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
ad2c46cf 882_mm_set_ss (float __F)
883{
712fea20 884 return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
ad2c46cf 885}
886
887/* Create a vector with all four elements equal to F. */
517b0286 888extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
ad2c46cf 889_mm_set1_ps (float __F)
890{
882b157f 891 return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
ad2c46cf 892}
893
517b0286 894extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
ad2c46cf 895_mm_set_ps1 (float __F)
896{
897 return _mm_set1_ps (__F);
898}
899
d65bfb46 900/* Create a vector with element 0 as *P and the rest zero. */
517b0286 901extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a7fa1ad5 902_mm_load_ss (float const *__P)
d65bfb46 903{
ad2c46cf 904 return _mm_set_ss (*__P);
d65bfb46 905}
906
907/* Create a vector with all four elements equal to *P. */
517b0286 908extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a7fa1ad5 909_mm_load1_ps (float const *__P)
d65bfb46 910{
ad2c46cf 911 return _mm_set1_ps (*__P);
d65bfb46 912}
913
517b0286 914extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a7fa1ad5 915_mm_load_ps1 (float const *__P)
d65bfb46 916{
917 return _mm_load1_ps (__P);
918}
919
920/* Load four SPFP values from P. The address must be 16-byte aligned. */
517b0286 921extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a7fa1ad5 922_mm_load_ps (float const *__P)
d65bfb46 923{
ad2c46cf 924 return (__m128) *(__v4sf *)__P;
d65bfb46 925}
926
927/* Load four SPFP values from P. The address need not be 16-byte aligned. */
517b0286 928extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a7fa1ad5 929_mm_loadu_ps (float const *__P)
d65bfb46 930{
931 return (__m128) __builtin_ia32_loadups (__P);
932}
933
934/* Load four SPFP values in reverse order. The address must be aligned. */
517b0286 935extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
a7fa1ad5 936_mm_loadr_ps (float const *__P)
d65bfb46 937{
ad2c46cf 938 __v4sf __tmp = *(__v4sf *)__P;
d65bfb46 939 return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
940}
941
d65bfb46 942/* Create the vector [Z Y X W]. */
517b0286 943extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
b8d2bcdd 944_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
d65bfb46 945{
882b157f 946 return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
d65bfb46 947}
948
949/* Create the vector [W X Y Z]. */
517b0286 950extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 951_mm_setr_ps (float __Z, float __Y, float __X, float __W)
952{
882b157f 953 return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
d65bfb46 954}
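/* Illustrative sketch (f is a hypothetical array): _mm_set_ps takes its
   arguments from element 3 down to element 0, while _mm_setr_ps takes them
   in memory order, so the three vectors below are identical:

     float f[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
     __m128 x = _mm_loadu_ps (f);
     __m128 y = _mm_setr_ps (1.0f, 2.0f, 3.0f, 4.0f);
     __m128 z = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);
*/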
955
956/* Stores the lower SPFP value. */
517b0286 957extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 958_mm_store_ss (float *__P, __m128 __A)
959{
d521a5b2 960 *__P = ((__v4sf)__A)[0];
d65bfb46 961}
962
517b0286 963extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
52fdc46e 964_mm_cvtss_f32 (__m128 __A)
965{
d521a5b2 966 return ((__v4sf)__A)[0];
52fdc46e 967}
968
ad2c46cf 969/* Store four SPFP values. The address must be 16-byte aligned. */
517b0286 970extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
ad2c46cf 971_mm_store_ps (float *__P, __m128 __A)
d65bfb46 972{
ad2c46cf 973 *(__v4sf *)__P = (__v4sf)__A;
d65bfb46 974}
975
ad2c46cf 976/* Store four SPFP values. The address need not be 16-byte aligned. */
517b0286 977extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
ad2c46cf 978_mm_storeu_ps (float *__P, __m128 __A)
d65bfb46 979{
ad2c46cf 980 __builtin_ia32_storeups (__P, (__v4sf)__A);
d65bfb46 981}
982
ad2c46cf 983/* Store the lower SPFP value across four words. */
517b0286 984extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
ad2c46cf 985_mm_store1_ps (float *__P, __m128 __A)
d65bfb46 986{
ad2c46cf 987 __v4sf __va = (__v4sf)__A;
988 __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
989 _mm_storeu_ps (__P, __tmp);
d65bfb46 990}
991
517b0286 992extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
ad2c46cf 993_mm_store_ps1 (float *__P, __m128 __A)
d65bfb46 994{
ad2c46cf 995 _mm_store1_ps (__P, __A);
d65bfb46 996}
997
f747aa2d 998/* Store four SPFP values in reverse order. The address must be aligned. */
517b0286 999extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1000_mm_storer_ps (float *__P, __m128 __A)
1001{
1002 __v4sf __va = (__v4sf)__A;
1003 __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
ad2c46cf 1004 _mm_store_ps (__P, __tmp);
d65bfb46 1005}
1006
1007/* Sets the low SPFP value of A from the low value of B. */
517b0286 1008extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1009_mm_move_ss (__m128 __A, __m128 __B)
1010{
1011 return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
1012}
1013
1014/* Extracts one of the four words of A. The selector N must be immediate. */
1a60bb06 1015#ifdef __OPTIMIZE__
517b0286 1016extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
ad2c46cf 1017_mm_extract_pi16 (__m64 const __A, int const __N)
d65bfb46 1018{
ad2c46cf 1019 return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
d65bfb46 1020}
3024f45d 1021
517b0286 1022extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
ad2c46cf 1023_m_pextrw (__m64 const __A, int const __N)
3024f45d 1024{
1025 return _mm_extract_pi16 (__A, __N);
1026}
d61e5c1b 1027#else
5f76c0f8 1028#define _mm_extract_pi16(A, N) \
1029 ((int) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N)))
8d308471 1030
1031#define _m_pextrw(A, N) _mm_extract_pi16(A, N)
d61e5c1b 1032#endif
d65bfb46 1033
1034/* Inserts word D into one of four words of A. The selector N must be
1035 immediate. */
1a60bb06 1036#ifdef __OPTIMIZE__
517b0286 1037extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
ad2c46cf 1038_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
d65bfb46 1039{
ad2c46cf 1040 return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
d65bfb46 1041}
3024f45d 1042
517b0286 1043extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
ad2c46cf 1044_m_pinsrw (__m64 const __A, int const __D, int const __N)
3024f45d 1045{
1046 return _mm_insert_pi16 (__A, __D, __N);
1047}
d61e5c1b 1048#else
5f76c0f8 1049#define _mm_insert_pi16(A, D, N) \
1050 ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A), \
1051 (int)(D), (int)(N)))
8d308471 1052
1053#define _m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N)
d61e5c1b 1054#endif
d65bfb46 1055
1056/* Compute the element-wise maximum of signed 16-bit values. */
517b0286 1057extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1058_mm_max_pi16 (__m64 __A, __m64 __B)
1059{
1060 return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
1061}
1062
517b0286 1063extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 1064_m_pmaxsw (__m64 __A, __m64 __B)
1065{
1066 return _mm_max_pi16 (__A, __B);
1067}
1068
d65bfb46 1069/* Compute the element-wise maximum of unsigned 8-bit values. */
517b0286 1070extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1071_mm_max_pu8 (__m64 __A, __m64 __B)
1072{
1073 return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
1074}
1075
517b0286 1076extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 1077_m_pmaxub (__m64 __A, __m64 __B)
1078{
1079 return _mm_max_pu8 (__A, __B);
1080}
1081
d65bfb46 1082/* Compute the element-wise minimum of signed 16-bit values. */
517b0286 1083extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1084_mm_min_pi16 (__m64 __A, __m64 __B)
1085{
1086 return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
1087}
1088
517b0286 1089extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 1090_m_pminsw (__m64 __A, __m64 __B)
1091{
1092 return _mm_min_pi16 (__A, __B);
1093}
1094
d65bfb46 1095/* Compute the element-wise minimum of unsigned 8-bit values. */
517b0286 1096extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1097_mm_min_pu8 (__m64 __A, __m64 __B)
1098{
1099 return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
1100}
1101
517b0286 1102extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 1103_m_pminub (__m64 __A, __m64 __B)
1104{
1105 return _mm_min_pu8 (__A, __B);
1106}
1107
d65bfb46 1108/* Create an 8-bit mask of the signs of 8-bit values. */
517b0286 1109extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1110_mm_movemask_pi8 (__m64 __A)
1111{
1112 return __builtin_ia32_pmovmskb ((__v8qi)__A);
1113}
1114
517b0286 1115extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 1116_m_pmovmskb (__m64 __A)
1117{
1118 return _mm_movemask_pi8 (__A);
1119}
1120
d65bfb46 1121/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
1122 in B and produce the high 16 bits of the 32-bit results. */
517b0286 1123extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1124_mm_mulhi_pu16 (__m64 __A, __m64 __B)
1125{
1126 return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
1127}
1128
517b0286 1129extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 1130_m_pmulhuw (__m64 __A, __m64 __B)
1131{
1132 return _mm_mulhi_pu16 (__A, __B);
1133}
1134
d65bfb46 1135/* Return a combination of the four 16-bit values in A. The selector
1136 must be an immediate. */
1a60bb06 1137#ifdef __OPTIMIZE__
517b0286 1138extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
4dfac92d 1139_mm_shuffle_pi16 (__m64 __A, int const __N)
d65bfb46 1140{
1141 return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
1142}
3024f45d 1143
517b0286 1144extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
4dfac92d 1145_m_pshufw (__m64 __A, int const __N)
3024f45d 1146{
1147 return _mm_shuffle_pi16 (__A, __N);
1148}
d61e5c1b 1149#else
1150#define _mm_shuffle_pi16(A, N) \
5f76c0f8 1151 ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N)))
8d308471 1152
1153#define _m_pshufw(A, N) _mm_shuffle_pi16 (A, N)
d61e5c1b 1154#endif
d65bfb46 1155
1156/* Conditionally store byte elements of A into P. The high bit of each
1157 byte in the selector N determines whether the corresponding byte from
1158 A is stored. */
517b0286 1159extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1160_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
1161{
1162 __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
1163}
1164
517b0286 1165extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 1166_m_maskmovq (__m64 __A, __m64 __N, char *__P)
1167{
1168 _mm_maskmove_si64 (__A, __N, __P);
1169}
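/* Illustrative sketch (data and buf are hypothetical): only bytes whose
   mask byte has its high bit set are written, so this stores just the low
   four bytes of data:

     __m64 mask = _mm_set_pi8 (0, 0, 0, 0, -1, -1, -1, -1);
     _mm_maskmove_si64 (data, mask, buf);
*/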
1170
d65bfb46 1171/* Compute the rounded averages of the unsigned 8-bit values in A and B. */
517b0286 1172extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1173_mm_avg_pu8 (__m64 __A, __m64 __B)
1174{
1175 return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
1176}
1177
517b0286 1178extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 1179_m_pavgb (__m64 __A, __m64 __B)
1180{
1181 return _mm_avg_pu8 (__A, __B);
1182}
1183
d65bfb46 1184/* Compute the rounded averages of the unsigned 16-bit values in A and B. */
517b0286 1185extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1186_mm_avg_pu16 (__m64 __A, __m64 __B)
1187{
1188 return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
1189}
1190
517b0286 1191extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 1192_m_pavgw (__m64 __A, __m64 __B)
1193{
1194 return _mm_avg_pu16 (__A, __B);
1195}
1196
d65bfb46 1197/* Compute the sum of the absolute differences of the unsigned 8-bit
1198 values in A and B. Return the value in the lower 16-bit word; the
1199 upper words are cleared. */
517b0286 1200extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1201_mm_sad_pu8 (__m64 __A, __m64 __B)
1202{
1203 return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
1204}
1205
517b0286 1206extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
3024f45d 1207_m_psadbw (__m64 __A, __m64 __B)
1208{
1209 return _mm_sad_pu8 (__A, __B);
1210}
1211
d65bfb46 1212/* Stores the data in A to the address P without polluting the caches. */
517b0286 1213extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1214_mm_stream_pi (__m64 *__P, __m64 __A)
1215{
c2f63288 1216 __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
d65bfb46 1217}
1218
1219/* Likewise. The address must be 16-byte aligned. */
517b0286 1220extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1221_mm_stream_ps (float *__P, __m128 __A)
1222{
1223 __builtin_ia32_movntps (__P, (__v4sf)__A);
1224}
1225
fcbfedc7 1226/* Guarantees that every preceding store is globally visible before
d65bfb46 1227 any subsequent store. */
517b0286 1228extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
d65bfb46 1229_mm_sfence (void)
1230{
1231 __builtin_ia32_sfence ();
1232}
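/* Illustrative sketch (out, vals, n and ready are hypothetical): a producer
   that fills a buffer with non-temporal stores issues _mm_sfence before
   publishing it, so the weakly ordered stores become visible before the
   flag that announces them:

     for (i = 0; i < n; i += 4)
       _mm_stream_ps (&out[i], vals[i / 4]);
     _mm_sfence ();
     *ready = 1;
*/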
1233
d65bfb46 1234/* Transpose the 4x4 matrix composed of row[0-3]. */
1235#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
1236do { \
1237 __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
ec6fefb6 1238 __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1); \
631252e1 1239 __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3); \
1240 __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1); \
ec6fefb6 1241 __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3); \
1242 (row0) = __builtin_ia32_movlhps (__t0, __t1); \
1243 (row1) = __builtin_ia32_movhlps (__t1, __t0); \
1244 (row2) = __builtin_ia32_movlhps (__t2, __t3); \
1245 (row3) = __builtin_ia32_movhlps (__t3, __t2); \
d65bfb46 1246} while (0)
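/* Illustrative sketch (m is a hypothetical float[16] matrix): the macro
   rewrites its four row arguments in place, turning rows into columns:

     __m128 r0 = _mm_loadu_ps (&m[0]);
     __m128 r1 = _mm_loadu_ps (&m[4]);
     __m128 r2 = _mm_loadu_ps (&m[8]);
     __m128 r3 = _mm_loadu_ps (&m[12]);
     _MM_TRANSPOSE4_PS (r0, r1, r2, r3);
     // r0 now holds column 0 of m, r1 column 1, and so on.
*/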
1247
e829311e 1248/* For backward source compatibility. */
bfad7f66 1249# include <emmintrin.h>
d3ceaee1 1250
ef21d40e 1251#ifdef __DISABLE_SSE__
1252#undef __DISABLE_SSE__
1253#pragma GCC pop_options
1254#endif /* __DISABLE_SSE__ */
1255
8776d2d7 1256/* The execution of the next instruction is delayed by an implementation
1257 specific amount of time. The instruction does not modify the
1258 architectural state. This is after the pop_options pragma because
1259 it does not require SSE support in the processor--the encoding is a
1260 nop on processors that do not support it. */
1261extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
1262_mm_pause (void)
1263{
1264 __builtin_ia32_pause ();
1265}
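/* Illustrative sketch (ready is a hypothetical volatile flag): _mm_pause is
   typically placed in the body of a spin-wait loop to save power and yield
   pipeline resources to the sibling hardware thread:

     while (!*ready)
       _mm_pause ();
*/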
1266
d65bfb46 1267#endif /* _XMMINTRIN_H_INCLUDED */