/* Copyright (C) 2002-2016 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _MMINTRIN_H_INCLUDED
#define _MMINTRIN_H_INCLUDED

#ifndef __MMX__
#pragma GCC push_options
#pragma GCC target("mmx")
#define __DISABLE_MMX__
#endif /* __MMX__ */

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef int __m64 __attribute__ ((__vector_size__ (8), __may_alias__));

/* Internal data types for implementing the intrinsics.  */
typedef int __v2si __attribute__ ((__vector_size__ (8)));
typedef short __v4hi __attribute__ ((__vector_size__ (8)));
typedef char __v8qi __attribute__ ((__vector_size__ (8)));
typedef long long __v1di __attribute__ ((__vector_size__ (8)));
typedef float __v2sf __attribute__ ((__vector_size__ (8)));

/* Empty the multimedia state.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_empty (void)
{
  __builtin_ia32_emms ();
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_empty (void)
{
  _mm_empty ();
}

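/* Usage sketch (editor's illustration, not part of the original
   header): code that uses the MMX registers should call _mm_empty
   before any subsequent x87 floating-point arithmetic, because the
   MMX registers alias the x87 register stack.  The helper below is
   hypothetical:

     static void
     negate_bytes (__m64 *dst, const __m64 *src, int n)
     {
       int i;
       for (i = 0; i < n; i++)
         dst[i] = _mm_sub_pi8 (_mm_setzero_si64 (), src[i]);
       _mm_empty ();
     }
*/
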
/* Convert I to a __m64 object.  The integer is zero-extended to 64 bits.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi32_si64 (int __i)
{
  return (__m64) __builtin_ia32_vec_init_v2si (__i, 0);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_from_int (int __i)
{
  return _mm_cvtsi32_si64 (__i);
}

#ifdef __x86_64__
/* Convert I to a __m64 object.  */

/* Intel intrinsic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_from_int64 (long long __i)
{
  return (__m64) __i;
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_m64 (long long __i)
{
  return (__m64) __i;
}

/* Microsoft intrinsic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64x_si64 (long long __i)
{
  return (__m64) __i;
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_pi64x (long long __i)
{
  return (__m64) __i;
}
#endif

/* Convert the lower 32 bits of the __m64 object into an integer.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_si32 (__m64 __i)
{
  return __builtin_ia32_vec_ext_v2si ((__v2si)__i, 0);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_to_int (__m64 __i)
{
  return _mm_cvtsi64_si32 (__i);
}

#ifdef __x86_64__
/* Convert the __m64 object to a 64-bit integer.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_to_int64 (__m64 __i)
{
  return (long long)__i;
}

extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtm64_si64 (__m64 __i)
{
  return (long long)__i;
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_si64x (__m64 __i)
{
  return (long long)__i;
}
#endif

/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
   the result, and the four 16-bit values from M2 into the upper four 8-bit
   values of the result, all with signed saturation.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_packs_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_packsswb ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_packsswb (__m64 __m1, __m64 __m2)
{
  return _mm_packs_pi16 (__m1, __m2);
}

/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
   the result, and the two 32-bit values from M2 into the upper two 16-bit
   values of the result, all with signed saturation.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_packs_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_packssdw ((__v2si)__m1, (__v2si)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_packssdw (__m64 __m1, __m64 __m2)
{
  return _mm_packs_pi32 (__m1, __m2);
}

/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
   the result, and the four 16-bit values from M2 into the upper four 8-bit
   values of the result, all with unsigned saturation.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_packs_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_packuswb ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_packuswb (__m64 __m1, __m64 __m2)
{
  return _mm_packs_pu16 (__m1, __m2);
}

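/* Example (editor's sketch, not from the specification): packing
   saturates out-of-range elements.  With signed saturation a 16-bit
   300 clamps to the 8-bit maximum 127 and -300 clamps to -128; with
   _mm_packs_pu16 anything negative clamps to 0 and anything above
   255 clamps to 255:

     __m64 w = _mm_set_pi16 (300, -300, 5, -5);
     __m64 b = _mm_packs_pi16 (w, w);

   The low four bytes of b are -5, 5, -128, 127, least significant
   first.  */
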
/* Interleave the four 8-bit values from the high half of M1 with the four
   8-bit values from the high half of M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpckhbw ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_punpckhbw (__m64 __m1, __m64 __m2)
{
  return _mm_unpackhi_pi8 (__m1, __m2);
}

/* Interleave the two 16-bit values from the high half of M1 with the two
   16-bit values from the high half of M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpckhwd ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_punpckhwd (__m64 __m1, __m64 __m2)
{
  return _mm_unpackhi_pi16 (__m1, __m2);
}

/* Interleave the 32-bit value from the high half of M1 with the 32-bit
   value from the high half of M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpckhdq ((__v2si)__m1, (__v2si)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_punpckhdq (__m64 __m1, __m64 __m2)
{
  return _mm_unpackhi_pi32 (__m1, __m2);
}

/* Interleave the four 8-bit values from the low half of M1 with the four
   8-bit values from the low half of M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpcklbw ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_punpcklbw (__m64 __m1, __m64 __m2)
{
  return _mm_unpacklo_pi8 (__m1, __m2);
}

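/* Example (editor's sketch): a common use of the unpack operations
   is widening.  Interleaving with a zero vector zero-extends the low
   four unsigned bytes of BYTES to 16-bit elements:

     __m64 lo16 = _mm_unpacklo_pi8 (bytes, _mm_setzero_si64 ());
*/
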
/* Interleave the two 16-bit values from the low half of M1 with the two
   16-bit values from the low half of M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpcklwd ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_punpcklwd (__m64 __m1, __m64 __m2)
{
  return _mm_unpacklo_pi16 (__m1, __m2);
}

/* Interleave the 32-bit value from the low half of M1 with the 32-bit
   value from the low half of M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_punpckldq ((__v2si)__m1, (__v2si)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_punpckldq (__m64 __m1, __m64 __m2)
{
  return _mm_unpacklo_pi32 (__m1, __m2);
}

/* Add the 8-bit values in M1 to the 8-bit values in M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_paddb (__m64 __m1, __m64 __m2)
{
  return _mm_add_pi8 (__m1, __m2);
}

/* Add the 16-bit values in M1 to the 16-bit values in M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_paddw (__m64 __m1, __m64 __m2)
{
  return _mm_add_pi16 (__m1, __m2);
}

/* Add the 32-bit values in M1 to the 32-bit values in M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddd ((__v2si)__m1, (__v2si)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_paddd (__m64 __m1, __m64 __m2)
{
  return _mm_add_pi32 (__m1, __m2);
}

/* Add the 64-bit value in M1 to the 64-bit value in M2.  */
#ifndef __SSE2__
#pragma GCC push_options
#pragma GCC target("sse2")
#define __DISABLE_SSE2__
#endif /* __SSE2__ */

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_si64 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddq ((__v1di)__m1, (__v1di)__m2);
}
#ifdef __DISABLE_SSE2__
#undef __DISABLE_SSE2__
#pragma GCC pop_options
#endif /* __DISABLE_SSE2__ */

/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
   saturated arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddsb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_paddsb (__m64 __m1, __m64 __m2)
{
  return _mm_adds_pi8 (__m1, __m2);
}

/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
   saturated arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddsw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_paddsw (__m64 __m1, __m64 __m2)
{
  return _mm_adds_pi16 (__m1, __m2);
}

/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
   saturated arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pu8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddusb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_paddusb (__m64 __m1, __m64 __m2)
{
  return _mm_adds_pu8 (__m1, __m2);
}

/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
   saturated arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_paddusw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_paddusw (__m64 __m1, __m64 __m2)
{
  return _mm_adds_pu16 (__m1, __m2);
}

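/* Example (editor's sketch): the unsigned saturating adds are the
   usual way to brighten 8-bit pixel data without wraparound; any
   byte that would exceed 255 sticks at 255 instead:

     __m64 brighter = _mm_adds_pu8 (pixels, _mm_set1_pi8 (0x20));
*/
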
/* Subtract the 8-bit values in M2 from the 8-bit values in M1.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psubb (__m64 __m1, __m64 __m2)
{
  return _mm_sub_pi8 (__m1, __m2);
}

/* Subtract the 16-bit values in M2 from the 16-bit values in M1.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psubw (__m64 __m1, __m64 __m2)
{
  return _mm_sub_pi16 (__m1, __m2);
}

/* Subtract the 32-bit values in M2 from the 32-bit values in M1.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubd ((__v2si)__m1, (__v2si)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psubd (__m64 __m1, __m64 __m2)
{
  return _mm_sub_pi32 (__m1, __m2);
}

/* Subtract the 64-bit value in M2 from the 64-bit value in M1.  */
#ifndef __SSE2__
#pragma GCC push_options
#pragma GCC target("sse2")
#define __DISABLE_SSE2__
#endif /* __SSE2__ */

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_si64 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubq ((__v1di)__m1, (__v1di)__m2);
}
#ifdef __DISABLE_SSE2__
#undef __DISABLE_SSE2__
#pragma GCC pop_options
#endif /* __DISABLE_SSE2__ */

/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
   saturating arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubsb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psubsb (__m64 __m1, __m64 __m2)
{
  return _mm_subs_pi8 (__m1, __m2);
}

/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
   signed saturating arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubsw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psubsw (__m64 __m1, __m64 __m2)
{
  return _mm_subs_pi16 (__m1, __m2);
}

/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
   unsigned saturating arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pu8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubusb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psubusb (__m64 __m1, __m64 __m2)
{
  return _mm_subs_pu8 (__m1, __m2);
}

/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
   unsigned saturating arithmetic.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_psubusw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psubusw (__m64 __m1, __m64 __m2)
{
  return _mm_subs_pu16 (__m1, __m2);
}

/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
   four 32-bit intermediate results, which are then summed by pairs to
   produce two 32-bit results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_madd_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pmaddwd ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaddwd (__m64 __m1, __m64 __m2)
{
  return _mm_madd_pi16 (__m1, __m2);
}

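/* Example (editor's sketch): _mm_madd_pi16 is the core of small
   fixed-point dot products.  For A and B holding four 16-bit values
   each it yields A0*B0 + A1*B1 in the low 32-bit element and
   A2*B2 + A3*B3 in the high one, so one more add and an extract
   finish a four-element dot product:

     __m64 sums = _mm_madd_pi16 (a, b);
     __m64 hi = _mm_unpackhi_pi32 (sums, sums);
     int dot = _mm_cvtsi64_si32 (_mm_add_pi32 (sums, hi));
*/
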
/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
   M2 and produce the high 16 bits of the 32-bit results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pmulhw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmulhw (__m64 __m1, __m64 __m2)
{
  return _mm_mulhi_pi16 (__m1, __m2);
}

/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
   the low 16 bits of the results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mullo_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pmullw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmullw (__m64 __m1, __m64 __m2)
{
  return _mm_mullo_pi16 (__m1, __m2);
}

/* Shift four 16-bit values in M left by COUNT.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sll_pi16 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psllw ((__v4hi)__m, (__v4hi)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psllw (__m64 __m, __m64 __count)
{
  return _mm_sll_pi16 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_slli_pi16 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psllwi ((__v4hi)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psllwi (__m64 __m, int __count)
{
  return _mm_slli_pi16 (__m, __count);
}

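/* Example (editor's sketch): shifting left by an immediate multiplies
   each element by a power of two, modulo the element width; this
   multiplies all four 16-bit elements of M by 8:

     __m64 times8 = _mm_slli_pi16 (m, 3);
*/
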
/* Shift two 32-bit values in M left by COUNT.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sll_pi32 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_pslld ((__v2si)__m, (__v2si)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pslld (__m64 __m, __m64 __count)
{
  return _mm_sll_pi32 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_slli_pi32 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_pslldi ((__v2si)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pslldi (__m64 __m, int __count)
{
  return _mm_slli_pi32 (__m, __count);
}

/* Shift the 64-bit value in M left by COUNT.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sll_si64 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psllq ((__v1di)__m, (__v1di)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psllq (__m64 __m, __m64 __count)
{
  return _mm_sll_si64 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_slli_si64 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psllqi ((__v1di)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psllqi (__m64 __m, int __count)
{
  return _mm_slli_si64 (__m, __count);
}

/* Shift four 16-bit values in M right by COUNT; shift in the sign bit.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sra_pi16 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psraw ((__v4hi)__m, (__v4hi)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psraw (__m64 __m, __m64 __count)
{
  return _mm_sra_pi16 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srai_pi16 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psrawi ((__v4hi)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrawi (__m64 __m, int __count)
{
  return _mm_srai_pi16 (__m, __count);
}

/* Shift two 32-bit values in M right by COUNT; shift in the sign bit.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sra_pi32 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psrad ((__v2si)__m, (__v2si)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrad (__m64 __m, __m64 __count)
{
  return _mm_sra_pi32 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srai_pi32 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psradi ((__v2si)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psradi (__m64 __m, int __count)
{
  return _mm_srai_pi32 (__m, __count);
}

/* Shift four 16-bit values in M right by COUNT; shift in zeros.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srl_pi16 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, (__v4hi)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrlw (__m64 __m, __m64 __count)
{
  return _mm_srl_pi16 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srli_pi16 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psrlwi ((__v4hi)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrlwi (__m64 __m, int __count)
{
  return _mm_srli_pi16 (__m, __count);
}

/* Shift two 32-bit values in M right by COUNT; shift in zeros.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srl_pi32 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psrld ((__v2si)__m, (__v2si)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrld (__m64 __m, __m64 __count)
{
  return _mm_srl_pi32 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srli_pi32 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psrldi ((__v2si)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrldi (__m64 __m, int __count)
{
  return _mm_srli_pi32 (__m, __count);
}

/* Shift the 64-bit value in M right by COUNT; shift in zeros.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srl_si64 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_ia32_psrlq ((__v1di)__m, (__v1di)__count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrlq (__m64 __m, __m64 __count)
{
  return _mm_srl_si64 (__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srli_si64 (__m64 __m, int __count)
{
  return (__m64) __builtin_ia32_psrlqi ((__v1di)__m, __count);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psrlqi (__m64 __m, int __count)
{
  return _mm_srli_si64 (__m, __count);
}

/* Bit-wise AND the 64-bit values in M1 and M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_and_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_ia32_pand (__m1, __m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pand (__m64 __m1, __m64 __m2)
{
  return _mm_and_si64 (__m1, __m2);
}

/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
   64-bit value in M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_andnot_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_ia32_pandn (__m1, __m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pandn (__m64 __m1, __m64 __m2)
{
  return _mm_andnot_si64 (__m1, __m2);
}

/* Bit-wise inclusive OR the 64-bit values in M1 and M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_or_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_ia32_por (__m1, __m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_por (__m64 __m1, __m64 __m2)
{
  return _mm_or_si64 (__m1, __m2);
}

/* Bit-wise exclusive OR the 64-bit values in M1 and M2.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_xor_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_ia32_pxor (__m1, __m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pxor (__m64 __m1, __m64 __m2)
{
  return _mm_xor_si64 (__m1, __m2);
}

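/* Example (editor's sketch): the AND/ANDNOT/OR trio forms a
   branchless select.  With MASK holding all-ones or all-zeros
   elements, as produced by the comparisons below, this picks the
   element of A where MASK is set and the element of B elsewhere:

     __m64 sel = _mm_or_si64 (_mm_and_si64 (mask, a),
                              _mm_andnot_si64 (mask, b));
*/
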
/* Compare eight 8-bit values.  The result of the comparison is 0xFF if the
   test is true and zero if false.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpeqb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pcmpeqb (__m64 __m1, __m64 __m2)
{
  return _mm_cmpeq_pi8 (__m1, __m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpgtb ((__v8qi)__m1, (__v8qi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pcmpgtb (__m64 __m1, __m64 __m2)
{
  return _mm_cmpgt_pi8 (__m1, __m2);
}

/* Compare four 16-bit values.  The result of the comparison is 0xFFFF if
   the test is true and zero if false.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpeqw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pcmpeqw (__m64 __m1, __m64 __m2)
{
  return _mm_cmpeq_pi16 (__m1, __m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpgtw ((__v4hi)__m1, (__v4hi)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pcmpgtw (__m64 __m1, __m64 __m2)
{
  return _mm_cmpgt_pi16 (__m1, __m2);
}

/* Compare two 32-bit values.  The result of the comparison is 0xFFFFFFFF if
   the test is true and zero if false.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpeqd ((__v2si)__m1, (__v2si)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pcmpeqd (__m64 __m1, __m64 __m2)
{
  return _mm_cmpeq_pi32 (__m1, __m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pcmpgtd (__m64 __m1, __m64 __m2)
{
  return _mm_cmpgt_pi32 (__m1, __m2);
}

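/* Example (editor's sketch): combined with the select idiom shown
   after _mm_xor_si64, the comparisons give a branchless per-element
   maximum of signed 16-bit values:

     __m64 gt = _mm_cmpgt_pi16 (a, b);
     __m64 max = _mm_or_si64 (_mm_and_si64 (gt, a),
                              _mm_andnot_si64 (gt, b));
*/
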
/* Creates a 64-bit zero.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setzero_si64 (void)
{
  return (__m64)0LL;
}

/* Creates a vector of two 32-bit values; I0 is least significant.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_pi32 (int __i1, int __i0)
{
  return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);
}

/* Creates a vector of four 16-bit values; W0 is least significant.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
{
  return (__m64) __builtin_ia32_vec_init_v4hi (__w0, __w1, __w2, __w3);
}

/* Creates a vector of eight 8-bit values; B0 is least significant.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
             char __b3, char __b2, char __b1, char __b0)
{
  return (__m64) __builtin_ia32_vec_init_v8qi (__b0, __b1, __b2, __b3,
                                               __b4, __b5, __b6, __b7);
}

/* Similar, but with the arguments in reverse order.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_pi32 (int __i0, int __i1)
{
  return _mm_set_pi32 (__i1, __i0);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
{
  return _mm_set_pi16 (__w3, __w2, __w1, __w0);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
              char __b4, char __b5, char __b6, char __b7)
{
  return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
}

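/* Example (editor's sketch): _mm_set_* takes the most significant
   element first and _mm_setr_* the least significant first, so these
   two calls build the same vector:

     __m64 x = _mm_set_pi16 (3, 2, 1, 0);
     __m64 y = _mm_setr_pi16 (0, 1, 2, 3);
*/
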
/* Creates a vector of two 32-bit values, both elements containing I.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_pi32 (int __i)
{
  return _mm_set_pi32 (__i, __i);
}

/* Creates a vector of four 16-bit values, all elements containing W.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_pi16 (short __w)
{
  return _mm_set_pi16 (__w, __w, __w, __w);
}

/* Creates a vector of eight 8-bit values, all elements containing B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_pi8 (char __b)
{
  return _mm_set_pi8 (__b, __b, __b, __b, __b, __b, __b, __b);
}
#ifdef __DISABLE_MMX__
#undef __DISABLE_MMX__
#pragma GCC pop_options
#endif /* __DISABLE_MMX__ */

#endif /* _MMINTRIN_H_INCLUDED */