+2008-11-13 Uros Bizjak <ubizjak@gmail.com>
+
+ Backport from mainline:
+ 2008-06-06 Uros Bizjak <ubizjak@gmail.com>
+
+ PR rtl-optimization/36438
+ * cse.c (fold_rtx) [ASHIFT, LSHIFTRT, ASHIFTRT]: Break out early
+ for vector shifts with constant scalar shift operands.
+
2008-11-12 Jason Merrill <jason@redhat.com>
PR c++/38007
&& exact_log2 (- INTVAL (const_arg1)) >= 0)))
break;
+ /* ??? Vector mode shifts by scalar
+ shift operand are not supported yet. */
+ if (is_shift && VECTOR_MODE_P (mode))
+ break;
+
if (is_shift
&& (INTVAL (inner_const) >= GET_MODE_BITSIZE (mode)
|| INTVAL (inner_const) < 0))
+2008-11-13 Uros Bizjak <ubizjak@gmail.com>
+
+ Backport from mainline:
+ 2008-06-06 Uros Bizjak <ubizjak@gmail.com>
+
+ PR rtl-optimization/36438
+ * gcc.target/i386/pr36438.c: New test.
+
2008-11-13 Jason Merrill <jason@redhat.com>
PR c++/37932
PR middle-end/37807
PR middle-end/37809
- * gcc/testsuite/gcc.target/i386/mmx-8.c: New test.
+ * gcc.target/i386/mmx-8.c: New test.
2008-10-07 H.J. Lu <hongjiu.lu@intel.com>
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -mmmx" } */
+
+/* PR rtl-optimization/36438 — regression test for CSE of vector
+   shifts with a constant scalar shift operand (see the cse.c
+   fold_rtx change in this patch).  Compile-only.  */
+
+#include <mmintrin.h>
+
+/* NOTE(review): presumably declared extern (never defined here) so
+   the mask value is opaque to the optimizer and the shifts below
+   cannot be constant-folded away — confirm against the PR.  */
+extern __m64 SetS16 (unsigned short, unsigned short,
+ unsigned short, unsigned short);
+
+void
+foo (__m64 * dest)
+{
+ __m64 mask = SetS16 (0x00FF, 0xFF00, 0x0000, 0x00FF);
+
+ /* Two identical V-mode shifts by the constant scalar 8 — the exact
+    pattern the fold_rtx hunk bails out on; do not merge or alter.  */
+ mask = _mm_slli_si64 (mask, 8);
+ mask = _mm_slli_si64 (mask, 8);
+
+ *dest = mask;
+
+ /* Required after MMX code so the FP/MMX state is reset.  */
+ _mm_empty ();
+}