Importing "grsecurity-3.1-3.19.2-201503201903.patch"
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index e2f5e21c03b3044a14ed12cb460ea2e3c0a0e13f..4b22130919b9ef91d05c9064a8c399c3c4257f54 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -42,11 +42,13 @@ do {                                                                        \
        int __d0;                                                       \
        might_fault();                                                  \
        __asm__ __volatile__(                                           \
+               __COPYUSER_SET_ES                                       \
                ASM_STAC "\n"                                           \
                "0:     rep; stosl\n"                                   \
                "       movl %2,%0\n"                                   \
                "1:     rep; stosb\n"                                   \
                "2: " ASM_CLAC "\n"                                     \
+               __COPYUSER_RESTORE_ES                                   \
                ".section .fixup,\"ax\"\n"                              \
                "3:     lea 0(%2,%0,4),%0\n"                            \
                "       jmp 2b\n"                                       \
@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
 
 #ifdef CONFIG_X86_INTEL_USERCOPY
 static unsigned long
-__copy_user_intel(void __user *to, const void *from, unsigned long size)
+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
 {
        int d0, d1;
        __asm__ __volatile__(
@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
                       "       .align 2,0x90\n"
                       "3:     movl 0(%4), %%eax\n"
                       "4:     movl 4(%4), %%edx\n"
-                      "5:     movl %%eax, 0(%3)\n"
-                      "6:     movl %%edx, 4(%3)\n"
+                      "5:     "__copyuser_seg" movl %%eax, 0(%3)\n"
+                      "6:     "__copyuser_seg" movl %%edx, 4(%3)\n"
                       "7:     movl 8(%4), %%eax\n"
                       "8:     movl 12(%4),%%edx\n"
-                      "9:     movl %%eax, 8(%3)\n"
-                      "10:    movl %%edx, 12(%3)\n"
+                      "9:     "__copyuser_seg" movl %%eax, 8(%3)\n"
+                      "10:    "__copyuser_seg" movl %%edx, 12(%3)\n"
                       "11:    movl 16(%4), %%eax\n"
                       "12:    movl 20(%4), %%edx\n"
-                      "13:    movl %%eax, 16(%3)\n"
-                      "14:    movl %%edx, 20(%3)\n"
+                      "13:    "__copyuser_seg" movl %%eax, 16(%3)\n"
+                      "14:    "__copyuser_seg" movl %%edx, 20(%3)\n"
                       "15:    movl 24(%4), %%eax\n"
                       "16:    movl 28(%4), %%edx\n"
-                      "17:    movl %%eax, 24(%3)\n"
-                      "18:    movl %%edx, 28(%3)\n"
+                      "17:    "__copyuser_seg" movl %%eax, 24(%3)\n"
+                      "18:    "__copyuser_seg" movl %%edx, 28(%3)\n"
                       "19:    movl 32(%4), %%eax\n"
                       "20:    movl 36(%4), %%edx\n"
-                      "21:    movl %%eax, 32(%3)\n"
-                      "22:    movl %%edx, 36(%3)\n"
+                      "21:    "__copyuser_seg" movl %%eax, 32(%3)\n"
+                      "22:    "__copyuser_seg" movl %%edx, 36(%3)\n"
                       "23:    movl 40(%4), %%eax\n"
                       "24:    movl 44(%4), %%edx\n"
-                      "25:    movl %%eax, 40(%3)\n"
-                      "26:    movl %%edx, 44(%3)\n"
+                      "25:    "__copyuser_seg" movl %%eax, 40(%3)\n"
+                      "26:    "__copyuser_seg" movl %%edx, 44(%3)\n"
                       "27:    movl 48(%4), %%eax\n"
                       "28:    movl 52(%4), %%edx\n"
-                      "29:    movl %%eax, 48(%3)\n"
-                      "30:    movl %%edx, 52(%3)\n"
+                      "29:    "__copyuser_seg" movl %%eax, 48(%3)\n"
+                      "30:    "__copyuser_seg" movl %%edx, 52(%3)\n"
                       "31:    movl 56(%4), %%eax\n"
                       "32:    movl 60(%4), %%edx\n"
-                      "33:    movl %%eax, 56(%3)\n"
-                      "34:    movl %%edx, 60(%3)\n"
+                      "33:    "__copyuser_seg" movl %%eax, 56(%3)\n"
+                      "34:    "__copyuser_seg" movl %%edx, 60(%3)\n"
                       "       addl $-64, %0\n"
                       "       addl $64, %4\n"
                       "       addl $64, %3\n"
@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
                       "       shrl  $2, %0\n"
                       "       andl  $3, %%eax\n"
                       "       cld\n"
+                      __COPYUSER_SET_ES
                       "99:    rep; movsl\n"
                       "36:    movl %%eax, %0\n"
                       "37:    rep; movsb\n"
                       "100:\n"
+                      __COPYUSER_RESTORE_ES
+                      ".section .fixup,\"ax\"\n"
+                      "101:   lea 0(%%eax,%0,4),%0\n"
+                      "       jmp 100b\n"
+                      ".previous\n"
+                      _ASM_EXTABLE(1b,100b)
+                      _ASM_EXTABLE(2b,100b)
+                      _ASM_EXTABLE(3b,100b)
+                      _ASM_EXTABLE(4b,100b)
+                      _ASM_EXTABLE(5b,100b)
+                      _ASM_EXTABLE(6b,100b)
+                      _ASM_EXTABLE(7b,100b)
+                      _ASM_EXTABLE(8b,100b)
+                      _ASM_EXTABLE(9b,100b)
+                      _ASM_EXTABLE(10b,100b)
+                      _ASM_EXTABLE(11b,100b)
+                      _ASM_EXTABLE(12b,100b)
+                      _ASM_EXTABLE(13b,100b)
+                      _ASM_EXTABLE(14b,100b)
+                      _ASM_EXTABLE(15b,100b)
+                      _ASM_EXTABLE(16b,100b)
+                      _ASM_EXTABLE(17b,100b)
+                      _ASM_EXTABLE(18b,100b)
+                      _ASM_EXTABLE(19b,100b)
+                      _ASM_EXTABLE(20b,100b)
+                      _ASM_EXTABLE(21b,100b)
+                      _ASM_EXTABLE(22b,100b)
+                      _ASM_EXTABLE(23b,100b)
+                      _ASM_EXTABLE(24b,100b)
+                      _ASM_EXTABLE(25b,100b)
+                      _ASM_EXTABLE(26b,100b)
+                      _ASM_EXTABLE(27b,100b)
+                      _ASM_EXTABLE(28b,100b)
+                      _ASM_EXTABLE(29b,100b)
+                      _ASM_EXTABLE(30b,100b)
+                      _ASM_EXTABLE(31b,100b)
+                      _ASM_EXTABLE(32b,100b)
+                      _ASM_EXTABLE(33b,100b)
+                      _ASM_EXTABLE(34b,100b)
+                      _ASM_EXTABLE(35b,100b)
+                      _ASM_EXTABLE(36b,100b)
+                      _ASM_EXTABLE(37b,100b)
+                      _ASM_EXTABLE(99b,101b)
+                      : "=&c"(size), "=&D" (d0), "=&S" (d1)
+                      :  "1"(to), "2"(from), "0"(size)
+                      : "eax", "edx", "memory");
+       return size;
+}
+
+static unsigned long
+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
+{
+       int d0, d1;
+       __asm__ __volatile__(
+                      "       .align 2,0x90\n"
+                      "1:     "__copyuser_seg" movl 32(%4), %%eax\n"
+                      "       cmpl $67, %0\n"
+                      "       jbe 3f\n"
+                      "2:     "__copyuser_seg" movl 64(%4), %%eax\n"
+                      "       .align 2,0x90\n"
+                      "3:     "__copyuser_seg" movl 0(%4), %%eax\n"
+                      "4:     "__copyuser_seg" movl 4(%4), %%edx\n"
+                      "5:     movl %%eax, 0(%3)\n"
+                      "6:     movl %%edx, 4(%3)\n"
+                      "7:     "__copyuser_seg" movl 8(%4), %%eax\n"
+                      "8:     "__copyuser_seg" movl 12(%4),%%edx\n"
+                      "9:     movl %%eax, 8(%3)\n"
+                      "10:    movl %%edx, 12(%3)\n"
+                      "11:    "__copyuser_seg" movl 16(%4), %%eax\n"
+                      "12:    "__copyuser_seg" movl 20(%4), %%edx\n"
+                      "13:    movl %%eax, 16(%3)\n"
+                      "14:    movl %%edx, 20(%3)\n"
+                      "15:    "__copyuser_seg" movl 24(%4), %%eax\n"
+                      "16:    "__copyuser_seg" movl 28(%4), %%edx\n"
+                      "17:    movl %%eax, 24(%3)\n"
+                      "18:    movl %%edx, 28(%3)\n"
+                      "19:    "__copyuser_seg" movl 32(%4), %%eax\n"
+                      "20:    "__copyuser_seg" movl 36(%4), %%edx\n"
+                      "21:    movl %%eax, 32(%3)\n"
+                      "22:    movl %%edx, 36(%3)\n"
+                      "23:    "__copyuser_seg" movl 40(%4), %%eax\n"
+                      "24:    "__copyuser_seg" movl 44(%4), %%edx\n"
+                      "25:    movl %%eax, 40(%3)\n"
+                      "26:    movl %%edx, 44(%3)\n"
+                      "27:    "__copyuser_seg" movl 48(%4), %%eax\n"
+                      "28:    "__copyuser_seg" movl 52(%4), %%edx\n"
+                      "29:    movl %%eax, 48(%3)\n"
+                      "30:    movl %%edx, 52(%3)\n"
+                      "31:    "__copyuser_seg" movl 56(%4), %%eax\n"
+                      "32:    "__copyuser_seg" movl 60(%4), %%edx\n"
+                      "33:    movl %%eax, 56(%3)\n"
+                      "34:    movl %%edx, 60(%3)\n"
+                      "       addl $-64, %0\n"
+                      "       addl $64, %4\n"
+                      "       addl $64, %3\n"
+                      "       cmpl $63, %0\n"
+                      "       ja  1b\n"
+                      "35:    movl  %0, %%eax\n"
+                      "       shrl  $2, %0\n"
+                      "       andl  $3, %%eax\n"
+                      "       cld\n"
+                      "99:    rep; "__copyuser_seg" movsl\n"
+                      "36:    movl %%eax, %0\n"
+                      "37:    rep; "__copyuser_seg" movsb\n"
+                      "100:\n"
                       ".section .fixup,\"ax\"\n"
                       "101:   lea 0(%%eax,%0,4),%0\n"
                       "       jmp 100b\n"
@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
        int d0, d1;
        __asm__ __volatile__(
                       "        .align 2,0x90\n"
-                      "0:      movl 32(%4), %%eax\n"
+                      "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
                       "        cmpl $67, %0\n"
                       "        jbe 2f\n"
-                      "1:      movl 64(%4), %%eax\n"
+                      "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
                       "        .align 2,0x90\n"
-                      "2:      movl 0(%4), %%eax\n"
-                      "21:     movl 4(%4), %%edx\n"
+                      "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
+                      "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
                       "        movl %%eax, 0(%3)\n"
                       "        movl %%edx, 4(%3)\n"
-                      "3:      movl 8(%4), %%eax\n"
-                      "31:     movl 12(%4),%%edx\n"
+                      "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
+                      "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
                       "        movl %%eax, 8(%3)\n"
                       "        movl %%edx, 12(%3)\n"
-                      "4:      movl 16(%4), %%eax\n"
-                      "41:     movl 20(%4), %%edx\n"
+                      "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
+                      "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
                       "        movl %%eax, 16(%3)\n"
                       "        movl %%edx, 20(%3)\n"
-                      "10:     movl 24(%4), %%eax\n"
-                      "51:     movl 28(%4), %%edx\n"
+                      "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
+                      "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
                       "        movl %%eax, 24(%3)\n"
                       "        movl %%edx, 28(%3)\n"
-                      "11:     movl 32(%4), %%eax\n"
-                      "61:     movl 36(%4), %%edx\n"
+                      "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
+                      "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
                       "        movl %%eax, 32(%3)\n"
                       "        movl %%edx, 36(%3)\n"
-                      "12:     movl 40(%4), %%eax\n"
-                      "71:     movl 44(%4), %%edx\n"
+                      "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
+                      "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
                       "        movl %%eax, 40(%3)\n"
                       "        movl %%edx, 44(%3)\n"
-                      "13:     movl 48(%4), %%eax\n"
-                      "81:     movl 52(%4), %%edx\n"
+                      "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
+                      "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
                       "        movl %%eax, 48(%3)\n"
                       "        movl %%edx, 52(%3)\n"
-                      "14:     movl 56(%4), %%eax\n"
-                      "91:     movl 60(%4), %%edx\n"
+                      "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
+                      "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
                       "        movl %%eax, 56(%3)\n"
                       "        movl %%edx, 60(%3)\n"
                       "        addl $-64, %0\n"
@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
                       "        shrl  $2, %0\n"
                       "        andl $3, %%eax\n"
                       "        cld\n"
-                      "6:      rep; movsl\n"
+                      "6:      rep; "__copyuser_seg" movsl\n"
                       "        movl %%eax,%0\n"
-                      "7:      rep; movsb\n"
+                      "7:      rep; "__copyuser_seg" movsb\n"
                       "8:\n"
                       ".section .fixup,\"ax\"\n"
                       "9:      lea 0(%%eax,%0,4),%0\n"
@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
 
        __asm__ __volatile__(
               "        .align 2,0x90\n"
-              "0:      movl 32(%4), %%eax\n"
+              "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
               "        cmpl $67, %0\n"
               "        jbe 2f\n"
-              "1:      movl 64(%4), %%eax\n"
+              "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
               "        .align 2,0x90\n"
-              "2:      movl 0(%4), %%eax\n"
-              "21:     movl 4(%4), %%edx\n"
+              "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
+              "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
               "        movnti %%eax, 0(%3)\n"
               "        movnti %%edx, 4(%3)\n"
-              "3:      movl 8(%4), %%eax\n"
-              "31:     movl 12(%4),%%edx\n"
+              "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
+              "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
               "        movnti %%eax, 8(%3)\n"
               "        movnti %%edx, 12(%3)\n"
-              "4:      movl 16(%4), %%eax\n"
-              "41:     movl 20(%4), %%edx\n"
+              "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
+              "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
               "        movnti %%eax, 16(%3)\n"
               "        movnti %%edx, 20(%3)\n"
-              "10:     movl 24(%4), %%eax\n"
-              "51:     movl 28(%4), %%edx\n"
+              "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
+              "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
               "        movnti %%eax, 24(%3)\n"
               "        movnti %%edx, 28(%3)\n"
-              "11:     movl 32(%4), %%eax\n"
-              "61:     movl 36(%4), %%edx\n"
+              "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
+              "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
               "        movnti %%eax, 32(%3)\n"
               "        movnti %%edx, 36(%3)\n"
-              "12:     movl 40(%4), %%eax\n"
-              "71:     movl 44(%4), %%edx\n"
+              "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
+              "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
               "        movnti %%eax, 40(%3)\n"
               "        movnti %%edx, 44(%3)\n"
-              "13:     movl 48(%4), %%eax\n"
-              "81:     movl 52(%4), %%edx\n"
+              "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
+              "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
               "        movnti %%eax, 48(%3)\n"
               "        movnti %%edx, 52(%3)\n"
-              "14:     movl 56(%4), %%eax\n"
-              "91:     movl 60(%4), %%edx\n"
+              "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
+              "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
               "        movnti %%eax, 56(%3)\n"
               "        movnti %%edx, 60(%3)\n"
               "        addl $-64, %0\n"
@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
               "        shrl  $2, %0\n"
               "        andl $3, %%eax\n"
               "        cld\n"
-              "6:      rep; movsl\n"
+              "6:      rep; "__copyuser_seg" movsl\n"
               "        movl %%eax,%0\n"
-              "7:      rep; movsb\n"
+              "7:      rep; "__copyuser_seg" movsb\n"
               "8:\n"
               ".section .fixup,\"ax\"\n"
               "9:      lea 0(%%eax,%0,4),%0\n"
@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
 
        __asm__ __volatile__(
               "        .align 2,0x90\n"
-              "0:      movl 32(%4), %%eax\n"
+              "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
               "        cmpl $67, %0\n"
               "        jbe 2f\n"
-              "1:      movl 64(%4), %%eax\n"
+              "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
               "        .align 2,0x90\n"
-              "2:      movl 0(%4), %%eax\n"
-              "21:     movl 4(%4), %%edx\n"
+              "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
+              "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
               "        movnti %%eax, 0(%3)\n"
               "        movnti %%edx, 4(%3)\n"
-              "3:      movl 8(%4), %%eax\n"
-              "31:     movl 12(%4),%%edx\n"
+              "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
+              "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
               "        movnti %%eax, 8(%3)\n"
               "        movnti %%edx, 12(%3)\n"
-              "4:      movl 16(%4), %%eax\n"
-              "41:     movl 20(%4), %%edx\n"
+              "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
+              "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
               "        movnti %%eax, 16(%3)\n"
               "        movnti %%edx, 20(%3)\n"
-              "10:     movl 24(%4), %%eax\n"
-              "51:     movl 28(%4), %%edx\n"
+              "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
+              "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
               "        movnti %%eax, 24(%3)\n"
               "        movnti %%edx, 28(%3)\n"
-              "11:     movl 32(%4), %%eax\n"
-              "61:     movl 36(%4), %%edx\n"
+              "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
+              "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
               "        movnti %%eax, 32(%3)\n"
               "        movnti %%edx, 36(%3)\n"
-              "12:     movl 40(%4), %%eax\n"
-              "71:     movl 44(%4), %%edx\n"
+              "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
+              "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
               "        movnti %%eax, 40(%3)\n"
               "        movnti %%edx, 44(%3)\n"
-              "13:     movl 48(%4), %%eax\n"
-              "81:     movl 52(%4), %%edx\n"
+              "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
+              "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
               "        movnti %%eax, 48(%3)\n"
               "        movnti %%edx, 52(%3)\n"
-              "14:     movl 56(%4), %%eax\n"
-              "91:     movl 60(%4), %%edx\n"
+              "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
+              "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
               "        movnti %%eax, 56(%3)\n"
               "        movnti %%edx, 60(%3)\n"
               "        addl $-64, %0\n"
@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
               "        shrl  $2, %0\n"
               "        andl $3, %%eax\n"
               "        cld\n"
-              "6:      rep; movsl\n"
+              "6:      rep; "__copyuser_seg" movsl\n"
               "        movl %%eax,%0\n"
-              "7:      rep; movsb\n"
+              "7:      rep; "__copyuser_seg" movsb\n"
               "8:\n"
               ".section .fixup,\"ax\"\n"
               "9:      lea 0(%%eax,%0,4),%0\n"
@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
  */
 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
                                        unsigned long size);
-unsigned long __copy_user_intel(void __user *to, const void *from,
+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
+                                       unsigned long size);
+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
                                        unsigned long size);
 unsigned long __copy_user_zeroing_intel_nocache(void *to,
                                const void __user *from, unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy.  */
-#define __copy_user(to, from, size)                                    \
+#define __copy_user(to, from, size, prefix, set, restore)              \
 do {                                                                   \
        int __d0, __d1, __d2;                                           \
        __asm__ __volatile__(                                           \
+               set                                                     \
                "       cmp  $7,%0\n"                                   \
                "       jbe  1f\n"                                      \
                "       movl %1,%0\n"                                   \
                "       negl %0\n"                                      \
                "       andl $7,%0\n"                                   \
                "       subl %0,%3\n"                                   \
-               "4:     rep; movsb\n"                                   \
+               "4:     rep; "prefix"movsb\n"                           \
                "       movl %3,%0\n"                                   \
                "       shrl $2,%0\n"                                   \
                "       andl $3,%3\n"                                   \
                "       .align 2,0x90\n"                                \
-               "0:     rep; movsl\n"                                   \
+               "0:     rep; "prefix"movsl\n"                           \
                "       movl %3,%0\n"                                   \
-               "1:     rep; movsb\n"                                   \
+               "1:     rep; "prefix"movsb\n"                           \
                "2:\n"                                                  \
+               restore                                                 \
                ".section .fixup,\"ax\"\n"                              \
                "5:     addl %3,%0\n"                                   \
                "       jmp 2b\n"                                       \
@@ -538,14 +650,14 @@ do {                                                                      \
                "       negl %0\n"                                      \
                "       andl $7,%0\n"                                   \
                "       subl %0,%3\n"                                   \
-               "4:     rep; movsb\n"                                   \
+               "4:     rep; "__copyuser_seg"movsb\n"                   \
                "       movl %3,%0\n"                                   \
                "       shrl $2,%0\n"                                   \
                "       andl $3,%3\n"                                   \
                "       .align 2,0x90\n"                                \
-               "0:     rep; movsl\n"                                   \
+               "0:     rep; "__copyuser_seg"movsl\n"                   \
                "       movl %3,%0\n"                                   \
-               "1:     rep; movsb\n"                                   \
+               "1:     rep; "__copyuser_seg"movsb\n"                   \
                "2:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "5:     addl %3,%0\n"                                   \
@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
 {
        stac();
        if (movsl_is_ok(to, from, n))
-               __copy_user(to, from, n);
+               __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
        else
-               n = __copy_user_intel(to, from, n);
+               n = __generic_copy_to_user_intel(to, from, n);
        clac();
        return n;
 }
@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
 {
        stac();
        if (movsl_is_ok(to, from, n))
-               __copy_user(to, from, n);
+               __copy_user(to, from, n, __copyuser_seg, "", "");
        else
-               n = __copy_user_intel((void __user *)to,
-                                     (const void *)from, n);
+               n = __generic_copy_from_user_intel(to, from, n);
        clac();
        return n;
 }
@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
        if (n > 64 && cpu_has_xmm2)
                n = __copy_user_intel_nocache(to, from, n);
        else
-               __copy_user(to, from, n);
+               __copy_user(to, from, n, __copyuser_seg, "", "");
 #else
-       __copy_user(to, from, n);
+       __copy_user(to, from, n, __copyuser_seg, "", "");
 #endif
        clac();
        return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
 
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+void __set_fs(mm_segment_t x)
 {
-       if (access_ok(VERIFY_WRITE, to, n))
-               n = __copy_to_user(to, from, n);
-       return n;
+       switch (x.seg) {
+       case 0:
+               loadsegment(gs, 0);
+               break;
+       case TASK_SIZE_MAX:
+               loadsegment(gs, __USER_DS);
+               break;
+       case -1UL:
+               loadsegment(gs, __KERNEL_DS);
+               break;
+       default:
+               BUG();
+       }
 }
-EXPORT_SYMBOL(_copy_to_user);
+EXPORT_SYMBOL(__set_fs);
 
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
+void set_fs(mm_segment_t x)
 {
-       if (access_ok(VERIFY_READ, from, n))
-               n = __copy_from_user(to, from, n);
-       else
-               memset(to, 0, n);
-       return n;
+       current_thread_info()->addr_limit = x;
+       __set_fs(x);
 }
-EXPORT_SYMBOL(_copy_from_user);
+EXPORT_SYMBOL(set_fs);
+#endif
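
The last hunk drops the out-of-line _copy_to_user()/_copy_from_user() definitions and adds the UDEREF segment plumbing for set_fs(): __set_fs() maps the three possible address limits onto a %gs selector (0 when no user access is allowed, __USER_DS for the normal TASK_SIZE_MAX limit, __KERNEL_DS for KERNEL_DS, whose limit is -1UL), and set_fs() keeps addr_limit and %gs in sync. The classic call pattern this has to keep working looks as follows (an illustrative sketch, not code from this diff):

	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);  /* under UDEREF also does loadsegment(gs, __KERNEL_DS) */
	/* ...the gs-prefixed accessors above may now reach kernel addresses... */
	set_fs(old_fs);     /* restores both addr_limit and the %gs selector */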