Update copyright dates with scripts/update-copyrights.
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h b/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h
index d56ff877ac26bf6a91691308fed0d88bbe9e0c7a..91fd096ad0e9465c5d6faa71e2ff1fcc34623323 100644
--- a/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h
@@ -1,5 +1,4 @@
-/* Copyright (C) 1992, 1997, 1998, 1999, 2000, 2001, 2002
-   Free Software Foundation, Inc.
+/* Copyright (C) 1992-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
    Lesser General Public License for more details.
 
    You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, write to the Free
-   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-   02111-1307 USA.  */
-               
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
 /* Alan Modra <amodra@bigpond.net.au> rewrote the INLINE_SYSCALL macro.  */
 
 #ifndef _LINUX_POWERPC_SYSDEP_H
 #define _LINUX_POWERPC_SYSDEP_H 1
 
+#include <sysdeps/unix/sysv/linux/sysdep.h>
 #include <sysdeps/unix/powerpc/sysdep.h>
+#include <tls.h>
 
 /* Define __set_errno() for INLINE_SYSCALL macro below.  */
 #ifndef __ASSEMBLER__
    of the kernel.  But these symbols do not follow the SYS_* syntax
    so we have to redefine the `SYS_ify' macro here.  */
 #undef SYS_ify
-#ifdef __STDC__
-# define SYS_ify(syscall_name) __NR_##syscall_name
-#else
-# define SYS_ify(syscall_name) __NR_/**/syscall_name
-#endif
+#define SYS_ify(syscall_name)  __NR_##syscall_name
 
 #ifdef __ASSEMBLER__
 
 /* This seems to always be the case on PPC.  */
-#define ALIGNARG(log2) log2
-/* For ELF we need the `.type' directive to make shared libs work right.  */
-#define ASM_TYPE_DIRECTIVE(name,typearg) .type name,typearg;
-#define ASM_SIZE_DIRECTIVE(name) .size name,.-name
+# define ALIGNARG(log2) log2
+# define ASM_SIZE_DIRECTIVE(name) .size name,.-name
 
-#endif /* __ASSEMBLER__ */
+#endif /* __ASSEMBLER__ */
 
-#undef INLINE_SYSCALL
-#if 1
-#define INLINE_SYSCALL(name, nr, args...)      \
-  ({                                           \
-    DECLARGS_##nr;                             \
-    long ret, err;                             \
-    LOADARGS_##nr(name, args);                 \
-    __asm __volatile ("sc\n\t"                 \
-                     "mfcr     %1\n\t"         \
-                     : "=r" (r3), "=r" (err)   \
-                     : ASM_INPUT_##nr          \
-                     : "cc", "memory");        \
-    ret = r3;                                  \
-    if (err & 1 << 28)                         \
-      {                                                \
-       __set_errno (ret);                      \
-       ret = -1L;                              \
-      }                                                \
-    ret;                                       \
+/* This version is for internal uses where there is no need
+   to set errno.  */
+#define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, type, nr, args...)   \
+  ({                                                                         \
+    type sc_ret = ENOSYS;                                                    \
+                                                                             \
+    __typeof (__vdso_##name) vdsop = __vdso_##name;                          \
+    PTR_DEMANGLE (vdsop);                                                    \
+    if (vdsop != NULL)                                                       \
+      sc_ret =                                                               \
+        INTERNAL_VSYSCALL_CALL_TYPE (vdsop, err, type, nr, ##args);          \
+    else                                                                     \
+      err = 1 << 28;                                                         \
+    sc_ret;                                                                  \
   })
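
A minimal usage sketch, not part of this patch: the wrapper name is hypothetical, and it assumes a __vdso_clock_gettime declaration is in scope (the ## pasting inside the macro requires one).  It combines the fallback macro with the error test defined further below:

      /* Sketch: call clock_gettime through the vDSO when it is mapped;
         when it is not, the macro reports failure by setting bit 28 of
         ERR, mirroring the CR0.SO convention of "sc".  */
      int
      clock_gettime_sketch (clockid_t id, struct timespec *tp)
      {
        long int err = 0;
        long int ret = INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK
          (clock_gettime, err, long int, 2, id, tp);
        return INTERNAL_SYSCALL_ERROR_P (ret, err) ? -1 : (int) ret;
      }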
 
-#define DECLARGS_0 register long r0 __asm__ ("r0");    \
-                  register long r3 __asm__ ("r3")
-#define DECLARGS_1 DECLARGS_0
-#define DECLARGS_2 DECLARGS_1; register long r4 __asm__ ("r4")
-#define DECLARGS_3 DECLARGS_2; register long r5 __asm__ ("r5")
-#define DECLARGS_4 DECLARGS_3; register long r6 __asm__ ("r6")
-#define DECLARGS_5 DECLARGS_4; register long r7 __asm__ ("r7")
-#define DECLARGS_6 DECLARGS_5; register long r8 __asm__ ("r8")
-
-#define LOADARGS_0(name) \
-       r0 = __NR_##name
-#define LOADARGS_1(name, arg1) \
-       LOADARGS_0(name); \
-       r3 = (long) (arg1)
-#define LOADARGS_2(name, arg1, arg2) \
-       LOADARGS_1(name, arg1); \
-       r4 = (long) (arg2)
-#define LOADARGS_3(name, arg1, arg2, arg3) \
-       LOADARGS_2(name, arg1, arg2); \
-       r5 = (long) (arg3)
-#define LOADARGS_4(name, arg1, arg2, arg3, arg4) \
-       LOADARGS_3(name, arg1, arg2, arg3); \
-       r6 = (long) (arg4)
-#define LOADARGS_5(name, arg1, arg2, arg3, arg4, arg5) \
-       LOADARGS_4(name, arg1, arg2, arg3, arg4); \
-       r7 = (long) (arg5)
-#define LOADARGS_6(name, arg1, arg2, arg3, arg4, arg5, arg6) \
-       LOADARGS_5(name, arg1, arg2, arg3, arg4, arg5); \
-       r8 = (long) (arg6)
-
-#define ASM_INPUT_0 "r" (r0)
-#define ASM_INPUT_1 ASM_INPUT_0, "0" (r3)
-#define ASM_INPUT_2 ASM_INPUT_1, "r" (r4)
-#define ASM_INPUT_3 ASM_INPUT_2, "r" (r5)
-#define ASM_INPUT_4 ASM_INPUT_3, "r" (r6)
-#define ASM_INPUT_5 ASM_INPUT_4, "r" (r7)
-#define ASM_INPUT_6 ASM_INPUT_5, "r" (r8)
+/* List of system calls which are supported as vsyscalls.  */
+#define HAVE_CLOCK_GETRES_VSYSCALL     1
+#define HAVE_CLOCK_GETTIME_VSYSCALL    1
+#define HAVE_GETCPU_VSYSCALL           1
+
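These guards exist so generic code can pick a vDSO path at compile time.  A hedged sketch of the consumer pattern (an assumption about the callers, not part of this patch; id, tp, ret and err are presumed declared):

      #ifdef HAVE_CLOCK_GETTIME_VSYSCALL
        ret = INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK (clock_gettime, err,
                                                     long int, 2, id, tp);
      #else
        ret = INTERNAL_SYSCALL (clock_gettime, err, 2, id, tp);
      #endif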
+/* Define a macro which expands inline into the wrapper code for a
+   vDSO call.  This use is for internal calls that do not need to
+   handle errors normally.  It will never touch errno.  It returns
+   just what the kernel gave back in the non-error (CR0.SO cleared)
+   case; otherwise (CR0.SO set) the negation applied to the error
+   value inside the kernel has already been reverted, so the result
+   is a positive errno code.  */
+
+#define INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, type, nr, args...)    \
+  ({                                                                   \
+    register void *r0  __asm__ ("r0");                                 \
+    register long int r3  __asm__ ("r3");                              \
+    register long int r4  __asm__ ("r4");                              \
+    register long int r5  __asm__ ("r5");                              \
+    register long int r6  __asm__ ("r6");                              \
+    register long int r7  __asm__ ("r7");                              \
+    register long int r8  __asm__ ("r8");                              \
+    register type rval  __asm__ ("r3");                                        \
+    LOADARGS_##nr (funcptr, args);                                     \
+    __asm__ __volatile__                                               \
+      ("mtctr %0\n\t"                                                  \
+       "bctrl\n\t"                                                     \
+       "mfcr  %0\n\t"                                                  \
+       "0:"                                                            \
+       : "+r" (r0), "+r" (r3), "+r" (r4), "+r" (r5),  "+r" (r6),        \
+         "+r" (r7), "+r" (r8)                                          \
+       : : "r9", "r10", "r11", "r12", "cr0", "ctr", "lr", "memory");   \
+    err = (long int) r0;                                               \
+    __asm__ __volatile__ ("" : "=r" (rval) : "r" (r3));                        \
+    rval;                                                              \
+  })
+
+#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...)              \
+  INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, long int, nr, args)
+
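To make the CR0.SO convention concrete, a hedged fragment (the vdsop pointer is assumed to have been demangled and NULL-checked already, as in the fallback macro above):

      /* Sketch: ERR receives the CR image saved by "mfcr"; bit 28 is
         CR0.SO, which is set on error.  On error R3 carries a positive
         errno value, which is what the macro evaluates to.  */
      long int err = 0;
      long int ret = INTERNAL_VSYSCALL_CALL (vdsop, err, 2, id, tp);
      if (err & (1 << 28))
        ret = -ret;          /* e.g. convert to a negative errno  */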
+#undef INLINE_SYSCALL
 
-#else
 /* This version is for kernels that implement system calls that
    behave like function calls as far as register saving is concerned.  */
-#define INLINE_SYSCALL(name, nr, args...)                      \
-  ({                                                           \
-    register long r0 __asm__ ("r0");                           \
-    register long r3 __asm__ ("r3");                           \
-    register long r4 __asm__ ("r4");                           \
-    register long r5 __asm__ ("r5");                           \
-    register long r6 __asm__ ("r6");                           \
-    register long r7 __asm__ ("r7");                           \
-    register long r8 __asm__ ("r8");                           \
-    long ret, err;                                             \
-    LOADARGS_##nr(name, args);                                 \
-    __asm __volatile ("sc\n\t"                                 \
-                     "mfcr     %7\n\t"                         \
-                     : "=r" (r0), "=r" (r3), "=r" (r4),        \
-                       "=r" (r5), "=r" (r6), "=r" (r7),        \
-                       "=r" (r8), "=r" (err)                   \
-                     : ASM_INPUT_##nr                          \
-                     : "r9", "r10", "r11", "r12",              \
-                       "fr0", "fr1", "fr2", "fr3",             \
-                       "fr4", "fr5", "fr6", "fr7",             \
-                       "fr8", "fr9", "fr10", "fr11",           \
-                       "fr12", "fr13",                         \
-                       "ctr", "lr",                            \
-                       "cr0", "cr1", "cr5", "cr6", "cr7",      \
-                       "memory");                              \
-    ret = r3;                                                  \
-    if (err & 1 << 28)                                         \
-      {                                                                \
-       __set_errno (ret);                                      \
-       ret = -1L;                                              \
-      }                                                                \
-    ret;                                                       \
+#define INLINE_SYSCALL(name, nr, args...)                              \
+  ({                                                                   \
+    INTERNAL_SYSCALL_DECL (sc_err);                                    \
+    long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args);       \
+    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))                     \
+      {                                                                        \
+        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err));         \
+        sc_ret = -1L;                                                  \
+      }                                                                        \
+    sc_ret;                                                            \
   })
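
A hedged sketch of the usual consumer (the wrapper name is hypothetical): because INLINE_SYSCALL stores errno and collapses failures to -1 itself, public wrappers reduce to a single line:

      /* Sketch: a classic syscall wrapper built on INLINE_SYSCALL.  */
      int
      close_sketch (int fd)
      {
        return INLINE_SYSCALL (close, 1, fd);
      }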
 
-#define LOADARGS_0(name) \
-       r0 = __NR_##name
-#define LOADARGS_1(name, arg1) \
-       LOADARGS_0(name); \
-       r3 = (long) (arg1)
-#define LOADARGS_2(name, arg1, arg2) \
-       LOADARGS_1(name, arg1); \
-       r4 = (long) (arg2)
-#define LOADARGS_3(name, arg1, arg2, arg3) \
-       LOADARGS_2(name, arg1, arg2); \
-       r5 = (long) (arg3)
-#define LOADARGS_4(name, arg1, arg2, arg3, arg4) \
-       LOADARGS_3(name, arg1, arg2, arg3); \
-       r6 = (long) (arg4)
-#define LOADARGS_5(name, arg1, arg2, arg3, arg4, arg5) \
-       LOADARGS_4(name, arg1, arg2, arg3, arg4); \
-       r7 = (long) (arg5)
-#define LOADARGS_6(name, arg1, arg2, arg3, arg4, arg5, arg6) \
-       LOADARGS_5(name, arg1, arg2, arg3, arg4, arg5); \
-       r8 = (long) (arg6)
+/* Define a macro which expands inline into the wrapper code for a
+   system call.  This use is for internal calls that do not need to
+   handle errors normally.  It will never touch errno.  It returns
+   just what the kernel gave back in the non-error (CR0.SO cleared)
+   case; otherwise (CR0.SO set) the negation applied to the error
+   value inside the kernel has already been reverted, so the result
+   is a positive errno code.  */
+
+#undef INTERNAL_SYSCALL
+#define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
+  ({                                                                   \
+    register long int r0  __asm__ ("r0");                              \
+    register long int r3  __asm__ ("r3");                              \
+    register long int r4  __asm__ ("r4");                              \
+    register long int r5  __asm__ ("r5");                              \
+    register long int r6  __asm__ ("r6");                              \
+    register long int r7  __asm__ ("r7");                              \
+    register long int r8  __asm__ ("r8");                              \
+    LOADARGS_##nr (name, ##args);                                      \
+    ABORT_TRANSACTION;                                                 \
+    __asm__ __volatile__                                               \
+      ("sc\n\t"                                                                \
+       "mfcr  %0\n\t"                                                  \
+       "0:"                                                            \
+       : "=&r" (r0),                                                   \
+         "=&r" (r3), "=&r" (r4), "=&r" (r5),                           \
+         "=&r" (r6), "=&r" (r7), "=&r" (r8)                            \
+       : ASM_INPUT_##nr                                                        \
+       : "r9", "r10", "r11", "r12",                                    \
+         "cr0", "ctr", "memory");                                      \
+    err = r0;                                                          \
+    r3;                                                                \
+  })
+#define INTERNAL_SYSCALL(name, err, nr, args...)                       \
+  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, args)
+
+#undef INTERNAL_SYSCALL_DECL
+#define INTERNAL_SYSCALL_DECL(err) long int err __attribute__ ((unused))
+
+#undef INTERNAL_SYSCALL_ERROR_P
+#define INTERNAL_SYSCALL_ERROR_P(val, err) \
+  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))
+
+#undef INTERNAL_SYSCALL_ERRNO
+#define INTERNAL_SYSCALL_ERRNO(val, err)     (val)
+
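The three helpers above are meant to be used together.  A hedged sketch (the function name is hypothetical) of the errno-free internal pattern:

      /* Sketch: INTERNAL_SYSCALL never touches errno; the caller tests
         the saved CR image and extracts the error value itself.  */
      long int
      write_internal_sketch (int fd, const void *buf, size_t len)
      {
        INTERNAL_SYSCALL_DECL (err);
        long int ret = INTERNAL_SYSCALL (write, err, 3, fd, buf, len);
        if (INTERNAL_SYSCALL_ERROR_P (ret, err))
          return -INTERNAL_SYSCALL_ERRNO (ret, err);  /* negative errno  */
        return ret;
      }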
+#define LOADARGS_0(name, dummy) \
+       r0 = name
+#define LOADARGS_1(name, __arg1) \
+       long int arg1 = (long int) (__arg1); \
+       LOADARGS_0(name, 0); \
+       extern void __illegally_sized_syscall_arg1 (void); \
+       if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 8) \
+         __illegally_sized_syscall_arg1 (); \
+       r3 = arg1
+#define LOADARGS_2(name, __arg1, __arg2) \
+       long int arg2 = (long int) (__arg2); \
+       LOADARGS_1(name, __arg1); \
+       extern void __illegally_sized_syscall_arg2 (void); \
+       if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 8) \
+         __illegally_sized_syscall_arg2 (); \
+       r4 = arg2
+#define LOADARGS_3(name, __arg1, __arg2, __arg3) \
+       long int arg3 = (long int) (__arg3); \
+       LOADARGS_2(name, __arg1, __arg2); \
+       extern void __illegally_sized_syscall_arg3 (void); \
+       if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 8) \
+         __illegally_sized_syscall_arg3 (); \
+       r5 = arg3
+#define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
+       long int arg4 = (long int) (__arg4); \
+       LOADARGS_3(name, __arg1, __arg2, __arg3); \
+       extern void __illegally_sized_syscall_arg4 (void); \
+       if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 8) \
+         __illegally_sized_syscall_arg4 (); \
+       r6 = arg4
+#define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
+       long int arg5 = (long int) (__arg5); \
+       LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
+       extern void __illegally_sized_syscall_arg5 (void); \
+       if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 8) \
+         __illegally_sized_syscall_arg5 (); \
+       r7 = arg5
+#define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
+       long int arg6 = (long int) (__arg6); \
+       LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
+       extern void __illegally_sized_syscall_arg6 (void); \
+       if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 8) \
+         __illegally_sized_syscall_arg6 (); \
+       r8 = arg6
 
 #define ASM_INPUT_0 "0" (r0)
 #define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
 #define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
 #define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
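
The extern declarations inside the LOADARGS_* macros form a link-time assertion; spelled out on its own (an illustration, not part of the patch):

      /* Sketch: GCC's __builtin_classify_type yields 5 for pointers, so
         the test fires only for a non-pointer argument wider than a
         64-bit register.  For a valid argument the condition folds to a
         compile-time 0, the call below is dead code, and the undefined
         function is never referenced; an over-wide argument leaves the
         call in place and the link fails loudly.  */
      extern void __illegally_sized_syscall_arg1 (void);
      long double bad = 0;   /* 16 bytes on ppc64: cannot fit in r3  */
      if (__builtin_classify_type (bad) != 5 && sizeof (bad) > 8)
        __illegally_sized_syscall_arg1 ();  /* unresolved -> link error  */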
 
+
+/* Pointer mangling support.  */
+#if IS_IN (rtld)
+/* We cannot use the thread descriptor because in ld.so we use setjmp
+   earlier than the descriptor is initialized.  */
+#else
+# ifdef __ASSEMBLER__
+#  define PTR_MANGLE(reg, tmpreg) \
+       ld      tmpreg,POINTER_GUARD(r13); \
+       xor     reg,tmpreg,reg
+#  define PTR_MANGLE2(reg, tmpreg) \
+       xor     reg,tmpreg,reg
+#  define PTR_MANGLE3(destreg, reg, tmpreg) \
+       ld      tmpreg,POINTER_GUARD(r13); \
+       xor     destreg,tmpreg,reg
+#  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
+#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
+#  define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
+# else
+#  define PTR_MANGLE(var) \
+  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
+#  define PTR_DEMANGLE(var)    PTR_MANGLE (var)
+# endif
 #endif
 
 #endif /* linux/powerpc/powerpc64/sysdep.h */
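
For reference, a hedged sketch of the C-side mangling round trip (the handler is hypothetical; it assumes the thread pointer and guard are initialized, which is exactly why ld.so is excluded above):

      /* Sketch: mangling XORs a pointer with the per-thread pointer
         guard before it is stored; demangling is the identical XOR, so
         the round trip restores the original value.  */
      void (*fn) (void) = some_handler;
      PTR_MANGLE (fn);      /* obfuscated form, safer to store long-term  */
      PTR_DEMANGLE (fn);    /* restore before the indirect call  */
      fn ();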